pull/2/head
sangta 2024-06-25 10:41:35 +07:00
parent 426b0c34a9
commit d6f7a97fcb
27 changed files with 7509 additions and 3355 deletions

File diff suppressed because one or more lines are too long (12 files)

View File

@@ -802,8 +802,7 @@ MonoBehaviour:
m_OnCullStateChanged: m_OnCullStateChanged:
m_PersistentCalls: m_PersistentCalls:
m_Calls: [] m_Calls: []
m_text: Tinh hinh cac chi em sau khi check so du tai khoan khi thang luong tiep m_text:
theo chua den
m_isRightToLeft: 0 m_isRightToLeft: 0
m_fontAsset: {fileID: 11400000, guid: 32ca7ffda2664c077bcf6abfc6f32d0d, type: 2} m_fontAsset: {fileID: 11400000, guid: 32ca7ffda2664c077bcf6abfc6f32d0d, type: 2}
m_sharedMaterial: {fileID: -1949374272958031481, guid: 32ca7ffda2664c077bcf6abfc6f32d0d, type: 2} m_sharedMaterial: {fileID: -1949374272958031481, guid: 32ca7ffda2664c077bcf6abfc6f32d0d, type: 2}
@@ -1270,10 +1269,10 @@ MonoBehaviour:
m_OnCullStateChanged: m_OnCullStateChanged:
m_PersistentCalls: m_PersistentCalls:
m_Calls: [] m_Calls: []
m_text: Soi Ca Phe, Ra Tinh Net! m_text: "Soi C\xE0 Ph\xEA, Ra T\xEDnh N\u1EBFt!"
m_isRightToLeft: 0 m_isRightToLeft: 0
m_fontAsset: {fileID: 11400000, guid: 9fdb488a9e528566eb704ef9b2cfba44, type: 2} m_fontAsset: {fileID: 11400000, guid: 67f211977806da85ba63ff22497a20f3, type: 2}
m_sharedMaterial: {fileID: -8730093448591250526, guid: 9fdb488a9e528566eb704ef9b2cfba44, type: 2} m_sharedMaterial: {fileID: -4438248766993109609, guid: 67f211977806da85ba63ff22497a20f3, type: 2}
m_fontSharedMaterials: [] m_fontSharedMaterials: []
m_fontMaterial: {fileID: 0} m_fontMaterial: {fileID: 0}
m_fontMaterials: [] m_fontMaterials: []
@@ -1496,7 +1495,7 @@ MonoBehaviour:
m_OnCullStateChanged: m_OnCullStateChanged:
m_PersistentCalls: m_PersistentCalls:
m_Calls: [] m_Calls: []
m_text: QUET DE TAI ANH VA NHAN QUA m_text: "QU\xC9T \u0110\u1EC2 T\u1EA2I \u1EA2NH V\xC0 NH\u1EACN QU\xC0"
m_isRightToLeft: 0 m_isRightToLeft: 0
m_fontAsset: {fileID: 11400000, guid: 67f211977806da85ba63ff22497a20f3, type: 2} m_fontAsset: {fileID: 11400000, guid: 67f211977806da85ba63ff22497a20f3, type: 2}
m_sharedMaterial: {fileID: -4438248766993109609, guid: 67f211977806da85ba63ff22497a20f3, type: 2} m_sharedMaterial: {fileID: -4438248766993109609, guid: 67f211977806da85ba63ff22497a20f3, type: 2}
@@ -1697,6 +1696,7 @@ MonoBehaviour:
_rawImage: {fileID: 1199587752} _rawImage: {fileID: 1199587752}
_scanSuccess: {fileID: 11400000, guid: 1d51ed465ae60d5499eed7fd18d21194, type: 2} _scanSuccess: {fileID: 11400000, guid: 1d51ed465ae60d5499eed7fd18d21194, type: 2}
_guestUpdatedSubscription: {fileID: 11400000, guid: f98ac02dda5623c4c82d342ee9602420, type: 2} _guestUpdatedSubscription: {fileID: 11400000, guid: f98ac02dda5623c4c82d342ee9602420, type: 2}
_descText: {fileID: 523800084}
--- !u!1 &1388120848 --- !u!1 &1388120848
GameObject: GameObject:
m_ObjectHideFlags: 0 m_ObjectHideFlags: 0

View File

@@ -161,8 +161,8 @@ RectTransform:
m_LocalEulerAnglesHint: {x: 0, y: 0, z: 45} m_LocalEulerAnglesHint: {x: 0, y: 0, z: 45}
m_AnchorMin: {x: 0.5, y: 0.5} m_AnchorMin: {x: 0.5, y: 0.5}
m_AnchorMax: {x: 0.5, y: 0.5} m_AnchorMax: {x: 0.5, y: 0.5}
m_AnchoredPosition: {x: -90, y: -10} m_AnchoredPosition: {x: -100, y: 0}
m_SizeDelta: {x: 512, y: 512} m_SizeDelta: {x: 700, y: 700}
m_Pivot: {x: 0.5, y: 0.5} m_Pivot: {x: 0.5, y: 0.5}
--- !u!114 &39214310 --- !u!114 &39214310
MonoBehaviour: MonoBehaviour:
@@ -354,15 +354,15 @@ RectTransform:
m_GameObject: {fileID: 226662302} m_GameObject: {fileID: 226662302}
m_LocalRotation: {x: 0, y: 0, z: 0, w: 1} m_LocalRotation: {x: 0, y: 0, z: 0, w: 1}
m_LocalPosition: {x: 0, y: 0, z: 0} m_LocalPosition: {x: 0, y: 0, z: 0}
m_LocalScale: {x: 1, y: 1, z: 1} m_LocalScale: {x: 0.85, y: 0.85, z: 0.85}
m_ConstrainProportionsScale: 1 m_ConstrainProportionsScale: 1
m_Children: [] m_Children: []
m_Father: {fileID: 39214309} m_Father: {fileID: 39214309}
m_LocalEulerAnglesHint: {x: 0, y: 0, z: 0} m_LocalEulerAnglesHint: {x: 0, y: 0, z: 0}
m_AnchorMin: {x: 0, y: 0} m_AnchorMin: {x: 0, y: 0}
m_AnchorMax: {x: 1, y: 1} m_AnchorMax: {x: 1, y: 1}
m_AnchoredPosition: {x: 0, y: 0} m_AnchoredPosition: {x: 0, y: -31}
m_SizeDelta: {x: 0, y: 0} m_SizeDelta: {x: -100, y: -100}
m_Pivot: {x: 0.5, y: 0.5} m_Pivot: {x: 0.5, y: 0.5}
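Several of these RectTransform tweaks rely on stretched anchors: with m_AnchorMin {x: 0, y: 0} and m_AnchorMax {x: 1, y: 1}, m_SizeDelta is added on top of the parent's size, so the new {x: -100, y: -100} insets the child 50 px on every edge while the y: -31 anchored position nudges it down. A quick numeric check in Python, with a hypothetical parent size:

# RectTransform size under fully stretched anchors:
#   size = (anchorMax - anchorMin) * parent_size + sizeDelta
parent = (1080, 1920)            # hypothetical parent dimensions
size_delta = (-100, -100)
child = (parent[0] + size_delta[0], parent[1] + size_delta[1])
print(child)                     # (980, 1820): a 50 px inset on each edge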
--- !u!114 &226662304 --- !u!114 &226662304
MonoBehaviour: MonoBehaviour:
@@ -442,8 +442,8 @@ RectTransform:
m_GameObject: {fileID: 263418221} m_GameObject: {fileID: 263418221}
m_LocalRotation: {x: 0, y: 0, z: -0.38268343, w: 0.92387956} m_LocalRotation: {x: 0, y: 0, z: -0.38268343, w: 0.92387956}
m_LocalPosition: {x: 0, y: 0, z: 0} m_LocalPosition: {x: 0, y: 0, z: 0}
m_LocalScale: {x: 1, y: 1, z: 1} m_LocalScale: {x: 1.2, y: 1.2, z: 1.2}
m_ConstrainProportionsScale: 0 m_ConstrainProportionsScale: 1
m_Children: m_Children:
- {fileID: 39214309} - {fileID: 39214309}
- {fileID: 1701610192} - {fileID: 1701610192}
@@ -549,7 +549,7 @@ MonoBehaviour:
m_OnCullStateChanged: m_OnCullStateChanged:
m_PersistentCalls: m_PersistentCalls:
m_Calls: [] m_Calls: []
m_text: <size=80>Soi Ca Phe, Ra Tinh Net!</size> m_text: "<size=80>Soi C\xE0 Ph\xEA, Ra T\xEDnh N\u1EBFt!</size>"
m_isRightToLeft: 0 m_isRightToLeft: 0
m_fontAsset: {fileID: 11400000, guid: 67f211977806da85ba63ff22497a20f3, type: 2} m_fontAsset: {fileID: 11400000, guid: 67f211977806da85ba63ff22497a20f3, type: 2}
m_sharedMaterial: {fileID: -4438248766993109609, guid: 67f211977806da85ba63ff22497a20f3, type: 2} m_sharedMaterial: {fileID: -4438248766993109609, guid: 67f211977806da85ba63ff22497a20f3, type: 2}
@@ -683,10 +683,11 @@ MonoBehaviour:
m_OnCullStateChanged: m_OnCullStateChanged:
m_PersistentCalls: m_PersistentCalls:
m_Calls: [] m_Calls: []
m_text: Nhin man hinh va khong deo khay trang nhe m_text: "Nh\xECn th\u1EB3ng v\xE0o m\xE0n h\xECnh v\xE0 kh\xF4ng \u0111eo kh\u1EA9u
trang nh\xE9"
m_isRightToLeft: 0 m_isRightToLeft: 0
m_fontAsset: {fileID: 11400000, guid: 8f586378b4e144a9851e7b34d9b748ee, type: 2} m_fontAsset: {fileID: 11400000, guid: 67f211977806da85ba63ff22497a20f3, type: 2}
m_sharedMaterial: {fileID: 2180264, guid: 8f586378b4e144a9851e7b34d9b748ee, type: 2} m_sharedMaterial: {fileID: -4438248766993109609, guid: 67f211977806da85ba63ff22497a20f3, type: 2}
m_fontSharedMaterials: [] m_fontSharedMaterials: []
m_fontMaterial: {fileID: 0} m_fontMaterial: {fileID: 0}
m_fontMaterials: [] m_fontMaterials: []
@@ -710,8 +711,8 @@ MonoBehaviour:
m_faceColor: m_faceColor:
serializedVersion: 2 serializedVersion: 2
rgba: 4294967295 rgba: 4294967295
m_fontSize: 50 m_fontSize: 45
m_fontSizeBase: 50 m_fontSizeBase: 45
m_fontWeight: 400 m_fontWeight: 400
m_enableAutoSizing: 0 m_enableAutoSizing: 0
m_fontSizeMin: 18 m_fontSizeMin: 18
@@ -1196,7 +1197,7 @@ RectTransform:
m_AnchorMin: {x: 0, y: 0} m_AnchorMin: {x: 0, y: 0}
m_AnchorMax: {x: 1, y: 1} m_AnchorMax: {x: 1, y: 1}
m_AnchoredPosition: {x: 0, y: 0} m_AnchoredPosition: {x: 0, y: 0}
m_SizeDelta: {x: 0, y: 0} m_SizeDelta: {x: 100, y: 100}
m_Pivot: {x: 0.5, y: 0.5} m_Pivot: {x: 0.5, y: 0.5}
--- !u!114 &1087577779 --- !u!114 &1087577779
MonoBehaviour: MonoBehaviour:
@@ -1622,7 +1623,7 @@ RectTransform:
m_AnchorMin: {x: 0, y: 0} m_AnchorMin: {x: 0, y: 0}
m_AnchorMax: {x: 1, y: 1} m_AnchorMax: {x: 1, y: 1}
m_AnchoredPosition: {x: 0, y: 0} m_AnchoredPosition: {x: 0, y: 0}
m_SizeDelta: {x: 200, y: 200} m_SizeDelta: {x: 100, y: 100}
m_Pivot: {x: 0.5, y: 0.5} m_Pivot: {x: 0.5, y: 0.5}
--- !u!222 &1391297539 --- !u!222 &1391297539
CanvasRenderer: CanvasRenderer:
@@ -1665,7 +1666,7 @@ MonoBehaviour:
m_OnCullStateChanged: m_OnCullStateChanged:
m_PersistentCalls: m_PersistentCalls:
m_Calls: [] m_Calls: []
m_Sprite: {fileID: 21300000, guid: be9c33f3a6bc25e4f9ec0dcb6dd26e5e, type: 3} m_Sprite: {fileID: 0}
m_Type: 0 m_Type: 0
m_PreserveAspect: 1 m_PreserveAspect: 1
m_FillCenter: 1 m_FillCenter: 1

View File

@@ -18,7 +18,6 @@ namespace GadGame.Scripts
_image = GetComponent<Image>(); _image = GetComponent<Image>();
_image.preserveAspect = _preserveAspect; _image.preserveAspect = _preserveAspect;
_texture = new Texture2D(1, 1); _texture = new Texture2D(1, 1);
} }
public void LoadImage(string streamingData) public void LoadImage(string streamingData)

View File

@@ -29,23 +29,23 @@ namespace GadGame.Network
private Process _process; private Process _process;
private void Start() private void Start()
{ {
_process = new Process(); // _process = new Process();
_process.StartInfo.FileName = "/bin/sh"; // _process.StartInfo.FileName = "/bin/sh";
_process.StartInfo.Arguments = $"{Application.streamingAssetsPath}/MergeFace/run.sh"; // _process.StartInfo.Arguments = $"{Application.streamingAssetsPath}/MergeFace/run.sh";
_process.StartInfo.WorkingDirectory = $"{Application.streamingAssetsPath}/MergeFace"; // _process.StartInfo.WorkingDirectory = $"{Application.streamingAssetsPath}/MergeFace";
_process.StartInfo.RedirectStandardOutput = true; // _process.StartInfo.RedirectStandardOutput = true;
_process.StartInfo.RedirectStandardError = true; // _process.StartInfo.RedirectStandardError = true;
_process.StartInfo.CreateNoWindow = false; // _process.StartInfo.CreateNoWindow = false;
_process.StartInfo.UseShellExecute = false; // _process.StartInfo.UseShellExecute = false;
_process.OutputDataReceived += (sender, a) => { // _process.OutputDataReceived += (sender, a) => {
Debug.Log(a.Data); // Debug.Log(a.Data);
}; // };
_process.ErrorDataReceived += (sender, a) => { // _process.ErrorDataReceived += (sender, a) => {
Debug.LogError(a.Data); // Debug.LogError(a.Data);
}; // };
_process.Start(); // _process.Start();
// Create remote endpoint // Create remote endpoint
@@ -110,29 +110,29 @@ namespace GadGame.Network
_client.Close(); _client.Close();
Process killProcess = new Process(); // Process killProcess = new Process();
killProcess.StartInfo.FileName = "/bin/sh"; // killProcess.StartInfo.FileName = "/bin/sh";
killProcess.StartInfo.Arguments = $"{Application.streamingAssetsPath}/MergeFace/kill_process.sh"; // killProcess.StartInfo.Arguments = $"{Application.streamingAssetsPath}/MergeFace/kill_process.sh";
killProcess.StartInfo.WorkingDirectory = $"{Application.streamingAssetsPath}/MergeFace"; // killProcess.StartInfo.WorkingDirectory = $"{Application.streamingAssetsPath}/MergeFace";
killProcess.StartInfo.RedirectStandardOutput = true; // killProcess.StartInfo.RedirectStandardOutput = true;
killProcess.StartInfo.RedirectStandardError = true; // killProcess.StartInfo.RedirectStandardError = true;
killProcess.StartInfo.CreateNoWindow = false; // killProcess.StartInfo.CreateNoWindow = false;
killProcess.StartInfo.UseShellExecute = false; // killProcess.StartInfo.UseShellExecute = false;
killProcess.OutputDataReceived += (sender, a) => { // killProcess.OutputDataReceived += (sender, a) => {
Debug.Log(a.Data); // Debug.Log(a.Data);
}; // };
killProcess.ErrorDataReceived += (sender, a) => { // killProcess.ErrorDataReceived += (sender, a) => {
Debug.LogError(a.Data); // Debug.LogError(a.Data);
}; // };
killProcess.Start(); // killProcess.Start();
killProcess.BeginOutputReadLine(); // killProcess.BeginOutputReadLine();
killProcess.BeginErrorReadLine(); // killProcess.BeginErrorReadLine();
_process.Close(); // _process.Close();
_process.CloseMainWindow(); // _process.CloseMainWindow();
_process.WaitForExit(); // _process.WaitForExit();
} }
} }
} }
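For reference, the block disabled here launched StreamingAssets/MergeFace/run.sh through /bin/sh, piped the script's stdout/stderr into Unity's log, and killed it via kill_process.sh on shutdown. A rough Python sketch of the same launch-and-stream pattern, with subprocess standing in for System.Diagnostics.Process and paths taken from the diff:

import subprocess

# Launch the MergeFace pipeline and stream its output, mirroring the
# commented-out Process setup (working directory and script path from the diff).
proc = subprocess.Popen(
    ["/bin/sh", "run.sh"],
    cwd="Assets/StreamingAssets/MergeFace",   # assumed project-relative layout
    stdout=subprocess.PIPE,
    stderr=subprocess.STDOUT,
    text=True,
)
for line in proc.stdout:                      # the OutputDataReceived equivalent
    print(line, end="")
proc.wait()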

View File

@@ -11,10 +11,12 @@ public class QRShowNewCTA : MonoBehaviour
[SerializeField] private RawImage _rawImage; [SerializeField] private RawImage _rawImage;
[SerializeField] private VoidEvent _scanSuccess; [SerializeField] private VoidEvent _scanSuccess;
[SerializeField] private GuestEvent _guestUpdatedSubscription; [SerializeField] private GuestEvent _guestUpdatedSubscription;
[SerializeField] private TMP_Text _descText;
async void Start() async void Start()
{ {
_descText.text = UdpSocket.Instance.DataReceived.Description;
_rawImage.texture = await P4PGraphqlManager.Instance.GetQrLink(); _rawImage.texture = await P4PGraphqlManager.Instance.GetQrLink();
// _timer.SetDuration(60).Begin(); // _timer.SetDuration(60).Begin();
} }

View File

@@ -49,7 +49,7 @@ class MainProgram:
self.all_record = wks.get_all_records() self.all_record = wks.get_all_records()
self.client = NovitaClient("bd00a29d-86a8-4bad-9b8c-e085a5860311") self.client = NovitaClient("48cc2b16-286f-49c8-9581-f409b68359c4")
self.ready_success = False self.ready_success = False
@@ -61,7 +61,7 @@ class MainProgram:
def convertFrame(self, frame) -> str: def convertFrame(self, frame) -> str:
encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), 90] encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), 90]
frame = imutils.resize(frame, width=512) frame = imutils.resize(frame, width=320)
result, encoded_frame = cv2.imencode('.jpg', frame, encode_param) result, encoded_frame = cv2.imencode('.jpg', frame, encode_param)
jpg_as_text = base64.b64encode(encoded_frame.tobytes()) jpg_as_text = base64.b64encode(encoded_frame.tobytes())
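Dropping the resize width from 512 to 320 shrinks the JPEG, and therefore the base64 text sent over UDP, roughly in proportion to the pixel count (about 0.4x). A stand-alone sketch of the same pipeline, assuming cv2, imutils, and numpy as in the script:

import base64
import cv2
import imutils
import numpy as np

# Synthetic gradient frame standing in for a 1280x720 camera capture.
frame = cv2.cvtColor(np.tile(np.arange(1280, dtype=np.uint8), (720, 1)), cv2.COLOR_GRAY2BGR)
for width in (512, 320):
    resized = imutils.resize(frame, width=width)
    ok, buf = cv2.imencode(".jpg", resized, [int(cv2.IMWRITE_JPEG_QUALITY), 90])
    print(width, len(base64.b64encode(buf.tobytes())))  # payload length tracks pixel count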
@@ -252,7 +252,7 @@ class MainProgram:
self.red_zone_height = (self.center_point[1] - 50, self.frame_height) self.red_zone_height = (self.center_point[1] - 50, self.frame_height)
self.face_zone_width = (self.center_point[0] - 150, self.center_point[0] + 150) self.face_zone_width = (self.center_point[0] - 150, self.center_point[0] + 150)
self.face_zone_height = (self.center_point[1] - 200, self.center_point[1]) self.face_zone_height = (self.center_point[1] - 200, self.center_point[1] + 50)
self.face_zone_center_point = ( self.face_zone_center_point = (
int((self.face_zone_width[1] - self.face_zone_width[0]) / 2) + self.face_zone_width[0], int((self.face_zone_width[1] - self.face_zone_width[0]) / 2) + self.face_zone_width[0],
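Raising face_zone_height's upper bound by 50 px pushes the zone's lower edge past the frame's vertical midpoint and lowers its center accordingly. A quick check of the arithmetic, assuming a 640x480 capture:

# Zone bounds after this change, for an assumed 640x480 frame.
frame_w, frame_h = 640, 480
cx, cy = frame_w // 2, frame_h // 2                      # (320, 240)
face_zone_w = (cx - 150, cx + 150)                       # (170, 470)
face_zone_h = (cy - 200, cy + 50)                        # (40, 290)
center = ((face_zone_w[1] - face_zone_w[0]) // 2 + face_zone_w[0],
          (face_zone_h[1] - face_zone_h[0]) // 2 + face_zone_h[0])
print(center)                                            # (320, 165)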
@@ -260,6 +260,8 @@ class MainProgram:
ret, frame = cap.read() ret, frame = cap.read()
frame = cv2.flip(frame, 1)
if not ret: if not ret:
continue continue
@@ -281,16 +283,16 @@ class MainProgram:
self.sock.SendData(self.send_data_unity) self.sock.SendData(self.send_data_unity)
# cv2.rectangle(self.frame_to_show, (self.face_zone_width[0], self.face_zone_height[0]), cv2.rectangle(self.frame_to_show, (self.face_zone_width[0], self.face_zone_height[0]),
# (self.face_zone_width[1], self.face_zone_height[1]), (self.face_zone_width[1], self.face_zone_height[1]),
# (0, 255, 255), 2) (0, 255, 255), 2)
# cv2.circle(self.frame_to_show, self.face_zone_center_point, 5, (255, 255, 0), -1) cv2.circle(self.frame_to_show, self.face_zone_center_point, 5, (255, 255, 0), -1)
# cv2.imshow("Output", self.frame_to_show) cv2.imshow("Output", self.frame_to_show)
#
# if cv2.waitKey(1) & 0xFF == ord("q"): if cv2.waitKey(1) & 0xFF == ord("q"):
# break break
cap.release() cap.release()
cv2.destroyAllWindows() cv2.destroyAllWindows()

View File

@@ -0,0 +1,323 @@
import cv2
import numpy as np
from lib import *
class MainProgram:
def __init__(self, face_model_path, model_path, reid_weights, tracker_type="deepocsort"):
self.face_model_path = face_model_path
self.model_path = model_path
self.face_model = YOLO(face_model_path)
self.person_model = YOLO(model_path)
self.reid_weights = reid_weights
self.tracker_conf = get_tracker_config(tracker_type)
self.sock = U.UdpComms(udpIP="192.168.1.122", portTX=8000, portRX=8001, enableRX=True, suppressWarnings=True)
self.tracker = create_tracker(
tracker_type=tracker_type,
tracker_config=self.tracker_conf,
reid_weights=reid_weights,
device='0',
half=False,
per_class=False
)
self.send_data_unity: dict = {
"PassBy": False,
"Engage": False,
"Ready": False,
"Gender": None,
"AgeMin": None,
"AgeMax": None,
"GenerateImageSuccess": False,
"Description": ""
}
self.focus_id = None
self.frame_count_remove_idx = 0
sa = gspread.service_account("key.json")
sh = sa.open("TestData")
wks = sh.worksheet("Sheet1")
self.all_record = wks.get_all_records()
self.client = NovitaClient("48cc2b16-286f-49c8-9581-f409b68359c4")
self.ready_success = False
self.show_success = False
self.check_save, self.check_generate = False, False
self.forward_face = Face_detection.FaceDetection()
def convertFrame(self, frame) -> str:
encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), 90]
frame = imutils.resize(frame, width=512)
result, encoded_frame = cv2.imencode('.jpg', frame, encode_param)
jpg_as_text = base64.b64encode(encoded_frame.tobytes())
return jpg_as_text.decode('utf-8')
def get_image(self):
ran_num = random.randint(0, len(self.all_record) - 1)
image_url = self.all_record[ran_num]["Image link"]
des = self.all_record[ran_num]["Note"]
return image_url, des
def generate_image(self):
image_url, des = self.get_image()
res = self.client.merge_face(
image=image_url,
face_image="./image/output.jpg",
)
base64_to_image(res.image_file).save("./image/merge_face.png")
self.send_data_unity["Description"] = des
self.send_data_unity["GenerateImageSuccess"] = True
self.send_data_unity["StreamingData"] = "./Assets/StreamingAssets/MergeFace/image/merge_face.png"
def predict_age_and_gender(self):
image_predict = cv2.imread("./image/output.jpg")
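# AgeGenderPrediction.Prediction is assumed to return (gender, "min-max" age string)
# for a usable face and a falsy value otherwise; it is re-invoked below per field.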
if AgeGenderPrediction.Prediction(image_predict):
self.send_data_unity["Gender"] = AgeGenderPrediction.Prediction(image_predict)[0]
self.send_data_unity["AgeMin"] = int(
AgeGenderPrediction.Prediction(image_predict)[1].split("-")[0])
self.send_data_unity["AgeMax"] = int(
AgeGenderPrediction.Prediction(image_predict)[1].split("-")[1])
else:
self.send_data_unity["Gender"] = None
self.send_data_unity["AgeMin"] = None
self.send_data_unity["AgeMax"] = None
def get_face_bbox(self, frame):
outs = self.face_model(frame)
results = sv.Detections.from_ultralytics(outs[0])
bbox = results.xyxy.astype(np.int_)
conf = results.confidence.astype(np.float32)
return np.concatenate((bbox, conf[:, np.newaxis]), axis=1)
def get_person_bbox(self, frame):
# Perform object detection with YOLOv8; class 0 is the person class
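# boxmot trackers expect detections as an (N, 6) float array: [x1, y1, x2, y2, conf, cls].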
outs = self.person_model(frame, classes=[0], conf=0.7)
if not outs[0].boxes.xyxy.tolist():
detection = np.empty((0, 6))
# Extract relevant information from detections for boxmot
else:
boxes = outs[0].boxes.xyxy.tolist()
classes = outs[0].boxes.cls.tolist()
confidences = outs[0].boxes.conf.tolist()
detection = np.array([box + [conf, cls] for box, conf, cls in zip(boxes, confidences, classes)])
return detection
def check_engage(self, x1, x2) -> bool:
if not (x1 > self.red_zone_width[1] or x2 < self.red_zone_width[0]):
return True
return False
def cropped_image(self, frame, x1, y1, x2, y2):
return frame[y1: y2, x1: x2]
def calculate_dis_to_cp(self, cx, cy, face_cx, face_cy) -> float:
return math.sqrt((face_cx - cx) ** 2 + (face_cy - cy) ** 2)
def check_ready(self, x1, y1, x2, y2, frame):
person_frame = self.cropped_image(frame, x1, y1, x2, y2)
# out = self.face_model(person_frame)
# results = sv.Detections.from_ultralytics(out[0])
# bbox = results.xyxy.astype(np.int_)
#
# face_cx, face_cy = (int(bbox[0][0] + x1 + (bbox[0][2] - bbox[0][0]) / 2),
# int(bbox[0][1] + y1 + (bbox[0][3] - bbox[0][1]) / 2))
#
# dis = self.calculate_dis_to_cp()
return self.forward_face.detect_face(person_frame, self.face_zone_center_point[0],
self.face_zone_center_point[1], x1, y1)
def person_process(self, frame):
# Perform person detection
person_detections = self.get_person_bbox(frame)
# Update the tracker with person detections
tracked_objects = self.tracker.update(person_detections, frame)
track_list = []
frame_to_crop = frame.copy()
engage = False
for track in tracked_objects.astype(int):
x1, y1, x2, y2, track_id, conf, cls, _ = track
track_list.append(track_id)
# cv2.rectangle(self.frame_to_show, (x1, y1), (x2, y2), (0, 255, 0), 2)
# cv2.putText(self.frame_to_show, f"ID: {track_id} Conf: {conf:.2f}", (x1, y1 - 10),
# cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
#
# cv2.rectangle(self.frame_to_show, (self.red_zone_width[0], self.red_zone_height[0]),
# (self.red_zone_width[1], self.red_zone_height[1]), (255, 0, 0), 2)
if not engage:
engage = self.check_engage(x1, x2)
if not self.focus_id:
self.focus_id = track_id if self.check_ready(x1, y1, x2, y2, frame_to_crop) else None
elif track_id != self.focus_id:
continue
else:
received_data = self.sock.ReadReceivedData()
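# Unity replies over the same socket: "Begin" starts a capture session,
# "End" resets the per-guest flags and deletes the temporary images.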
if received_data == "Begin":
self.ready_success = True
elif received_data == "End":
self.ready_success = False
self.check_save = False
self.check_generate = False
os.remove("./image/output.jpg")
os.remove("./image/merge_face.png")
self.send_data_unity: dict = {
"PassBy": False,
"Engage": False,
"Ready": False,
"Gender": None,
"AgeMin": None,
"AgeMax": None,
"GenerateImageSuccess": False,
"Description": ""
}
if not self.ready_success:
self.send_data_unity["Ready"] = True if self.check_ready(x1, y1, x2, y2, frame_to_crop) else False
elif not self.check_save:
cv2.imwrite("./image/output.jpg", self.cropped_image(frame, x1, y1, x2, y2))
self.check_save = True
elif not self.check_generate:
if str(self.send_data_unity["Gender"]) == "None":
self.predict_age_and_gender()
else:
self.generate_image()
self.check_generate = True
elif self.show_success:
self.check_save = False
self.check_generate = False
if track_list:
self.send_data_unity["PassBy"] = True
self.send_data_unity["Engage"] = engage
else:
self.send_data_unity["Engage"] = False
self.send_data_unity["PassBy"] = False
self.send_data_unity["Ready"] = False
if self.focus_id not in track_list:
if self.frame_count_remove_idx == 20:
self.frame_count_remove_idx = 0
self.focus_id = None
else:
self.frame_count_remove_idx += 1
else:
self.frame_count_remove_idx = 0
# cv2.putText(self.frame_to_show, f"Focus id: {self.focus_id}", (20, 20), cv2.FONT_HERSHEY_SIMPLEX,
# 1.0, (0, 255, 255), 2)
def __call__(self):
cap = cv2.VideoCapture(0)
while cap.isOpened():
self.frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
self.frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
self.center_point = (int(int(self.frame_width) / 2), int(int(self.frame_height) / 2))
self.red_zone_width = (self.center_point[0] - 250, self.center_point[0] + 250)
self.red_zone_height = (self.center_point[1] - 50, self.frame_height)
self.face_zone_width = (self.center_point[0] - 100, self.center_point[0] + 100)
self.face_zone_height = (self.center_point[1] - 200, self.center_point[1])
self.face_zone_center_point = (
int((self.face_zone_width[1] - self.face_zone_width[0]) / 2) + self.face_zone_width[0],
int((self.face_zone_height[1] - self.face_zone_height[0]) / 2) + self.face_zone_height[0])
ret, frame = cap.read()
frame = cv2.flip(frame, 1)
if not ret:
continue
frame_to_handle = frame.copy()
self.frame_to_show = frame.copy()
try:
self.person_process(frame_to_handle)
except Exception as e:
print(e)
if not self.send_data_unity["GenerateImageSuccess"]:
self.send_data_unity["StreamingData"] = self.convertFrame(self.cropped_image(frame,
self.face_zone_width[0],
self.face_zone_height[0],
self.face_zone_width[1],
self.face_zone_height[1]))
self.sock.SendData(self.send_data_unity)
cv2.rectangle(self.frame_to_show, (self.face_zone_width[0], self.face_zone_height[0]),
(self.face_zone_width[1], self.face_zone_height[1]),
(0, 255, 255), 2)
cv2.circle(self.frame_to_show, self.face_zone_center_point, 5, (255, 255, 0), -1)
cv2.imshow("Output", self.frame_to_show)
if cv2.waitKey(1) & 0xFF == ord("q"):
break
cap.release()
cv2.destroyAllWindows()
if __name__ == "__main__":
print("Starting python...")
face_model_path = "face_detect.pt"
model_path = "yolov8n.pt"
tracker_type = "deepocsort"
reid_weights = Path('osnet_x0_25_msmt17.pt')
run_main_program = MainProgram(face_model_path, model_path, reid_weights, tracker_type)
run_main_program()
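The send_data_unity dict above is the entire Python-to-Unity contract. A minimal stand-alone receiver sketch, assuming UdpComms ships one JSON-encoded datagram per frame (the project's wrapper may frame data differently):

import json
import socket

sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind(("192.168.1.122", 8000))      # the sender's portTX is this receiver's listen port
while True:
    datagram, _ = sock.recvfrom(65535)  # UDP tops out near 64 KB, hence the small frame widths
    state = json.loads(datagram)        # keys: PassBy, Engage, Ready, Gender, AgeMin, AgeMax, ...
    if state.get("GenerateImageSuccess"):
        print(state["Description"], state["StreamingData"])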

View File

@@ -0,0 +1,7 @@
fileFormatVersion: 2
guid: 0914c71de71c715788bbd30a30040d92
DefaultImporter:
externalObjects: {}
userData:
assetBundleName:
assetBundleVariant:

Binary file not shown (image added, 4.5 MiB)

View File

@@ -0,0 +1,7 @@
fileFormatVersion: 2
guid: b11869eb1d5f7a9f583dba65483f7043
DefaultImporter:
externalObjects: {}
userData:
assetBundleName:
assetBundleVariant:

Binary file not shown (image added, 27 KiB)

View File

@@ -0,0 +1,7 @@
fileFormatVersion: 2
guid: 642339e38314c58fd849560f6d2c5fbf
DefaultImporter:
externalObjects: {}
userData:
assetBundleName:
assetBundleVariant:

View File

@@ -28,3 +28,7 @@ import AgeGenderPrediction
import Face_detection import Face_detection
import os import os
from facenet_pytorch import MTCNN
import torch
import math

View File

@@ -5,7 +5,7 @@ from lib import *
class MainProgram: class MainProgram:
def __init__(self, face_model_path, model_path, reid_weights, tracker_type="deepocsort"): def __init__(self, face_model_path, model_path, reid_weights, tracker_type="ocsort"):
self.face_model_path = face_model_path self.face_model_path = face_model_path
self.model_path = model_path self.model_path = model_path
@@ -49,7 +49,7 @@ class MainProgram:
self.all_record = wks.get_all_records() self.all_record = wks.get_all_records()
self.client = NovitaClient("bd00a29d-86a8-4bad-9b8c-e085a5860311") self.client = NovitaClient("48cc2b16-286f-49c8-9581-f409b68359c4")
self.ready_success = False self.ready_success = False
@@ -59,9 +59,14 @@ class MainProgram:
self.forward_face = Face_detection.FaceDetection() self.forward_face = Face_detection.FaceDetection()
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
self.detector = MTCNN(keep_all=True, device=device)
self.count_frame = 0
def convertFrame(self, frame) -> str: def convertFrame(self, frame) -> str:
encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), 90] encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), 90]
frame = imutils.resize(frame, width=512) frame = imutils.resize(frame, width=480)
result, encoded_frame = cv2.imencode('.jpg', frame, encode_param) result, encoded_frame = cv2.imencode('.jpg', frame, encode_param)
jpg_as_text = base64.b64encode(encoded_frame.tobytes()) jpg_as_text = base64.b64encode(encoded_frame.tobytes())
@@ -104,28 +109,6 @@ class MainProgram:
self.send_data_unity["AgeMin"] = None self.send_data_unity["AgeMin"] = None
self.send_data_unity["AgeMax"] = None self.send_data_unity["AgeMax"] = None
def get_face_bbox(self, frame):
outs = self.face_model(frame)
results = sv.Detections.from_ultralytics(outs[0])
bbox = results.xyxy.astype(np.int_)
conf = results.confidence.astype(np.float32)
return np.concatenate((bbox, conf[:, np.newaxis]), axis=1)
def get_person_bbox(self, frame):
# Perform object detection with YOLOv8 class = 0 indicate person class
outs = self.person_model(frame, classes=[0], conf=0.7)
if not outs[0].boxes.xyxy.tolist():
detection = np.empty((0, 6))
# Extract relevant information from detections for boxmot
else:
boxes = outs[0].boxes.xyxy.tolist()
classes = outs[0].boxes.cls.tolist()
confidences = outs[0].boxes.conf.tolist()
detection = np.array([box + [conf, cls] for box, conf, cls in zip(boxes, confidences, classes)])
return detection
def check_engage(self, x1, x2) -> bool: def check_engage(self, x1, x2) -> bool:
if not (x1 > self.red_zone_width[1] or x2 < self.red_zone_width[0]): if not (x1 > self.red_zone_width[1] or x2 < self.red_zone_width[0]):
return True return True
@@ -135,22 +118,63 @@ class MainProgram:
def cropped_image(self, frame, x1, y1, x2, y2): def cropped_image(self, frame, x1, y1, x2, y2):
return frame[y1: y2, x1: x2] return frame[y1: y2, x1: x2]
def check_ready(self, x1, y1, x2, y2, frame): def get_face(self, frame):
person_frame = self.cropped_image(frame, x1, y1, x2, y2) boxes, probs, landmarks = self.detector.detect(frame, landmarks=True)
return self.forward_face.detect_face(person_frame, self.face_zone_center_point[0], lm_list = []
self.face_zone_center_point[1], x1, y1)
for landmark in landmarks:
x1, y1, x2, y2 = int(landmark[0][0]), int(landmark[0][1]), int(landmark[1][0]), int(landmark[2][1])
lm_list.append([x1, y1, x2, y2])
if boxes is not None:
bboxes = boxes.astype(np.int_)
confs = probs.astype(np.float32).reshape(-1, 1)
# Create an array of zeros with the same length as bboxes
zeros = np.zeros((bboxes.shape[0], 1), dtype=np.float32)
# Concatenate landmark boxes, confs, and zeros into the tracker input
combined = np.hstack((lm_list, confs, zeros))
return combined, landmarks, bboxes
else:
return np.array([])
def check_ready(self, nose, left_eye, right_eye):
distance_left = self.forward_face.calculate_distance(nose, left_eye)
distance_right = self.forward_face.calculate_distance(nose, right_eye)
distance_to_point = self.forward_face.calculate_dis_to_cp(self.face_zone_center_point[0],
self.face_zone_center_point[1],
nose[0], nose[1])
cv2.circle(self.frame_to_show, (int(nose[0]), int(nose[1])), 5, (0, 255, 255), -1)
cv2.circle(self.frame_to_show, (int(self.face_zone_center_point[0]), int(self.face_zone_center_point[1])), 5, (0, 255, 255), -1)
# Check if distances exceed threshold
if (distance_left > K_MULTIPLIER * distance_right or distance_right > K_MULTIPLIER * distance_left or
distance_to_point > 30):
if self.count_frame > 200:
self.count_frame = 0
return False
else:
self.count_frame += 1
return True
def person_process(self, frame): def person_process(self, frame):
# Perform person detection # Perform person detection
person_detections = self.get_person_bbox(frame) face_detections, landmarks, bboxes = self.get_face(frame)
# Update the tracker with person detections # Update the tracker with person detections
tracked_objects = self.tracker.update(person_detections, frame) tracked_objects = self.tracker.update(face_detections, frame)
track_list = [] track_list = []
frame_to_crop = frame.copy() face_info = []
nose_pose, left_eye_pose, right_eye_pos = (0, 0), (0, 0), (0, 0)
engage = False engage = False
@@ -158,11 +182,23 @@
x1, y1, x2, y2, track_id, conf, cls, _ = track x1, y1, x2, y2, track_id, conf, cls, _ = track
track_list.append(track_id) track_list.append(track_id)
for idx in range(len(landmarks) - 1):
x1_lm, y1_lm = int(landmarks[idx][0][0]), int(landmarks[idx][0][1])
if x1_lm == x1 and y1_lm == y1:
nose_pose, left_eye_pose, right_eye_pos = ((landmarks[idx][2][0], landmarks[idx][2][1]),
(landmarks[idx][0][0], landmarks[idx][0][1]),
(landmarks[idx][1][0], landmarks[idx][1][1]))
face_info.append((nose_pose, left_eye_pose, right_eye_pos, track_id, bboxes[idx]))
if not engage: if not engage:
engage = self.check_engage(x1, x2) engage = self.check_engage(x1, x2)
print(self.focus_id)
if not self.focus_id: if not self.focus_id:
self.focus_id = track_id if self.check_ready(x1, y1, x2, y2, frame_to_crop) else None self.focus_id = track_id \
if self.check_ready(nose_pose, left_eye_pose, right_eye_pos) else None
elif track_id != self.focus_id: elif track_id != self.focus_id:
continue continue
@@ -191,10 +227,16 @@
} }
if not self.ready_success: if not self.ready_success:
self.send_data_unity["Ready"] = True if self.check_ready(x1, y1, x2, y2, frame_to_crop) else False self.send_data_unity["Ready"] = True if self.check_ready(nose_pose, left_eye_pose,
right_eye_pos) else False
elif not self.check_save: elif not self.check_save:
cv2.imwrite("./image/output.jpg", self.cropped_image(frame, x1, y1, x2, y2)) for idx in range(len(face_info) - 1):
if face_info[idx][3] == self.focus_id:
x1_face, y1_face, x2_face, y2_face = (int(face_info[idx][4][0]), int(face_info[idx][4][1]),
int(face_info[idx][4][2]), int(face_info[idx][4][3]))
cv2.imwrite("./image/output.jpg",
self.cropped_image(frame, x1_face, y1_face, x2_face, y2_face))
self.check_save = True self.check_save = True
elif not self.check_generate: elif not self.check_generate:
@@ -242,8 +284,8 @@ class MainProgram:
self.red_zone_width = (self.center_point[0] - 250, self.center_point[0] + 250) self.red_zone_width = (self.center_point[0] - 250, self.center_point[0] + 250)
self.red_zone_height = (self.center_point[1] - 50, self.frame_height) self.red_zone_height = (self.center_point[1] - 50, self.frame_height)
self.face_zone_width = (self.center_point[0] - 150, self.center_point[0] + 150) self.face_zone_width = (self.center_point[0] - 100, self.center_point[0] + 100)
self.face_zone_height = (self.center_point[1] - 200, self.center_point[1]) self.face_zone_height = (self.center_point[1] - 150, self.center_point[1] + 50)
self.face_zone_center_point = (int((self.face_zone_width[1] - self.face_zone_width[0]) / 2) + self.face_zone_width[0], self.face_zone_center_point = (int((self.face_zone_width[1] - self.face_zone_width[0]) / 2) + self.face_zone_width[0],
int((self.face_zone_height[1] - self.face_zone_height[0]) / 2) + self.face_zone_height[0]) int((self.face_zone_height[1] - self.face_zone_height[0]) / 2) + self.face_zone_height[0])
@@ -256,6 +298,8 @@ class MainProgram:
frame_to_handle = frame.copy() frame_to_handle = frame.copy()
self.frame_to_show = frame.copy() self.frame_to_show = frame.copy()
# self.person_process(frame_to_handle)
try: try:
self.person_process(frame_to_handle) self.person_process(frame_to_handle)
@@ -272,9 +316,17 @@ class MainProgram:
self.sock.SendData(self.send_data_unity) self.sock.SendData(self.send_data_unity)
cv2.imshow("Output", self.frame_to_show)
if cv2.waitKey(1) & 0xFF == ord("q"):
break
cap.release()
cv2.destroyAllWindows()
if __name__ == "__main__": if __name__ == "__main__":
print("Starting python...") K_MULTIPLIER = 1.2
face_model_path = "face_detect.pt" face_model_path = "face_detect.pt"
model_path = "yolov8n.pt" model_path = "yolov8n.pt"