Compare commits


No commits in common. "bb690609eacd306305d6936f35be0a126a93cfe7" and "d6f7a97fcbd274119e76e90e4f572a200f9ea00f" have entirely different histories.

16 changed files with 160 additions and 261 deletions

File diff suppressed because one or more lines are too long


@@ -38,7 +38,7 @@ RenderSettings:
   m_ReflectionIntensity: 1
   m_CustomReflection: {fileID: 0}
   m_Sun: {fileID: 0}
-  m_IndirectSpecularColor: {r: 0.37311953, g: 0.38074014, b: 0.35872743, a: 1}
+  m_IndirectSpecularColor: {r: 0.37311918, g: 0.3807398, b: 0.35872716, a: 1}
   m_UseRadianceAmbientProbe: 0
 --- !u!157 &3
 LightmapSettings:
@@ -153,7 +153,7 @@ MonoBehaviour:
   m_Name: 
   m_EditorClassIdentifier: 
   _graphApi: {fileID: 11400000, guid: c2740d22f4ae04448a082f5f49761822, type: 2}
-  _promotionId: 63fb40435c803000
+  _promotionId: 63c67c5434403000
   _guestUpdatedSubscription: {fileID: 11400000, guid: f98ac02dda5623c4c82d342ee9602420, type: 2}
 --- !u!4 &398718624
 Transform:


@@ -6,7 +6,6 @@ using TMPro;
 using UnityEngine;
 using UnityEngine.Serialization;
 using UnityEngine.UI;
-using GadGame.Network;
 
 namespace GadGame.Scripts.Coffee
 {
@@ -33,10 +32,8 @@ namespace GadGame.Scripts.Coffee
         private bool _isLoading;
         private float _timer;
 
-        private async void Awake()
+        private void Awake()
         {
-            await P4PGraphqlManager.Instance.JoinPromotion();
-            await P4PGraphqlManager.Instance.SubmitGameSession(0);
             _idleBg.alpha = 1;
             _loading.transform.DOLocalRotate(new Vector3(0, 0, 360), 10 / _loadingSpeed, RotateMode.FastBeyond360)
                 .SetLoops(-1)
@@ -133,7 +130,7 @@ namespace GadGame.Scripts.Coffee
         private void SetReadyCountDown(float progress){
             // _hintText.text = _texts[1];
-            _process.fillAmount = 1 - progress ;
+            _process.fillAmount = 1- progress ;
         }
     }


@@ -25,6 +25,8 @@ namespace GadGame.Scripts
             //Decode the Base64 string to a byte array
+            byte[] imageBytes = UdpSocket.Instance.DataReceived.GenerateImageSuccess ? File.ReadAllBytes(streamingData) : Convert.FromBase64String(streamingData);
+            // byte[] imageBytes = Convert.FromBase64String(encodeString);
             _texture.LoadImage(imageBytes); // Automatically resizes the texture dimensions
             // _texture.Apply();
             var sprite = Sprite.Create(_texture, new Rect(0, 0, _texture.width, _texture.height), new Vector2(0.5f, 0.5f), 100);


@@ -27,7 +27,6 @@ namespace GadGame
             string _macAddress = GetMacAddressString();
             Debug.Log(_macAddress);
             await P4PGraphqlManager.Instance.LoginMachine(_macAddress);
-            await P4PGraphqlManager.Instance.CreateGuest();
         }
 
         private async void Start()


@@ -183,7 +183,7 @@ namespace GadGame.Network
                 var socket = await _graphApi.Subscribe(query);
                 if (socket.State == WebSocketState.Open)
                 {
-                    var link = $"https://play4promo.online/brands/{_promotionId}/scan-qr?token={_userAccessToken}&img=";
+                    var link = $"https://play4promo.online/brands/{_promotionId}/scan-qr?token={_userAccessToken}";
                     Debug.Log(link);
                     return EncodeTextToQrCode(link);
                 }


@@ -23,13 +23,7 @@ namespace GadGame.State.MainFlowState
         public override void Update(float time)
         {
             Runner.EncodeImage.Raise(UdpSocket.Instance.DataReceived.StreamingData);
-            if(_scanSuccess)
-            {
-                UdpSocket.Instance.SendDataToPython("End");
-                Runner.SetState<IdleState>();
-                return;
-            }
+            if(_scanSuccess) return;
             if (!UdpSocket.Instance.DataReceived.Engage)
             {
                 _leaveTimer += Time.deltaTime;


@@ -1,7 +0,0 @@
-fileFormatVersion: 2
-guid: 5d25199b74a8a56878b29d2ae14fac9f
-DefaultImporter:
-  externalObjects: {}
-  userData: 
-  assetBundleName: 
-  assetBundleVariant: 


@@ -1,48 +0,0 @@
-from mtcnn import MTCNN
-from facenet_pytorch import MTCNN
-import torch
-import math
-import cv2
-import numpy as np
-
-K_MULTIPLIER = 1.2
-
-device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
-detector = MTCNN(keep_all=True, device=device)
-
-
-class FaceDetection:
-    def __init__(self,):
-        self.detector = detector
-
-    def calculate_distance(self, p1, p2) -> float:
-        return math.sqrt((p1[0] - p2[0]) ** 2 + (p1[1] - p2[1]) ** 2)
-
-    def calculate_dis_to_cp(self, cx, cy, face_cx, face_cy) -> float:
-        return math.sqrt((face_cx - cx) ** 2 + (face_cy - cy) ** 2)
-
-    def detect_face(self, frame, cx, cy) -> bool:
-        boxes, probs, landmarks = self.detector.detect(frame, landmarks=True)
-        for box, landmark in zip(boxes, landmarks):
-            # Draw bounding box
-            x1, y1, x2, y2 = map(int, box)
-            face_cx = int(x1 + (x2 - x1) / 2)
-            face_cy = int(y1 + (y2 - y1) / 2)
-            if len(landmark) >= 5:
-                nose = landmark[2]
-                left_eye = landmark[0]
-                right_eye = landmark[1]
-                # Calculate distances
-                distance_left = self.calculate_distance(nose, left_eye)
-                distance_right = self.calculate_distance(nose, right_eye)
-                # Check if distances exceed threshold
-                if not (distance_left > K_MULTIPLIER * distance_right or distance_right > K_MULTIPLIER * distance_left or
-                        self.calculate_dis_to_cp(cx, cy, face_cx, face_cy) > 30):
-                    return True
-        return False


@@ -1,7 +0,0 @@
-fileFormatVersion: 2
-guid: f4f9204610747ca8bbc62f2c4c03f4b7
-DefaultImporter:
-  externalObjects: {}
-  userData: 
-  assetBundleName: 
-  assetBundleVariant: 


@@ -51,22 +51,13 @@ class MainProgram:
         self.client = NovitaClient("48cc2b16-286f-49c8-9581-f409b68359c4")
-        self.client_minio = Minio("192.168.1.186:50047",
-                                  access_key="play4promo_user",
-                                  secret_key="12345678",
-                                  secure=False
-                                  )
-        self.bucket_name = "play4promo"
-        self.des_file = "sid/final_image.jpg"
         self.ready_success = False
         self.show_success = False
         self.check_save, self.check_generate = False, False
-        self.forward_face = FaceDetection()
+        self.forward_face = Face_detection.FaceDetection()
 
     def convertFrame(self, frame) -> str:
         encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), 90]
@@ -86,18 +77,13 @@ class MainProgram:
     def generate_image(self):
         image_url, des = self.get_image()
-        image_path = "./image/merge_face.png"
         res = self.client.merge_face(
             image=image_url,
             face_image="./image/output.jpg",
         )
-        base64_to_image(res.image_file).save(image_path)
-        self.client_minio.fput_object(
-            self.bucket_name, self.des_file, image_path,
-        )
+        base64_to_image(res.image_file).save("./image/merge_face.png")
         self.send_data_unity["Description"] = des
         self.send_data_unity["GenerateImageSuccess"] = True
@@ -126,72 +112,143 @@ class MainProgram:
         return np.concatenate((bbox, conf[:, np.newaxis]), axis=1)
 
-    def check_engage(self, frame):
+    def get_person_bbox(self, frame):
         # Perform object detection with YOLOv8 class = 0 indicate person class
         outs = self.person_model(frame, classes=[0], conf=0.7)
         if not outs[0].boxes.xyxy.tolist():
-            return False
+            detection = np.empty((0, 6))
         # Extract relevant information from detections for boxmot
+        else:
+            boxes = outs[0].boxes.xyxy.tolist()
+            classes = outs[0].boxes.cls.tolist()
+            confidences = outs[0].boxes.conf.tolist()
+            detection = np.array([box + [conf, cls] for box, conf, cls in zip(boxes, confidences, classes)])
+        return detection
+
+    def check_engage(self, x1, x2) -> bool:
+        if not (x1 > self.red_zone_width[1] or x2 < self.red_zone_width[0]):
+            return True
+        return False
 
     def cropped_image(self, frame, x1, y1, x2, y2):
         return frame[y1: y2, x1: x2]
 
+    def calculate_dis_to_cp(self, cx, cy, face_cx, face_cy) -> float:
+        return math.sqrt((face_cx - cx) ** 2 + (face_cy - cy) ** 2)
+
-    def check_ready(self, frame):
-        return self.forward_face.detect_face(frame, self.face_zone_center_point[0],
-                                             self.face_zone_center_point[1])
+    def check_ready(self, x1, y1, x2, y2, frame):
+        person_frame = self.cropped_image(frame, x1, y1, x2, y2)
+        # out = self.face_model(person_frame)
+        # results = sv.Detections.from_ultralytics(out[0])
+        # bbox = results.xyxy.astype(np.int_)
+        #
+        # face_cx, face_cy = (int(bbox[0][0] + x1 + (bbox[0][2] - bbox[0][0]) / 2),
+        #                     int(bbox[0][1] + y1 + (bbox[0][3] - bbox[0][1]) / 2))
+        #
+        # dis = self.calculate_dis_to_cp()
+        return self.forward_face.detect_face(person_frame, self.face_zone_center_point[0],
+                                             self.face_zone_center_point[1], x1, y1)
 
     def person_process(self, frame):
         # Perform person detection
-        self.send_data_unity["PassBy"] = self.check_engage(frame)
-        self.send_data_unity["Engage"] = self.send_data_unity["PassBy"]
-        if not self.ready_success:
-            self.send_data_unity["Ready"] = True if self.check_ready(frame) else False
-        elif not self.check_save:
-            cv2.imwrite("./image/output.jpg", self.cropped_image(frame, self.face_zone_width[0],
-                                                                 self.face_zone_height[0],
-                                                                 self.face_zone_width[1],
-                                                                 self.face_zone_height[1]))
-            self.check_save = True
-        elif not self.check_generate:
-            if str(self.send_data_unity["Gender"]) == "None":
-                self.predict_age_and_gender()
-            else:
-                self.generate_image()
-                self.check_generate = True
-        elif self.show_success:
-            self.check_save = False
-            self.check_generate = False
-        received_data = self.sock.ReadReceivedData()
-        if received_data == "Begin":
-            self.ready_success = True
-        elif received_data == "End":
-            self.ready_success = False
-            self.check_save = False
-            self.check_generate = False
-            os.remove("./image/output.jpg")
-            os.remove("./image/merge_face.png")
-            self.send_data_unity: dict = {
-                "PassBy": False,
-                "Engage": False,
-                "Ready": False,
-                "Gender": None,
-                "AgeMin": None,
-                "AgeMax": None,
-                "GenerateImageSuccess": False,
-                "Description": ""
-            }
+        person_detections = self.get_person_bbox(frame)
+        received_data = self.sock.ReadReceivedData()
+        # Update the tracker with person detections
+        tracked_objects = self.tracker.update(person_detections, frame)
+        if received_data == "Begin":
+            self.ready_success = True
+        elif received_data == "End":
+            self.ready_success = False
+            self.check_save = False
+            self.check_generate = False
+            os.remove("./image/output.jpg")
+            os.remove("./image/merge_face.png")
+            self.send_data_unity: dict = {
+                "PassBy": False,
+                "Engage": False,
+                "Ready": False,
+                "Gender": None,
+                "AgeMin": None,
+                "AgeMax": None,
+                "GenerateImageSuccess": False,
+                "Description": ""
+            }
+        track_list = []
+        frame_to_crop = frame.copy()
+        engage = False
+        for track in tracked_objects.astype(int):
+            x1, y1, x2, y2, track_id, conf, cls, _ = track
+            track_list.append(track_id)
+            # cv2.rectangle(self.frame_to_show, (x1, y1), (x2, y2), (0, 255, 0), 2)
+            # cv2.putText(self.frame_to_show, f"ID: {track_id} Conf: {conf:.2f}", (x1, y1 - 10),
+            #             cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
+            #
+            # cv2.rectangle(self.frame_to_show, (self.red_zone_width[0], self.red_zone_height[0]),
+            #               (self.red_zone_width[1], self.red_zone_height[1]), (255, 0, 0), 2)
+            if not engage:
+                engage = self.check_engage(x1, x2)
+            if not self.focus_id:
+                self.focus_id = track_id if self.check_ready(x1, y1, x2, y2, frame_to_crop) else None
+            elif track_id != self.focus_id:
+                continue
+            if not self.ready_success:
+                self.send_data_unity["Ready"] = True if self.check_ready(x1, y1, x2, y2, frame_to_crop) else False
+            elif not self.check_save:
+                cv2.imwrite("./image/output.jpg", self.cropped_image(frame, x1, y1, x2, y2))
+                self.check_save = True
+            elif not self.check_generate:
+                if str(self.send_data_unity["Gender"]) == "None":
+                    self.predict_age_and_gender()
+                else:
+                    self.generate_image()
+                    self.check_generate = True
+            elif self.show_success:
+                self.check_save = False
+                self.check_generate = False
+        if track_list:
+            self.send_data_unity["PassBy"] = True
+            self.send_data_unity["Engage"] = engage
+        else:
+            self.send_data_unity["Engage"] = False
+            self.send_data_unity["PassBy"] = False
+            self.send_data_unity["Ready"] = False
+        if self.focus_id not in track_list:
+            if self.frame_count_remove_idx == 20:
+                self.frame_count_remove_idx = 0
+                self.focus_id = None
+            else:
+                self.frame_count_remove_idx += 1
+        else:
+            self.frame_count_remove_idx = 0
+        # cv2.putText(self.frame_to_show, f"Focus id: {self.focus_id}", (20, 20), cv2.FONT_HERSHEY_SIMPLEX,
+        #             1.0, (0, 255, 255), 2)
 
     def __call__(self):
         cap = cv2.VideoCapture(0)
@@ -223,8 +280,6 @@ class MainProgram:
             frame_to_handle = frame.copy()
             self.frame_to_show = frame.copy()
-            # self.person_process(frame_to_handle)
             try:
                 self.person_process(frame_to_handle)

Binary file not shown (image; before: 4.5 MiB, after: 4.5 MiB).

Binary file not shown (image; before: 11 KiB, after: 27 KiB).


@@ -32,8 +32,3 @@ import os
 from facenet_pytorch import MTCNN
 import torch
 import math
-from backup_FD import FaceDetection
-from minio import Minio
-from minio.error import S3Error