Compare commits

...

2 Commits

Author SHA1 Message Date
sangta bb690609ea update 2024-06-25 15:41:31 +07:00
sangta 82b93f7a72 udpate script download image 2024-06-25 15:40:52 +07:00
16 changed files with 261 additions and 160 deletions

File diff suppressed because one or more lines are too long

View File

@ -38,7 +38,7 @@ RenderSettings:
m_ReflectionIntensity: 1 m_ReflectionIntensity: 1
m_CustomReflection: {fileID: 0} m_CustomReflection: {fileID: 0}
m_Sun: {fileID: 0} m_Sun: {fileID: 0}
m_IndirectSpecularColor: {r: 0.37311918, g: 0.3807398, b: 0.35872716, a: 1} m_IndirectSpecularColor: {r: 0.37311953, g: 0.38074014, b: 0.35872743, a: 1}
m_UseRadianceAmbientProbe: 0 m_UseRadianceAmbientProbe: 0
--- !u!157 &3 --- !u!157 &3
LightmapSettings: LightmapSettings:
@ -153,7 +153,7 @@ MonoBehaviour:
m_Name: m_Name:
m_EditorClassIdentifier: m_EditorClassIdentifier:
_graphApi: {fileID: 11400000, guid: c2740d22f4ae04448a082f5f49761822, type: 2} _graphApi: {fileID: 11400000, guid: c2740d22f4ae04448a082f5f49761822, type: 2}
_promotionId: 63c67c5434403000 _promotionId: 63fb40435c803000
_guestUpdatedSubscription: {fileID: 11400000, guid: f98ac02dda5623c4c82d342ee9602420, type: 2} _guestUpdatedSubscription: {fileID: 11400000, guid: f98ac02dda5623c4c82d342ee9602420, type: 2}
--- !u!4 &398718624 --- !u!4 &398718624
Transform: Transform:

View File

@ -6,6 +6,7 @@ using TMPro;
using UnityEngine; using UnityEngine;
using UnityEngine.Serialization; using UnityEngine.Serialization;
using UnityEngine.UI; using UnityEngine.UI;
using GadGame.Network;
namespace GadGame.Scripts.Coffee namespace GadGame.Scripts.Coffee
{ {
@ -32,8 +33,10 @@ namespace GadGame.Scripts.Coffee
private bool _isLoading; private bool _isLoading;
private float _timer; private float _timer;
private void Awake() private async void Awake()
{ {
await P4PGraphqlManager.Instance.JoinPromotion();
await P4PGraphqlManager.Instance.SubmitGameSession(0);
_idleBg.alpha = 1; _idleBg.alpha = 1;
_loading.transform.DOLocalRotate(new Vector3(0, 0, 360), 10 / _loadingSpeed, RotateMode.FastBeyond360) _loading.transform.DOLocalRotate(new Vector3(0, 0, 360), 10 / _loadingSpeed, RotateMode.FastBeyond360)
.SetLoops(-1) .SetLoops(-1)
@ -130,7 +133,7 @@ namespace GadGame.Scripts.Coffee
private void SetReadyCountDown(float progress){ private void SetReadyCountDown(float progress){
// _hintText.text = _texts[1]; // _hintText.text = _texts[1];
_process.fillAmount = 1- progress ; _process.fillAmount = 1 - progress ;
} }
} }

View File

@ -25,8 +25,6 @@ namespace GadGame.Scripts
//Decode the Base64 string to a byte array //Decode the Base64 string to a byte array
byte[] imageBytes = UdpSocket.Instance.DataReceived.GenerateImageSuccess ? File.ReadAllBytes(streamingData) : Convert.FromBase64String(streamingData); byte[] imageBytes = UdpSocket.Instance.DataReceived.GenerateImageSuccess ? File.ReadAllBytes(streamingData) : Convert.FromBase64String(streamingData);
// byte[] imageBytes = Convert.FromBase64String(encodeString);
_texture.LoadImage(imageBytes); // Automatically resizes the texture dimensions _texture.LoadImage(imageBytes); // Automatically resizes the texture dimensions
// _texture.Apply(); // _texture.Apply();
var sprite = Sprite.Create(_texture, new Rect(0, 0, _texture.width, _texture.height), new Vector2(0.5f, 0.5f), 100); var sprite = Sprite.Create(_texture, new Rect(0, 0, _texture.width, _texture.height), new Vector2(0.5f, 0.5f), 100);

View File

@ -27,6 +27,7 @@ namespace GadGame
string _macAddress = GetMacAddressString(); string _macAddress = GetMacAddressString();
Debug.Log(_macAddress); Debug.Log(_macAddress);
await P4PGraphqlManager.Instance.LoginMachine(_macAddress); await P4PGraphqlManager.Instance.LoginMachine(_macAddress);
await P4PGraphqlManager.Instance.CreateGuest();
} }
private async void Start() private async void Start()

View File

@ -183,7 +183,7 @@ namespace GadGame.Network
var socket = await _graphApi.Subscribe(query); var socket = await _graphApi.Subscribe(query);
if (socket.State == WebSocketState.Open) if (socket.State == WebSocketState.Open)
{ {
var link = $"https://play4promo.online/brands/{_promotionId}/scan-qr?token={_userAccessToken}"; var link = $"https://play4promo.online/brands/{_promotionId}/scan-qr?token={_userAccessToken}&img=";
Debug.Log(link); Debug.Log(link);
return EncodeTextToQrCode(link); return EncodeTextToQrCode(link);
} }

View File

@ -23,7 +23,13 @@ namespace GadGame.State.MainFlowState
public override void Update(float time) public override void Update(float time)
{ {
Runner.EncodeImage.Raise(UdpSocket.Instance.DataReceived.StreamingData); Runner.EncodeImage.Raise(UdpSocket.Instance.DataReceived.StreamingData);
if(_scanSuccess) return; if(_scanSuccess)
{
UdpSocket.Instance.SendDataToPython("End");
Runner.SetState<IdleState>();
return;
}
if (!UdpSocket.Instance.DataReceived.Engage) if (!UdpSocket.Instance.DataReceived.Engage)
{ {
_leaveTimer += Time.deltaTime; _leaveTimer += Time.deltaTime;

View File

@ -0,0 +1,7 @@
fileFormatVersion: 2
guid: 5d25199b74a8a56878b29d2ae14fac9f
DefaultImporter:
externalObjects: {}
userData:
assetBundleName:
assetBundleVariant:

View File

@ -0,0 +1,48 @@
# NOTE(review): the original `from mtcnn import MTCNN` was immediately
# shadowed by the facenet_pytorch import on the next line, so the mtcnn
# package's class could never be used; the dead import has been removed.
from facenet_pytorch import MTCNN
import torch
import math
import cv2
import numpy as np

# A face is rejected as non-frontal when one nose-to-eye distance exceeds
# the other by more than this factor (i.e. the head is turned sideways).
K_MULTIPLIER = 1.2

# Run detection on the GPU when available; keep_all=True returns every
# detected face in the frame rather than only the most confident one.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
detector = MTCNN(keep_all=True, device=device)
class FaceDetection:
    """Decides whether a forward-facing face is present near a target point.

    Wraps the module-level facenet-pytorch MTCNN ``detector``. A face counts
    as "forward" when its nose is roughly equidistant from both eyes (within
    ``K_MULTIPLIER``) and its bounding-box center lies within 30 px of the
    supplied (cx, cy) target point.
    """

    def __init__(self):
        # Share the single module-level MTCNN instance (GPU when available).
        self.detector = detector

    def calculate_distance(self, p1, p2) -> float:
        """Euclidean distance between two (x, y) points."""
        return math.sqrt((p1[0] - p2[0]) ** 2 + (p1[1] - p2[1]) ** 2)

    def calculate_dis_to_cp(self, cx, cy, face_cx, face_cy) -> float:
        """Distance from a face center (face_cx, face_cy) to the target (cx, cy)."""
        return math.sqrt((face_cx - cx) ** 2 + (face_cy - cy) ** 2)

    def detect_face(self, frame, cx, cy) -> bool:
        """Return True when a frontal face is detected near (cx, cy) in ``frame``.

        A face qualifies when neither nose-to-eye distance exceeds
        ``K_MULTIPLIER`` times the other (head not turned) AND the face center
        is at most 30 px from the target point. Returns False when no face at
        all is found in the frame.
        """
        boxes, probs, landmarks = self.detector.detect(frame, landmarks=True)
        # BUG FIX: facenet-pytorch's MTCNN.detect returns None (not empty
        # arrays) when no face is found; iterating zip(None, None) raised a
        # TypeError on every empty frame. Guard and report "no face".
        if boxes is None or landmarks is None:
            return False
        for box, landmark in zip(boxes, landmarks):
            x1, y1, x2, y2 = map(int, box)
            face_cx = int(x1 + (x2 - x1) / 2)
            face_cy = int(y1 + (y2 - y1) / 2)
            if len(landmark) >= 5:
                # facenet-pytorch landmark order: left eye, right eye, nose,
                # then the two mouth corners.
                nose = landmark[2]
                left_eye = landmark[0]
                right_eye = landmark[1]
                distance_left = self.calculate_distance(nose, left_eye)
                distance_right = self.calculate_distance(nose, right_eye)
                # Symmetric eye distances => frontal face; also require the
                # face center to be close (<= 30 px) to the target point.
                if not (distance_left > K_MULTIPLIER * distance_right or
                        distance_right > K_MULTIPLIER * distance_left or
                        self.calculate_dis_to_cp(cx, cy, face_cx, face_cy) > 30):
                    return True
        return False

View File

@ -0,0 +1,7 @@
fileFormatVersion: 2
guid: f4f9204610747ca8bbc62f2c4c03f4b7
DefaultImporter:
externalObjects: {}
userData:
assetBundleName:
assetBundleVariant:

View File

@ -51,13 +51,22 @@ class MainProgram:
self.client = NovitaClient("48cc2b16-286f-49c8-9581-f409b68359c4") self.client = NovitaClient("48cc2b16-286f-49c8-9581-f409b68359c4")
self.client_minio = Minio("192.168.1.186:50047",
access_key="play4promo_user",
secret_key="12345678",
secure=False
)
self.bucket_name = "play4promo"
self.des_file = "sid/final_image.jpg"
self.ready_success = False self.ready_success = False
self.show_success = False self.show_success = False
self.check_save, self.check_generate = False, False self.check_save, self.check_generate = False, False
self.forward_face = Face_detection.FaceDetection() self.forward_face = FaceDetection()
def convertFrame(self, frame) -> str: def convertFrame(self, frame) -> str:
encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), 90] encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), 90]
@ -77,13 +86,18 @@ class MainProgram:
def generate_image(self): def generate_image(self):
image_url, des = self.get_image() image_url, des = self.get_image()
image_path = "./image/merge_face.png"
res = self.client.merge_face( res = self.client.merge_face(
image=image_url, image=image_url,
face_image="./image/output.jpg", face_image="./image/output.jpg",
) )
base64_to_image(res.image_file).save("./image/merge_face.png") base64_to_image(res.image_file).save(image_path)
self.client_minio.fput_object(
self.bucket_name, self.des_file, image_path,
)
self.send_data_unity["Description"] = des self.send_data_unity["Description"] = des
self.send_data_unity["GenerateImageSuccess"] = True self.send_data_unity["GenerateImageSuccess"] = True
@ -112,143 +126,72 @@ class MainProgram:
return np.concatenate((bbox, conf[:, np.newaxis]), axis=1) return np.concatenate((bbox, conf[:, np.newaxis]), axis=1)
def get_person_bbox(self, frame): def check_engage(self, frame):
# Perform object detection with YOLOv8 class = 0 indicate person class # Perform object detection with YOLOv8 class = 0 indicate person class
outs = self.person_model(frame, classes=[0], conf=0.7) outs = self.person_model(frame, classes=[0], conf=0.7)
if not outs[0].boxes.xyxy.tolist(): if not outs[0].boxes.xyxy.tolist():
detection = np.empty((0, 6)) return False
# Extract relevant information from detections for boxmot
else: else:
boxes = outs[0].boxes.xyxy.tolist()
classes = outs[0].boxes.cls.tolist()
confidences = outs[0].boxes.conf.tolist()
detection = np.array([box + [conf, cls] for box, conf, cls in zip(boxes, confidences, classes)])
return detection
def check_engage(self, x1, x2) -> bool:
if not (x1 > self.red_zone_width[1] or x2 < self.red_zone_width[0]):
return True return True
return False
def cropped_image(self, frame, x1, y1, x2, y2): def cropped_image(self, frame, x1, y1, x2, y2):
return frame[y1: y2, x1: x2] return frame[y1: y2, x1: x2]
def calculate_dis_to_cp(self, cx, cy, face_cx, face_cy) -> float: def calculate_dis_to_cp(self, cx, cy, face_cx, face_cy) -> float:
return math.sqrt((face_cx - cx) ** 2 + (face_cy - cy) ** 2) return math.sqrt((face_cx - cx) ** 2 + (face_cy - cy) ** 2)
def check_ready(self, x1, y1, x2, y2, frame): def check_ready(self, frame):
person_frame = self.cropped_image(frame, x1, y1, x2, y2) return self.forward_face.detect_face(frame, self.face_zone_center_point[0],
self.face_zone_center_point[1])
# out = self.face_model(person_frame)
# results = sv.Detections.from_ultralytics(out[0])
# bbox = results.xyxy.astype(np.int_)
#
# face_cx, face_cy = (int(bbox[0][0] + x1 + (bbox[0][2] - bbox[0][0]) / 2),
# int(bbox[0][1] + y1 + (bbox[0][3] - bbox[0][1]) / 2))
#
# dis = self.calculate_dis_to_cp()
return self.forward_face.detect_face(person_frame, self.face_zone_center_point[0],
self.face_zone_center_point[1], x1, y1)
def person_process(self, frame): def person_process(self, frame):
# Perform person detection # Perform person detection
person_detections = self.get_person_bbox(frame) self.send_data_unity["PassBy"] = self.check_engage(frame)
self.send_data_unity["Engage"] = self.send_data_unity["PassBy"]
# Update the tracker with person detections received_data = self.sock.ReadReceivedData()
tracked_objects = self.tracker.update(person_detections, frame)
track_list = [] if received_data == "Begin":
self.ready_success = True
frame_to_crop = frame.copy() elif received_data == "End":
self.ready_success = False
self.check_save = False
self.check_generate = False
os.remove("./image/output.jpg")
os.remove("./image/merge_face.png")
self.send_data_unity: dict = {
"PassBy": False,
"Engage": False,
"Ready": False,
"Gender": None,
"AgeMin": None,
"AgeMax": None,
"GenerateImageSuccess": False,
"Description": ""
}
engage = False if not self.ready_success:
self.send_data_unity["Ready"] = True if self.check_ready(frame) else False
for track in tracked_objects.astype(int): elif not self.check_save:
x1, y1, x2, y2, track_id, conf, cls, _ = track cv2.imwrite("./image/output.jpg", self.cropped_image(frame, self.face_zone_width[0],
track_list.append(track_id) self.face_zone_height[0],
# cv2.rectangle(self.frame_to_show, (x1, y1), (x2, y2), (0, 255, 0), 2) self.face_zone_width[1],
# cv2.putText(self.frame_to_show, f"ID: {track_id} Conf: {conf:.2f}", (x1, y1 - 10), self.face_zone_height[1]))
# cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2) self.check_save = True
#
# cv2.rectangle(self.frame_to_show, (self.red_zone_width[0], self.red_zone_height[0]),
# (self.red_zone_width[1], self.red_zone_height[1]), (255, 0, 0), 2)
if not engage: elif not self.check_generate:
engage = self.check_engage(x1, x2) if str(self.send_data_unity["Gender"]) == "None":
self.predict_age_and_gender()
if not self.focus_id:
self.focus_id = track_id if self.check_ready(x1, y1, x2, y2, frame_to_crop) else None
elif track_id != self.focus_id:
continue
else: else:
received_data = self.sock.ReadReceivedData() self.generate_image()
self.check_generate = True
if received_data == "Begin": elif self.show_success:
self.ready_success = True self.check_save = False
self.check_generate = False
elif received_data == "End":
self.ready_success = False
self.check_save = False
self.check_generate = False
os.remove("./image/output.jpg")
os.remove("./image/merge_face.png")
self.send_data_unity: dict = {
"PassBy": False,
"Engage": False,
"Ready": False,
"Gender": None,
"AgeMin": None,
"AgeMax": None,
"GenerateImageSuccess": False,
"Description": ""
}
if not self.ready_success:
self.send_data_unity["Ready"] = True if self.check_ready(x1, y1, x2, y2, frame_to_crop) else False
elif not self.check_save:
cv2.imwrite("./image/output.jpg", self.cropped_image(frame, x1, y1, x2, y2))
self.check_save = True
elif not self.check_generate:
if str(self.send_data_unity["Gender"]) == "None":
self.predict_age_and_gender()
else:
self.generate_image()
self.check_generate = True
elif self.show_success:
self.check_save = False
self.check_generate = False
if track_list:
self.send_data_unity["PassBy"] = True
self.send_data_unity["Engage"] = engage
else:
self.send_data_unity["Engage"] = False
self.send_data_unity["PassBy"] = False
self.send_data_unity["Ready"] = False
if self.focus_id not in track_list:
if self.frame_count_remove_idx == 20:
self.frame_count_remove_idx = 0
self.focus_id = None
else:
self.frame_count_remove_idx += 1
else:
self.frame_count_remove_idx = 0
# cv2.putText(self.frame_to_show, f"Focus id: {self.focus_id}", (20, 20), cv2.FONT_HERSHEY_SIMPLEX,
# 1.0, (0, 255, 255), 2)
def __call__(self): def __call__(self):
cap = cv2.VideoCapture(0) cap = cv2.VideoCapture(0)
@ -280,6 +223,8 @@ class MainProgram:
frame_to_handle = frame.copy() frame_to_handle = frame.copy()
self.frame_to_show = frame.copy() self.frame_to_show = frame.copy()
# self.person_process(frame_to_handle)
try: try:
self.person_process(frame_to_handle) self.person_process(frame_to_handle)

Binary file not shown.

Before

Width:  |  Height:  |  Size: 4.5 MiB

After

Width:  |  Height:  |  Size: 4.5 MiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 27 KiB

After

Width:  |  Height:  |  Size: 11 KiB

View File

@ -32,3 +32,8 @@ import os
from facenet_pytorch import MTCNN from facenet_pytorch import MTCNN
import torch import torch
import math import math
from backup_FD import FaceDetection
from minio import Minio
from minio.error import S3Error