update script download image

feature/gadgame
sangta 2024-06-25 15:40:52 +07:00
parent d6f7a97fcb
commit 82b93f7a72
15 changed files with 260 additions and 159 deletions

File diff suppressed because one or more lines are too long


@@ -38,7 +38,7 @@ RenderSettings:
m_ReflectionIntensity: 1
m_CustomReflection: {fileID: 0}
m_Sun: {fileID: 0}
m_IndirectSpecularColor: {r: 0.37311918, g: 0.3807398, b: 0.35872716, a: 1}
m_IndirectSpecularColor: {r: 0.37311953, g: 0.38074014, b: 0.35872743, a: 1}
m_UseRadianceAmbientProbe: 0
--- !u!157 &3
LightmapSettings:
@@ -153,7 +153,7 @@ MonoBehaviour:
m_Name:
m_EditorClassIdentifier:
_graphApi: {fileID: 11400000, guid: c2740d22f4ae04448a082f5f49761822, type: 2}
_promotionId: 63c67c5434403000
_promotionId: 63fb40435c803000
_guestUpdatedSubscription: {fileID: 11400000, guid: f98ac02dda5623c4c82d342ee9602420, type: 2}
--- !u!4 &398718624
Transform:


@@ -6,6 +6,7 @@ using TMPro;
using UnityEngine;
using UnityEngine.Serialization;
using UnityEngine.UI;
using GadGame.Network;
namespace GadGame.Scripts.Coffee
{
@@ -32,8 +33,10 @@ namespace GadGame.Scripts.Coffee
private bool _isLoading;
private float _timer;
private void Awake()
private async void Awake()
{
await P4PGraphqlManager.Instance.JoinPromotion();
await P4PGraphqlManager.Instance.SubmitGameSession(0);
_idleBg.alpha = 1;
_loading.transform.DOLocalRotate(new Vector3(0, 0, 360), 10 / _loadingSpeed, RotateMode.FastBeyond360)
.SetLoops(-1)


@@ -25,8 +25,6 @@ namespace GadGame.Scripts
//Decode the Base64 string to a byte array
byte[] imageBytes = UdpSocket.Instance.DataReceived.GenerateImageSuccess ? File.ReadAllBytes(streamingData) : Convert.FromBase64String(streamingData);
// byte[] imageBytes = Convert.FromBase64String(encodeString);
_texture.LoadImage(imageBytes); // Automatically resizes the texture dimensions
// _texture.Apply();
var sprite = Sprite.Create(_texture, new Rect(0, 0, _texture.width, _texture.height), new Vector2(0.5f, 0.5f), 100);


@@ -27,6 +27,7 @@ namespace GadGame
string _macAddress = GetMacAddressString();
Debug.Log(_macAddress);
await P4PGraphqlManager.Instance.LoginMachine(_macAddress);
await P4PGraphqlManager.Instance.CreateGuest();
}
private async void Start()


@@ -23,7 +23,13 @@ namespace GadGame.State.MainFlowState
public override void Update(float time)
{
Runner.EncodeImage.Raise(UdpSocket.Instance.DataReceived.StreamingData);
if(_scanSuccess) return;
if(_scanSuccess)
{
UdpSocket.Instance.SendDataToPython("End");
Runner.SetState<IdleState>();
return;
}
if (!UdpSocket.Instance.DataReceived.Engage)
{
_leaveTimer += Time.deltaTime;


@@ -0,0 +1,7 @@
fileFormatVersion: 2
guid: 5d25199b74a8a56878b29d2ae14fac9f
DefaultImporter:
  externalObjects: {}
  userData:
  assetBundleName:
  assetBundleVariant:


@@ -0,0 +1,48 @@
from mtcnn import MTCNN
from facenet_pytorch import MTCNN
import torch
import math
import cv2
import numpy as np

K_MULTIPLIER = 1.2
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
detector = MTCNN(keep_all=True, device=device)


class FaceDetection:
    def __init__(self,):
        self.detector = detector

    def calculate_distance(self, p1, p2) -> float:
        return math.sqrt((p1[0] - p2[0]) ** 2 + (p1[1] - p2[1]) ** 2)

    def calculate_dis_to_cp(self, cx, cy, face_cx, face_cy) -> float:
        return math.sqrt((face_cx - cx) ** 2 + (face_cy - cy) ** 2)

    def detect_face(self, frame, cx, cy) -> bool:
        boxes, probs, landmarks = self.detector.detect(frame, landmarks=True)
        for box, landmark in zip(boxes, landmarks):
            # Draw bounding box
            x1, y1, x2, y2 = map(int, box)
            face_cx = int(x1 + (x2 - x1) / 2)
            face_cy = int(y1 + (y2 - y1) / 2)
            if len(landmark) >= 5:
                nose = landmark[2]
                left_eye = landmark[0]
                right_eye = landmark[1]
                # Calculate distances
                distance_left = self.calculate_distance(nose, left_eye)
                distance_right = self.calculate_distance(nose, right_eye)
                # Check if distances exceed threshold
                if not (distance_left > K_MULTIPLIER * distance_right or distance_right > K_MULTIPLIER * distance_left or
                        self.calculate_dis_to_cp(cx, cy, face_cx, face_cy) > 30):
                    return True
        return False
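
A minimal usage sketch for the new FaceDetection helper, assuming a webcam at index 0 and backup_FD on the import path; facenet_pytorch's MTCNN.detect returns None boxes when no face is visible, so the call is guarded here (this snippet is not part of the commit):

import cv2
from backup_FD import FaceDetection

face_detection = FaceDetection()
cap = cv2.VideoCapture(0)
ret, frame = cap.read()
if ret:
    h, w = frame.shape[:2]
    try:
        # the frame centre stands in for the face-zone centre point used by the main program
        is_forward = face_detection.detect_face(frame, w // 2, h // 2)
    except TypeError:
        # detect() returned None boxes, i.e. no face in the frame
        is_forward = False
    print("forward-facing face near centre:", is_forward)
cap.release()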


@@ -0,0 +1,7 @@
fileFormatVersion: 2
guid: f4f9204610747ca8bbc62f2c4c03f4b7
DefaultImporter:
  externalObjects: {}
  userData:
  assetBundleName:
  assetBundleVariant:


@@ -51,13 +51,22 @@ class MainProgram:
self.client = NovitaClient("48cc2b16-286f-49c8-9581-f409b68359c4")
self.client_minio = Minio("192.168.1.186:50047",
access_key="play4promo_user",
secret_key="12345678",
secure=False
)
self.bucket_name = "play4promo"
self.des_file = "sid/final_image.jpg"
self.ready_success = False
self.show_success = False
self.check_save, self.check_generate = False, False
self.forward_face = Face_detection.FaceDetection()
self.forward_face = FaceDetection()
def convertFrame(self, frame) -> str:
encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), 90]
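
For reference, a minimal sketch of guarding the upload target before any fput_object call, reusing the endpoint and credentials hard-coded above; bucket_exists and make_bucket are standard MinIO SDK calls, and this guard is not part of the commit:

from minio import Minio

client_minio = Minio("192.168.1.186:50047",
                     access_key="play4promo_user",
                     secret_key="12345678",
                     secure=False)
bucket_name = "play4promo"
# create the bucket on first run so later uploads cannot fail with NoSuchBucket
if not client_minio.bucket_exists(bucket_name):
    client_minio.make_bucket(bucket_name)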
@@ -77,13 +86,18 @@ class MainProgram:
def generate_image(self):
image_url, des = self.get_image()
image_path = "./image/merge_face.png"
res = self.client.merge_face(
image=image_url,
face_image="./image/output.jpg",
)
base64_to_image(res.image_file).save("./image/merge_face.png")
base64_to_image(res.image_file).save(image_path)
self.client_minio.fput_object(
self.bucket_name, self.des_file, image_path,
)
self.send_data_unity["Description"] = des
self.send_data_unity["GenerateImageSuccess"] = True
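
One common way to let the client download the uploaded object without making the bucket public is a presigned URL; this is a sketch only, and the ImageUrl field is a hypothetical name, not something this commit adds:

from datetime import timedelta

# sketch: short-lived download link for the object uploaded with fput_object above
url = self.client_minio.presigned_get_object(self.bucket_name, self.des_file,
                                             expires=timedelta(hours=1))
self.send_data_unity["ImageUrl"] = url  # hypothetical field, not in the commit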
@@ -112,25 +126,13 @@ class MainProgram:
return np.concatenate((bbox, conf[:, np.newaxis]), axis=1)
def get_person_bbox(self, frame):
def check_engage(self, frame):
# Perform object detection with YOLOv8 class = 0 indicate person class
outs = self.person_model(frame, classes=[0], conf=0.7)
if not outs[0].boxes.xyxy.tolist():
detection = np.empty((0, 6))
# Extract relevant information from detections for boxmot
else:
boxes = outs[0].boxes.xyxy.tolist()
classes = outs[0].boxes.cls.tolist()
confidences = outs[0].boxes.conf.tolist()
detection = np.array([box + [conf, cls] for box, conf, cls in zip(boxes, confidences, classes)])
return detection
def check_engage(self, x1, x2) -> bool:
if not (x1 > self.red_zone_width[1] or x2 < self.red_zone_width[0]):
return True
return False
else:
return True
def cropped_image(self, frame, x1, y1, x2, y2):
return frame[y1: y2, x1: x2]
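
The engage test kept from the old check_engage(x1, x2) is a one-dimensional interval-overlap check between the person box and the red zone; a standalone sketch with illustrative zone bounds:

def overlaps_zone(x1: int, x2: int, zone_left: int, zone_right: int) -> bool:
    # the box [x1, x2] misses the zone only if it lies entirely to one side of it
    return not (x1 > zone_right or x2 < zone_left)

# illustrative values, not taken from the commit
print(overlaps_zone(100, 250, 200, 400))  # True, the box reaches into the zone
print(overlaps_zone(450, 600, 200, 400))  # False, the box is right of the zone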
@@ -138,54 +140,15 @@ class MainProgram:
def calculate_dis_to_cp(self, cx, cy, face_cx, face_cy) -> float:
return math.sqrt((face_cx - cx) ** 2 + (face_cy - cy) ** 2)
def check_ready(self, x1, y1, x2, y2, frame):
person_frame = self.cropped_image(frame, x1, y1, x2, y2)
# out = self.face_model(person_frame)
# results = sv.Detections.from_ultralytics(out[0])
# bbox = results.xyxy.astype(np.int_)
#
# face_cx, face_cy = (int(bbox[0][0] + x1 + (bbox[0][2] - bbox[0][0]) / 2),
# int(bbox[0][1] + y1 + (bbox[0][3] - bbox[0][1]) / 2))
#
# dis = self.calculate_dis_to_cp()
return self.forward_face.detect_face(person_frame, self.face_zone_center_point[0],
self.face_zone_center_point[1], x1, y1)
def check_ready(self, frame):
return self.forward_face.detect_face(frame, self.face_zone_center_point[0],
self.face_zone_center_point[1])
def person_process(self, frame):
# Perform person detection
person_detections = self.get_person_bbox(frame)
self.send_data_unity["PassBy"] = self.check_engage(frame)
self.send_data_unity["Engage"] = self.send_data_unity["PassBy"]
# Update the tracker with person detections
tracked_objects = self.tracker.update(person_detections, frame)
track_list = []
frame_to_crop = frame.copy()
engage = False
for track in tracked_objects.astype(int):
x1, y1, x2, y2, track_id, conf, cls, _ = track
track_list.append(track_id)
# cv2.rectangle(self.frame_to_show, (x1, y1), (x2, y2), (0, 255, 0), 2)
# cv2.putText(self.frame_to_show, f"ID: {track_id} Conf: {conf:.2f}", (x1, y1 - 10),
# cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
#
# cv2.rectangle(self.frame_to_show, (self.red_zone_width[0], self.red_zone_height[0]),
# (self.red_zone_width[1], self.red_zone_height[1]), (255, 0, 0), 2)
if not engage:
engage = self.check_engage(x1, x2)
if not self.focus_id:
self.focus_id = track_id if self.check_ready(x1, y1, x2, y2, frame_to_crop) else None
elif track_id != self.focus_id:
continue
else:
received_data = self.sock.ReadReceivedData()
if received_data == "Begin":
@@ -209,10 +172,13 @@ class MainProgram:
}
if not self.ready_success:
self.send_data_unity["Ready"] = True if self.check_ready(x1, y1, x2, y2, frame_to_crop) else False
self.send_data_unity["Ready"] = True if self.check_ready(frame) else False
elif not self.check_save:
cv2.imwrite("./image/output.jpg", self.cropped_image(frame, x1, y1, x2, y2))
cv2.imwrite("./image/output.jpg", self.cropped_image(frame, self.face_zone_width[0],
self.face_zone_height[0],
self.face_zone_width[1],
self.face_zone_height[1]))
self.check_save = True
elif not self.check_generate:
@@ -227,29 +193,6 @@ class MainProgram:
self.check_save = False
self.check_generate = False
if track_list:
self.send_data_unity["PassBy"] = True
self.send_data_unity["Engage"] = engage
else:
self.send_data_unity["Engage"] = False
self.send_data_unity["PassBy"] = False
self.send_data_unity["Ready"] = False
if self.focus_id not in track_list:
if self.frame_count_remove_idx == 20:
self.frame_count_remove_idx = 0
self.focus_id = None
else:
self.frame_count_remove_idx += 1
else:
self.frame_count_remove_idx = 0
# cv2.putText(self.frame_to_show, f"Focus id: {self.focus_id}", (20, 20), cv2.FONT_HERSHEY_SIMPLEX,
# 1.0, (0, 255, 255), 2)
def __call__(self):
cap = cv2.VideoCapture(0)
@@ -280,6 +223,8 @@ class MainProgram:
frame_to_handle = frame.copy()
self.frame_to_show = frame.copy()
# self.person_process(frame_to_handle)
try:
self.person_process(frame_to_handle)
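
The matching except branch falls outside the shown context; a sketch of the intended shape, with the logging choice being an assumption rather than part of the commit:

try:
    self.person_process(frame_to_handle)
except Exception as exc:
    # keep the capture loop alive if a single frame fails (e.g. MTCNN returns no boxes)
    print(f"person_process failed: {exc}")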

Binary file not shown (before: 4.5 MiB, after: 4.5 MiB).

Binary file not shown (before: 27 KiB, after: 11 KiB).


@@ -32,3 +32,8 @@ import os
from facenet_pytorch import MTCNN
import torch
import math
from backup_FD import FaceDetection
from minio import Minio
from minio.error import S3Error
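
The newly imported S3Error is the MinIO SDK's server-side exception type; a sketch of how the fput_object call in generate_image could be guarded with it (an assumption on the intended use, the commit itself adds no such handler):

from minio.error import S3Error

try:
    self.client_minio.fput_object(self.bucket_name, self.des_file, image_path)
except S3Error as err:
    # e.g. NoSuchBucket or AccessDenied reported by the MinIO server
    print(f"upload to MinIO failed: {err}")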