Compare commits

..

2 Commits

Author SHA1 Message Date
sangta 521b415682 update 2024-06-27 15:08:10 +07:00
sangta 0bd7c1a981 update 2024-06-27 15:07:58 +07:00
275 changed files with 13436 additions and 878 deletions

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@ -18,7 +18,7 @@ MonoBehaviour:
- name: LoginAsGameMachine - name: LoginAsGameMachine
type: 1 type: 1
query: "mutation LoginAsGameMachine{\n loginAsGameMachine( input :{ macAddress query: "mutation LoginAsGameMachine{\n loginAsGameMachine( input :{ macAddress
:\"D85ED3741515\", password :\"Sangta@123\"} ){\n accessToken\n :\"D8BBC1004EF5\", password :\"Sangta@123\"} ){\n accessToken\n
refreshToken\n }\n}" refreshToken\n }\n}"
queryString: loginAsGameMachine queryString: loginAsGameMachine
returnType: Game returnType: Game
@ -95,7 +95,7 @@ MonoBehaviour:
isComplete: 1 isComplete: 1
- name: CreateGuest - name: CreateGuest
type: 1 type: 1
query: "mutation CreateGuest{\n createGuest( input :{ password :\"Abc@123\"} query: "mutation CreateGuest{\n createGuest( input :{ password :\"Sangta@123\"}
){\n accessToken\n refreshToken\n user{\n id\n ){\n accessToken\n refreshToken\n user{\n id\n
}\n }\n}" }\n }\n}"
queryString: createGuest queryString: createGuest
@ -195,7 +195,7 @@ MonoBehaviour:
isComplete: 1 isComplete: 1
- name: JoinPromotion - name: JoinPromotion
type: 1 type: 1
query: "mutation JoinPromotion{\n joinPromotion( input :{ promotionId :\"63c67c5434403000\"} query: "mutation JoinPromotion{\n joinPromotion( input :{ promotionId :\"63d878a42dc03000\"}
){\n id\n totalScore\n }\n}" ){\n id\n totalScore\n }\n}"
queryString: joinPromotion queryString: joinPromotion
returnType: Participant returnType: Participant
@ -281,8 +281,8 @@ MonoBehaviour:
- name: SubmitGameSession - name: SubmitGameSession
type: 1 type: 1
query: "mutation SubmitGameSession{\n submitGameSession( input :{ playerId query: "mutation SubmitGameSession{\n submitGameSession( input :{ playerId
:\"63e929e9b3c01000\", promotionId :\"63c67c5434403000\", startAt :\"2024-05-23T15:53:22.407196+07:00\", :\"6415300313c01000\", promotionId :\"63d878a42dc03000\", startAt :\"2024-06-26T20:34:49.291114+07:00\",
endAt :\"2024-05-23T15:53:29.062723+07:00\", score :1000} ){\n startAt\n endAt :\"2024-06-26T20:38:45.135242+07:00\", score :0} ){\n startAt\n
endAt\n score\n }\n}" endAt\n score\n }\n}"
queryString: submitGameSession queryString: submitGameSession
returnType: GameSession returnType: GameSession
@ -392,7 +392,7 @@ MonoBehaviour:
- name: GuestUpdatedSubscription - name: GuestUpdatedSubscription
type: 2 type: 2
query: "subscription GuestUpdatedSubscription{\n guestUpdatedSubscription( query: "subscription GuestUpdatedSubscription{\n guestUpdatedSubscription(
guestId :\"63e929e9b3c01000\" ){\n firstName\n lastName\n guestId :\"6415300313c01000\" ){\n firstName\n lastName\n
phone\n email\n }\n}" phone\n email\n }\n}"
queryString: guestUpdatedSubscription queryString: guestUpdatedSubscription
returnType: Guest returnType: Guest

View File

@ -235,7 +235,7 @@ RectTransform:
m_AnchorMin: {x: 0, y: 0} m_AnchorMin: {x: 0, y: 0}
m_AnchorMax: {x: 1, y: 1} m_AnchorMax: {x: 1, y: 1}
m_AnchoredPosition: {x: 0, y: 0} m_AnchoredPosition: {x: 0, y: 0}
m_SizeDelta: {x: 100, y: 100} m_SizeDelta: {x: 0, y: 0}
m_Pivot: {x: 0.5, y: 0.5} m_Pivot: {x: 0.5, y: 0.5}
--- !u!114 &153040809 --- !u!114 &153040809
MonoBehaviour: MonoBehaviour:
@ -832,8 +832,8 @@ MonoBehaviour:
m_Calls: [] m_Calls: []
m_text: m_text:
m_isRightToLeft: 0 m_isRightToLeft: 0
m_fontAsset: {fileID: 11400000, guid: 32ca7ffda2664c077bcf6abfc6f32d0d, type: 2} m_fontAsset: {fileID: 11400000, guid: 9cf4fd6f40135976a80659e0dafb626a, type: 2}
m_sharedMaterial: {fileID: -1949374272958031481, guid: 32ca7ffda2664c077bcf6abfc6f32d0d, type: 2} m_sharedMaterial: {fileID: 4024459306243952741, guid: 9cf4fd6f40135976a80659e0dafb626a, type: 2}
m_fontSharedMaterials: [] m_fontSharedMaterials: []
m_fontMaterial: {fileID: 0} m_fontMaterial: {fileID: 0}
m_fontMaterials: [] m_fontMaterials: []
@ -863,7 +863,7 @@ MonoBehaviour:
m_enableAutoSizing: 0 m_enableAutoSizing: 0
m_fontSizeMin: 18 m_fontSizeMin: 18
m_fontSizeMax: 72 m_fontSizeMax: 72
m_fontStyle: 0 m_fontStyle: 1
m_HorizontalAlignment: 2 m_HorizontalAlignment: 2
m_VerticalAlignment: 512 m_VerticalAlignment: 512
m_textAlignment: 65535 m_textAlignment: 65535
@ -1442,7 +1442,7 @@ RectTransform:
m_LocalEulerAnglesHint: {x: 0, y: 0, z: 0} m_LocalEulerAnglesHint: {x: 0, y: 0, z: 0}
m_AnchorMin: {x: 0, y: 1} m_AnchorMin: {x: 0, y: 1}
m_AnchorMax: {x: 1, y: 1} m_AnchorMax: {x: 1, y: 1}
m_AnchoredPosition: {x: 0, y: -160} m_AnchoredPosition: {x: 0, y: -110}
m_SizeDelta: {x: -100, y: 50} m_SizeDelta: {x: -100, y: 50}
m_Pivot: {x: 0.5, y: 0.5} m_Pivot: {x: 0.5, y: 0.5}
--- !u!114 &1201464276 --- !u!114 &1201464276
@ -1465,7 +1465,7 @@ MonoBehaviour:
m_OnCullStateChanged: m_OnCullStateChanged:
m_PersistentCalls: m_PersistentCalls:
m_Calls: [] m_Calls: []
m_text: "Soi C\xE0 Ph\xEA, Ra T\xEDnh N\u1EBFt!" m_text: "Soi C\xE0 Ph\xEA, Ra Ti\u1EC1n Ki\u1EBFp"
m_isRightToLeft: 0 m_isRightToLeft: 0
m_fontAsset: {fileID: 11400000, guid: 10a7b9e40a1040b5396b3833a6aa8338, type: 2} m_fontAsset: {fileID: 11400000, guid: 10a7b9e40a1040b5396b3833a6aa8338, type: 2}
m_sharedMaterial: {fileID: 8449302638115568603, guid: 10a7b9e40a1040b5396b3833a6aa8338, type: 2} m_sharedMaterial: {fileID: 8449302638115568603, guid: 10a7b9e40a1040b5396b3833a6aa8338, type: 2}
@ -1492,8 +1492,8 @@ MonoBehaviour:
m_faceColor: m_faceColor:
serializedVersion: 2 serializedVersion: 2
rgba: 4294967295 rgba: 4294967295
m_fontSize: 75 m_fontSize: 68
m_fontSizeBase: 75 m_fontSizeBase: 68
m_fontWeight: 400 m_fontWeight: 400
m_enableAutoSizing: 0 m_enableAutoSizing: 0
m_fontSizeMin: 18 m_fontSizeMin: 18
@ -1602,7 +1602,7 @@ Camera:
near clip plane: 0.3 near clip plane: 0.3
far clip plane: 1000 far clip plane: 1000
field of view: 60 field of view: 60
orthographic: 0 orthographic: 1
orthographic size: 5 orthographic size: 5
m_Depth: -1 m_Depth: -1
m_CullingMask: m_CullingMask:
@ -2012,7 +2012,7 @@ RectTransform:
m_PrefabInstance: {fileID: 0} m_PrefabInstance: {fileID: 0}
m_PrefabAsset: {fileID: 0} m_PrefabAsset: {fileID: 0}
m_GameObject: {fileID: 1559499562} m_GameObject: {fileID: 1559499562}
m_LocalRotation: {x: 0, y: 0, z: -0.38268343, w: 0.92387956} m_LocalRotation: {x: 0, y: 0, z: -0.39313957, w: 0.91947883}
m_LocalPosition: {x: 0, y: 0, z: 0} m_LocalPosition: {x: 0, y: 0, z: 0}
m_LocalScale: {x: 1.2, y: 1.2, z: 1.2} m_LocalScale: {x: 1.2, y: 1.2, z: 1.2}
m_ConstrainProportionsScale: 1 m_ConstrainProportionsScale: 1
@ -2020,11 +2020,11 @@ RectTransform:
- {fileID: 1101322599} - {fileID: 1101322599}
- {fileID: 677383977} - {fileID: 677383977}
m_Father: {fileID: 854700888} m_Father: {fileID: 854700888}
m_LocalEulerAnglesHint: {x: 0, y: 0, z: -45} m_LocalEulerAnglesHint: {x: 0, y: 0, z: -46.3}
m_AnchorMin: {x: 0.5, y: 0.5} m_AnchorMin: {x: 0.5, y: 0.5}
m_AnchorMax: {x: 0.5, y: 0.5} m_AnchorMax: {x: 0.5, y: 0.5}
m_AnchoredPosition: {x: 70, y: 150} m_AnchoredPosition: {x: 70, y: 203.24}
m_SizeDelta: {x: 1000, y: 1000} m_SizeDelta: {x: 960, y: 960}
m_Pivot: {x: 0.5, y: 0.5} m_Pivot: {x: 0.5, y: 0.5}
--- !u!222 &1559499564 --- !u!222 &1559499564
CanvasRenderer: CanvasRenderer:

View File

@ -153,7 +153,7 @@ MonoBehaviour:
m_Name: m_Name:
m_EditorClassIdentifier: m_EditorClassIdentifier:
_graphApi: {fileID: 11400000, guid: c2740d22f4ae04448a082f5f49761822, type: 2} _graphApi: {fileID: 11400000, guid: c2740d22f4ae04448a082f5f49761822, type: 2}
_promotionId: 63fb40435c803000 _promotionId: 63d878a42dc03000
_guestUpdatedSubscription: {fileID: 11400000, guid: f98ac02dda5623c4c82d342ee9602420, type: 2} _guestUpdatedSubscription: {fileID: 11400000, guid: f98ac02dda5623c4c82d342ee9602420, type: 2}
--- !u!4 &398718624 --- !u!4 &398718624
Transform: Transform:

View File

@ -252,81 +252,6 @@ RectTransform:
m_AnchoredPosition: {x: 0, y: 0} m_AnchoredPosition: {x: 0, y: 0}
m_SizeDelta: {x: 100, y: 100} m_SizeDelta: {x: 100, y: 100}
m_Pivot: {x: 0.5, y: 0.5} m_Pivot: {x: 0.5, y: 0.5}
--- !u!1 &162662060
GameObject:
m_ObjectHideFlags: 0
m_CorrespondingSourceObject: {fileID: 0}
m_PrefabInstance: {fileID: 0}
m_PrefabAsset: {fileID: 0}
serializedVersion: 6
m_Component:
- component: {fileID: 162662061}
- component: {fileID: 162662063}
- component: {fileID: 162662062}
m_Layer: 5
m_Name: Logo
m_TagString: Untagged
m_Icon: {fileID: 0}
m_NavMeshLayer: 0
m_StaticEditorFlags: 0
m_IsActive: 1
--- !u!224 &162662061
RectTransform:
m_ObjectHideFlags: 0
m_CorrespondingSourceObject: {fileID: 0}
m_PrefabInstance: {fileID: 0}
m_PrefabAsset: {fileID: 0}
m_GameObject: {fileID: 162662060}
m_LocalRotation: {x: 0, y: 0, z: 0, w: 1}
m_LocalPosition: {x: 0, y: 0, z: 0}
m_LocalScale: {x: 1, y: 1, z: 1}
m_ConstrainProportionsScale: 0
m_Children: []
m_Father: {fileID: 1939570967}
m_LocalEulerAnglesHint: {x: 0, y: 0, z: 0}
m_AnchorMin: {x: 1, y: 0}
m_AnchorMax: {x: 1, y: 0}
m_AnchoredPosition: {x: -70, y: 70}
m_SizeDelta: {x: 150, y: 150}
m_Pivot: {x: 0.5, y: 0.5}
--- !u!114 &162662062
MonoBehaviour:
m_ObjectHideFlags: 0
m_CorrespondingSourceObject: {fileID: 0}
m_PrefabInstance: {fileID: 0}
m_PrefabAsset: {fileID: 0}
m_GameObject: {fileID: 162662060}
m_Enabled: 1
m_EditorHideFlags: 0
m_Script: {fileID: 11500000, guid: fe87c0e1cc204ed48ad3b37840f39efc, type: 3}
m_Name:
m_EditorClassIdentifier:
m_Material: {fileID: 0}
m_Color: {r: 1, g: 1, b: 1, a: 1}
m_RaycastTarget: 1
m_RaycastPadding: {x: 0, y: 0, z: 0, w: 0}
m_Maskable: 1
m_OnCullStateChanged:
m_PersistentCalls:
m_Calls: []
m_Sprite: {fileID: 21300000, guid: ad4b693625d4260a0a76fd968adf90dd, type: 3}
m_Type: 0
m_PreserveAspect: 0
m_FillCenter: 1
m_FillMethod: 4
m_FillAmount: 1
m_FillClockwise: 1
m_FillOrigin: 0
m_UseSpriteMesh: 0
m_PixelsPerUnitMultiplier: 1
--- !u!222 &162662063
CanvasRenderer:
m_ObjectHideFlags: 0
m_CorrespondingSourceObject: {fileID: 0}
m_PrefabInstance: {fileID: 0}
m_PrefabAsset: {fileID: 0}
m_GameObject: {fileID: 162662060}
m_CullTransparentMesh: 1
--- !u!1 &226662302 --- !u!1 &226662302
GameObject: GameObject:
m_ObjectHideFlags: 0 m_ObjectHideFlags: 0
@ -441,7 +366,7 @@ RectTransform:
m_PrefabInstance: {fileID: 0} m_PrefabInstance: {fileID: 0}
m_PrefabAsset: {fileID: 0} m_PrefabAsset: {fileID: 0}
m_GameObject: {fileID: 263418221} m_GameObject: {fileID: 263418221}
m_LocalRotation: {x: 0, y: 0, z: -0.38268343, w: 0.92387956} m_LocalRotation: {x: 0, y: 0, z: -0.39313957, w: 0.91947883}
m_LocalPosition: {x: 0, y: 0, z: 0} m_LocalPosition: {x: 0, y: 0, z: 0}
m_LocalScale: {x: 1.2, y: 1.2, z: 1.2} m_LocalScale: {x: 1.2, y: 1.2, z: 1.2}
m_ConstrainProportionsScale: 1 m_ConstrainProportionsScale: 1
@ -450,11 +375,11 @@ RectTransform:
- {fileID: 1701610192} - {fileID: 1701610192}
- {fileID: 2075871718} - {fileID: 2075871718}
m_Father: {fileID: 1447100632} m_Father: {fileID: 1447100632}
m_LocalEulerAnglesHint: {x: 0, y: 0, z: -45} m_LocalEulerAnglesHint: {x: 0, y: 0, z: -46.3}
m_AnchorMin: {x: 0.5, y: 0.5} m_AnchorMin: {x: 0.5, y: 0.5}
m_AnchorMax: {x: 0.5, y: 0.5} m_AnchorMax: {x: 0.5, y: 0.5}
m_AnchoredPosition: {x: 70, y: 150} m_AnchoredPosition: {x: 70, y: 203.24}
m_SizeDelta: {x: 1000, y: 1000} m_SizeDelta: {x: 960, y: 960}
m_Pivot: {x: 0.5, y: 0.5} m_Pivot: {x: 0.5, y: 0.5}
--- !u!114 &263418223 --- !u!114 &263418223
MonoBehaviour: MonoBehaviour:
@ -528,7 +453,7 @@ RectTransform:
m_LocalEulerAnglesHint: {x: 0, y: 0, z: 0} m_LocalEulerAnglesHint: {x: 0, y: 0, z: 0}
m_AnchorMin: {x: 0, y: 1} m_AnchorMin: {x: 0, y: 1}
m_AnchorMax: {x: 1, y: 1} m_AnchorMax: {x: 1, y: 1}
m_AnchoredPosition: {x: 0, y: -160} m_AnchoredPosition: {x: 0, y: -110}
m_SizeDelta: {x: -100, y: 50} m_SizeDelta: {x: -100, y: 50}
m_Pivot: {x: 0.5, y: 0.5} m_Pivot: {x: 0.5, y: 0.5}
--- !u!114 &282395849 --- !u!114 &282395849
@ -551,7 +476,7 @@ MonoBehaviour:
m_OnCullStateChanged: m_OnCullStateChanged:
m_PersistentCalls: m_PersistentCalls:
m_Calls: [] m_Calls: []
m_text: "Soi C\xE0 Ph\xEA, Ra T\xEDnh N\u1EBFt!" m_text: "Soi C\xE0 Ph\xEA, Ra Ti\u1EC1n Ki\u1EBFp"
m_isRightToLeft: 0 m_isRightToLeft: 0
m_fontAsset: {fileID: 11400000, guid: 10a7b9e40a1040b5396b3833a6aa8338, type: 2} m_fontAsset: {fileID: 11400000, guid: 10a7b9e40a1040b5396b3833a6aa8338, type: 2}
m_sharedMaterial: {fileID: 8449302638115568603, guid: 10a7b9e40a1040b5396b3833a6aa8338, type: 2} m_sharedMaterial: {fileID: 8449302638115568603, guid: 10a7b9e40a1040b5396b3833a6aa8338, type: 2}
@ -578,8 +503,8 @@ MonoBehaviour:
m_faceColor: m_faceColor:
serializedVersion: 2 serializedVersion: 2
rgba: 4294967295 rgba: 4294967295
m_fontSize: 75 m_fontSize: 68
m_fontSizeBase: 75 m_fontSizeBase: 68
m_fontWeight: 400 m_fontWeight: 400
m_enableAutoSizing: 0 m_enableAutoSizing: 0
m_fontSizeMin: 18 m_fontSizeMin: 18
@ -653,7 +578,7 @@ RectTransform:
m_PrefabInstance: {fileID: 0} m_PrefabInstance: {fileID: 0}
m_PrefabAsset: {fileID: 0} m_PrefabAsset: {fileID: 0}
m_GameObject: {fileID: 334911199} m_GameObject: {fileID: 334911199}
m_LocalRotation: {x: -0, y: -0, z: -0, w: 1} m_LocalRotation: {x: 0, y: 0, z: 0, w: 1}
m_LocalPosition: {x: 0, y: 0, z: 0} m_LocalPosition: {x: 0, y: 0, z: 0}
m_LocalScale: {x: 1, y: 1, z: 1} m_LocalScale: {x: 1, y: 1, z: 1}
m_ConstrainProportionsScale: 0 m_ConstrainProportionsScale: 0
@ -662,8 +587,8 @@ RectTransform:
m_LocalEulerAnglesHint: {x: 0, y: 0, z: 0} m_LocalEulerAnglesHint: {x: 0, y: 0, z: 0}
m_AnchorMin: {x: 0.5, y: 0.5} m_AnchorMin: {x: 0.5, y: 0.5}
m_AnchorMax: {x: 0.5, y: 0.5} m_AnchorMax: {x: 0.5, y: 0.5}
m_AnchoredPosition: {x: 0, y: -400} m_AnchoredPosition: {x: 0, y: -300}
m_SizeDelta: {x: 800, y: 200} m_SizeDelta: {x: 900, y: 200}
m_Pivot: {x: 0.5, y: 0.5} m_Pivot: {x: 0.5, y: 0.5}
--- !u!114 &334911201 --- !u!114 &334911201
MonoBehaviour: MonoBehaviour:
@ -688,8 +613,8 @@ MonoBehaviour:
m_text: "Nh\xECn th\u1EB3ng v\xE0o m\xE0n h\xECnh v\xE0 kh\xF4ng \u0111eo kh\u1EA9u m_text: "Nh\xECn th\u1EB3ng v\xE0o m\xE0n h\xECnh v\xE0 kh\xF4ng \u0111eo kh\u1EA9u
trang nh\xE9" trang nh\xE9"
m_isRightToLeft: 0 m_isRightToLeft: 0
m_fontAsset: {fileID: 11400000, guid: 67f211977806da85ba63ff22497a20f3, type: 2} m_fontAsset: {fileID: 11400000, guid: 9cf4fd6f40135976a80659e0dafb626a, type: 2}
m_sharedMaterial: {fileID: -4438248766993109609, guid: 67f211977806da85ba63ff22497a20f3, type: 2} m_sharedMaterial: {fileID: 4024459306243952741, guid: 9cf4fd6f40135976a80659e0dafb626a, type: 2}
m_fontSharedMaterials: [] m_fontSharedMaterials: []
m_fontMaterial: {fileID: 0} m_fontMaterial: {fileID: 0}
m_fontMaterials: [] m_fontMaterials: []
@ -713,13 +638,13 @@ MonoBehaviour:
m_faceColor: m_faceColor:
serializedVersion: 2 serializedVersion: 2
rgba: 4294967295 rgba: 4294967295
m_fontSize: 45 m_fontSize: 50
m_fontSizeBase: 45 m_fontSizeBase: 50
m_fontWeight: 400 m_fontWeight: 400
m_enableAutoSizing: 0 m_enableAutoSizing: 0
m_fontSizeMin: 18 m_fontSizeMin: 18
m_fontSizeMax: 72 m_fontSizeMax: 72
m_fontStyle: 0 m_fontStyle: 1
m_HorizontalAlignment: 2 m_HorizontalAlignment: 2
m_VerticalAlignment: 512 m_VerticalAlignment: 512
m_textAlignment: 65535 m_textAlignment: 65535
@ -852,7 +777,7 @@ VideoPlayer:
m_PrefabAsset: {fileID: 0} m_PrefabAsset: {fileID: 0}
m_GameObject: {fileID: 378183292} m_GameObject: {fileID: 378183292}
m_Enabled: 1 m_Enabled: 1
m_VideoClip: {fileID: 32900000, guid: b431e4dee076c8b698cee9a4d18c6dbd, type: 3} m_VideoClip: {fileID: 32900000, guid: adc516ee7ca030f02a69b62fcb3436e2, type: 3}
m_TargetCameraAlpha: 1 m_TargetCameraAlpha: 1
m_TargetCamera3DLayout: 0 m_TargetCamera3DLayout: 0
m_TargetCamera: {fileID: 1302683601} m_TargetCamera: {fileID: 1302683601}
@ -1021,7 +946,7 @@ CanvasRenderer:
m_PrefabAsset: {fileID: 0} m_PrefabAsset: {fileID: 0}
m_GameObject: {fileID: 731571811} m_GameObject: {fileID: 731571811}
m_CullTransparentMesh: 1 m_CullTransparentMesh: 1
--- !u!1 &879095297 --- !u!1 &1023304014
GameObject: GameObject:
m_ObjectHideFlags: 0 m_ObjectHideFlags: 0
m_CorrespondingSourceObject: {fileID: 0} m_CorrespondingSourceObject: {fileID: 0}
@ -1029,45 +954,45 @@ GameObject:
m_PrefabAsset: {fileID: 0} m_PrefabAsset: {fileID: 0}
serializedVersion: 6 serializedVersion: 6
m_Component: m_Component:
- component: {fileID: 879095298} - component: {fileID: 1023304015}
- component: {fileID: 879095300} - component: {fileID: 1023304017}
- component: {fileID: 879095299} - component: {fileID: 1023304016}
m_Layer: 5 m_Layer: 5
m_Name: FooterText m_Name: Logo
m_TagString: Untagged m_TagString: Untagged
m_Icon: {fileID: 0} m_Icon: {fileID: 0}
m_NavMeshLayer: 0 m_NavMeshLayer: 0
m_StaticEditorFlags: 0 m_StaticEditorFlags: 0
m_IsActive: 1 m_IsActive: 1
--- !u!224 &879095298 --- !u!224 &1023304015
RectTransform: RectTransform:
m_ObjectHideFlags: 0 m_ObjectHideFlags: 0
m_CorrespondingSourceObject: {fileID: 0} m_CorrespondingSourceObject: {fileID: 0}
m_PrefabInstance: {fileID: 0} m_PrefabInstance: {fileID: 0}
m_PrefabAsset: {fileID: 0} m_PrefabAsset: {fileID: 0}
m_GameObject: {fileID: 879095297} m_GameObject: {fileID: 1023304014}
m_LocalRotation: {x: 0, y: 0, z: 0, w: 1} m_LocalRotation: {x: -0, y: -0, z: -0, w: 1}
m_LocalPosition: {x: 0, y: 0, z: 0} m_LocalPosition: {x: 0, y: 0, z: 0}
m_LocalScale: {x: 1, y: 1, z: 1} m_LocalScale: {x: 1, y: 1, z: 1}
m_ConstrainProportionsScale: 0 m_ConstrainProportionsScale: 0
m_Children: [] m_Children: []
m_Father: {fileID: 1939570967} m_Father: {fileID: 1563200507}
m_LocalEulerAnglesHint: {x: 0, y: 0, z: 0} m_LocalEulerAnglesHint: {x: 0, y: 0, z: 0}
m_AnchorMin: {x: 0.5, y: 0} m_AnchorMin: {x: 1, y: 0}
m_AnchorMax: {x: 0.5, y: 0} m_AnchorMax: {x: 1, y: 0}
m_AnchoredPosition: {x: 0, y: 50} m_AnchoredPosition: {x: 435.8, y: -844.2}
m_SizeDelta: {x: 500, y: 100} m_SizeDelta: {x: 150, y: 150}
m_Pivot: {x: 0.5, y: 0.5} m_Pivot: {x: 0.5, y: 0.5}
--- !u!114 &879095299 --- !u!114 &1023304016
MonoBehaviour: MonoBehaviour:
m_ObjectHideFlags: 0 m_ObjectHideFlags: 0
m_CorrespondingSourceObject: {fileID: 0} m_CorrespondingSourceObject: {fileID: 0}
m_PrefabInstance: {fileID: 0} m_PrefabInstance: {fileID: 0}
m_PrefabAsset: {fileID: 0} m_PrefabAsset: {fileID: 0}
m_GameObject: {fileID: 879095297} m_GameObject: {fileID: 1023304014}
m_Enabled: 1 m_Enabled: 1
m_EditorHideFlags: 0 m_EditorHideFlags: 0
m_Script: {fileID: 11500000, guid: f4688fdb7df04437aeb418b961361dc5, type: 3} m_Script: {fileID: 11500000, guid: fe87c0e1cc204ed48ad3b37840f39efc, type: 3}
m_Name: m_Name:
m_EditorClassIdentifier: m_EditorClassIdentifier:
m_Material: {fileID: 0} m_Material: {fileID: 0}
@ -1078,82 +1003,23 @@ MonoBehaviour:
m_OnCullStateChanged: m_OnCullStateChanged:
m_PersistentCalls: m_PersistentCalls:
m_Calls: [] m_Calls: []
m_text: Power by GadSmart m_Sprite: {fileID: 21300000, guid: ad4b693625d4260a0a76fd968adf90dd, type: 3}
m_isRightToLeft: 0 m_Type: 0
m_fontAsset: {fileID: 11400000, guid: abe5db4279697b507a6db6ae49ee159e, type: 2} m_PreserveAspect: 0
m_sharedMaterial: {fileID: 6446697368654705400, guid: abe5db4279697b507a6db6ae49ee159e, type: 2} m_FillCenter: 1
m_fontSharedMaterials: [] m_FillMethod: 4
m_fontMaterial: {fileID: 0} m_FillAmount: 1
m_fontMaterials: [] m_FillClockwise: 1
m_fontColor32: m_FillOrigin: 0
serializedVersion: 2 m_UseSpriteMesh: 0
rgba: 4294967295 m_PixelsPerUnitMultiplier: 1
m_fontColor: {r: 1, g: 1, b: 1, a: 1} --- !u!222 &1023304017
m_enableVertexGradient: 0
m_colorMode: 3
m_fontColorGradient:
topLeft: {r: 1, g: 1, b: 1, a: 1}
topRight: {r: 1, g: 1, b: 1, a: 1}
bottomLeft: {r: 1, g: 1, b: 1, a: 1}
bottomRight: {r: 1, g: 1, b: 1, a: 1}
m_fontColorGradientPreset: {fileID: 0}
m_spriteAsset: {fileID: 0}
m_tintAllSprites: 0
m_StyleSheet: {fileID: 0}
m_TextStyleHashCode: -1183493901
m_overrideHtmlColors: 0
m_faceColor:
serializedVersion: 2
rgba: 4294967295
m_fontSize: 35
m_fontSizeBase: 35
m_fontWeight: 400
m_enableAutoSizing: 0
m_fontSizeMin: 18
m_fontSizeMax: 72
m_fontStyle: 0
m_HorizontalAlignment: 2
m_VerticalAlignment: 512
m_textAlignment: 65535
m_characterSpacing: 0
m_wordSpacing: 0
m_lineSpacing: 0
m_lineSpacingMax: 0
m_paragraphSpacing: 0
m_charWidthMaxAdj: 0
m_enableWordWrapping: 1
m_wordWrappingRatios: 0.4
m_overflowMode: 0
m_linkedTextComponent: {fileID: 0}
parentLinkedComponent: {fileID: 0}
m_enableKerning: 1
m_enableExtraPadding: 0
checkPaddingRequired: 0
m_isRichText: 1
m_parseCtrlCharacters: 1
m_isOrthographic: 1
m_isCullingEnabled: 0
m_horizontalMapping: 0
m_verticalMapping: 0
m_uvLineOffset: 0
m_geometrySortingOrder: 0
m_IsTextObjectScaleStatic: 0
m_VertexBufferAutoSizeReduction: 0
m_useMaxVisibleDescender: 1
m_pageToDisplay: 1
m_margin: {x: 0, y: 0, z: 0, w: 0}
m_isUsingLegacyAnimationComponent: 0
m_isVolumetricText: 0
m_hasFontAssetChanged: 0
m_baseMaterial: {fileID: 0}
m_maskOffset: {x: 0, y: 0, z: 0, w: 0}
--- !u!222 &879095300
CanvasRenderer: CanvasRenderer:
m_ObjectHideFlags: 0 m_ObjectHideFlags: 0
m_CorrespondingSourceObject: {fileID: 0} m_CorrespondingSourceObject: {fileID: 0}
m_PrefabInstance: {fileID: 0} m_PrefabInstance: {fileID: 0}
m_PrefabAsset: {fileID: 0} m_PrefabAsset: {fileID: 0}
m_GameObject: {fileID: 879095297} m_GameObject: {fileID: 1023304014}
m_CullTransparentMesh: 1 m_CullTransparentMesh: 1
--- !u!1 &1087577777 --- !u!1 &1087577777
GameObject: GameObject:
@ -1230,6 +1096,215 @@ CanvasRenderer:
m_PrefabAsset: {fileID: 0} m_PrefabAsset: {fileID: 0}
m_GameObject: {fileID: 1087577777} m_GameObject: {fileID: 1087577777}
m_CullTransparentMesh: 1 m_CullTransparentMesh: 1
--- !u!1 &1230984860
GameObject:
m_ObjectHideFlags: 0
m_CorrespondingSourceObject: {fileID: 0}
m_PrefabInstance: {fileID: 0}
m_PrefabAsset: {fileID: 0}
serializedVersion: 6
m_Component:
- component: {fileID: 1230984861}
- component: {fileID: 1230984863}
- component: {fileID: 1230984862}
m_Layer: 5
m_Name: Footer
m_TagString: Untagged
m_Icon: {fileID: 0}
m_NavMeshLayer: 0
m_StaticEditorFlags: 0
m_IsActive: 1
--- !u!224 &1230984861
RectTransform:
m_ObjectHideFlags: 0
m_CorrespondingSourceObject: {fileID: 0}
m_PrefabInstance: {fileID: 0}
m_PrefabAsset: {fileID: 0}
m_GameObject: {fileID: 1230984860}
m_LocalRotation: {x: -0, y: -0, z: -0, w: 1}
m_LocalPosition: {x: 0, y: 0, z: 0}
m_LocalScale: {x: 1, y: 1, z: 1}
m_ConstrainProportionsScale: 0
m_Children: []
m_Father: {fileID: 1563200507}
m_LocalEulerAnglesHint: {x: 0, y: 0, z: 0}
m_AnchorMin: {x: 0.5, y: 0}
m_AnchorMax: {x: 0.5, y: 0}
m_AnchoredPosition: {x: 0, y: -885}
m_SizeDelta: {x: 1080, y: 50}
m_Pivot: {x: 0.5, y: 0.5}
--- !u!114 &1230984862
MonoBehaviour:
m_ObjectHideFlags: 0
m_CorrespondingSourceObject: {fileID: 0}
m_PrefabInstance: {fileID: 0}
m_PrefabAsset: {fileID: 0}
m_GameObject: {fileID: 1230984860}
m_Enabled: 1
m_EditorHideFlags: 0
m_Script: {fileID: 11500000, guid: f4688fdb7df04437aeb418b961361dc5, type: 3}
m_Name:
m_EditorClassIdentifier:
m_Material: {fileID: 0}
m_Color: {r: 1, g: 1, b: 1, a: 1}
m_RaycastTarget: 1
m_RaycastPadding: {x: 0, y: 0, z: 0, w: 0}
m_Maskable: 1
m_OnCullStateChanged:
m_PersistentCalls:
m_Calls: []
m_text: Power by GadSmart
m_isRightToLeft: 0
m_fontAsset: {fileID: 11400000, guid: 8f586378b4e144a9851e7b34d9b748ee, type: 2}
m_sharedMaterial: {fileID: 2180264, guid: 8f586378b4e144a9851e7b34d9b748ee, type: 2}
m_fontSharedMaterials: []
m_fontMaterial: {fileID: 0}
m_fontMaterials: []
m_fontColor32:
serializedVersion: 2
rgba: 4294967295
m_fontColor: {r: 1, g: 1, b: 1, a: 1}
m_enableVertexGradient: 0
m_colorMode: 3
m_fontColorGradient:
topLeft: {r: 1, g: 1, b: 1, a: 1}
topRight: {r: 1, g: 1, b: 1, a: 1}
bottomLeft: {r: 1, g: 1, b: 1, a: 1}
bottomRight: {r: 1, g: 1, b: 1, a: 1}
m_fontColorGradientPreset: {fileID: 0}
m_spriteAsset: {fileID: 0}
m_tintAllSprites: 0
m_StyleSheet: {fileID: 0}
m_TextStyleHashCode: -1183493901
m_overrideHtmlColors: 0
m_faceColor:
serializedVersion: 2
rgba: 4294967295
m_fontSize: 36
m_fontSizeBase: 36
m_fontWeight: 400
m_enableAutoSizing: 0
m_fontSizeMin: 18
m_fontSizeMax: 72
m_fontStyle: 0
m_HorizontalAlignment: 2
m_VerticalAlignment: 256
m_textAlignment: 65535
m_characterSpacing: 0
m_wordSpacing: 0
m_lineSpacing: 0
m_lineSpacingMax: 0
m_paragraphSpacing: 0
m_charWidthMaxAdj: 0
m_enableWordWrapping: 1
m_wordWrappingRatios: 0.4
m_overflowMode: 0
m_linkedTextComponent: {fileID: 0}
parentLinkedComponent: {fileID: 0}
m_enableKerning: 1
m_enableExtraPadding: 0
checkPaddingRequired: 0
m_isRichText: 1
m_parseCtrlCharacters: 1
m_isOrthographic: 1
m_isCullingEnabled: 0
m_horizontalMapping: 0
m_verticalMapping: 0
m_uvLineOffset: 0
m_geometrySortingOrder: 0
m_IsTextObjectScaleStatic: 0
m_VertexBufferAutoSizeReduction: 0
m_useMaxVisibleDescender: 1
m_pageToDisplay: 1
m_margin: {x: 0, y: 0, z: 0, w: 0}
m_isUsingLegacyAnimationComponent: 0
m_isVolumetricText: 0
m_hasFontAssetChanged: 0
m_baseMaterial: {fileID: 0}
m_maskOffset: {x: 0, y: 0, z: 0, w: 0}
--- !u!222 &1230984863
CanvasRenderer:
m_ObjectHideFlags: 0
m_CorrespondingSourceObject: {fileID: 0}
m_PrefabInstance: {fileID: 0}
m_PrefabAsset: {fileID: 0}
m_GameObject: {fileID: 1230984860}
m_CullTransparentMesh: 1
--- !u!1 &1244346416
GameObject:
m_ObjectHideFlags: 0
m_CorrespondingSourceObject: {fileID: 0}
m_PrefabInstance: {fileID: 0}
m_PrefabAsset: {fileID: 0}
serializedVersion: 6
m_Component:
- component: {fileID: 1244346417}
- component: {fileID: 1244346419}
- component: {fileID: 1244346418}
m_Layer: 5
m_Name: Image
m_TagString: Untagged
m_Icon: {fileID: 0}
m_NavMeshLayer: 0
m_StaticEditorFlags: 0
m_IsActive: 0
--- !u!224 &1244346417
RectTransform:
m_ObjectHideFlags: 0
m_CorrespondingSourceObject: {fileID: 0}
m_PrefabInstance: {fileID: 0}
m_PrefabAsset: {fileID: 0}
m_GameObject: {fileID: 1244346416}
m_LocalRotation: {x: -0, y: -0, z: -0.38268343, w: 0.92387956}
m_LocalPosition: {x: 0, y: 0, z: 0}
m_LocalScale: {x: 1, y: 1, z: 1}
m_ConstrainProportionsScale: 0
m_Children: []
m_Father: {fileID: 1563200507}
m_LocalEulerAnglesHint: {x: 0, y: 0, z: -45}
m_AnchorMin: {x: 0.5, y: 0.5}
m_AnchorMax: {x: 0.5, y: 0.5}
m_AnchoredPosition: {x: 70, y: 239}
m_SizeDelta: {x: 1100, y: 1100}
m_Pivot: {x: 0.5, y: 0.5}
--- !u!114 &1244346418
MonoBehaviour:
m_ObjectHideFlags: 0
m_CorrespondingSourceObject: {fileID: 0}
m_PrefabInstance: {fileID: 0}
m_PrefabAsset: {fileID: 0}
m_GameObject: {fileID: 1244346416}
m_Enabled: 1
m_EditorHideFlags: 0
m_Script: {fileID: 11500000, guid: fe87c0e1cc204ed48ad3b37840f39efc, type: 3}
m_Name:
m_EditorClassIdentifier:
m_Material: {fileID: 0}
m_Color: {r: 1, g: 1, b: 1, a: 1}
m_RaycastTarget: 1
m_RaycastPadding: {x: 0, y: 0, z: 0, w: 0}
m_Maskable: 1
m_OnCullStateChanged:
m_PersistentCalls:
m_Calls: []
m_Sprite: {fileID: 21300000, guid: 7c8b06e889a8626f68e2b3970f11d8ad, type: 3}
m_Type: 0
m_PreserveAspect: 0
m_FillCenter: 1
m_FillMethod: 4
m_FillAmount: 1
m_FillClockwise: 1
m_FillOrigin: 0
m_UseSpriteMesh: 0
m_PixelsPerUnitMultiplier: 1
--- !u!222 &1244346419
CanvasRenderer:
m_ObjectHideFlags: 0
m_CorrespondingSourceObject: {fileID: 0}
m_PrefabInstance: {fileID: 0}
m_PrefabAsset: {fileID: 0}
m_GameObject: {fileID: 1244346416}
m_CullTransparentMesh: 1
--- !u!1 &1302683599 --- !u!1 &1302683599
GameObject: GameObject:
m_ObjectHideFlags: 0 m_ObjectHideFlags: 0
@ -1290,7 +1365,7 @@ Camera:
near clip plane: 0.3 near clip plane: 0.3
far clip plane: 1000 far clip plane: 1000
field of view: 60 field of view: 60
orthographic: 0 orthographic: 1
orthographic size: 5 orthographic size: 5
m_Depth: -1 m_Depth: -1
m_CullingMask: m_CullingMask:
@ -1322,6 +1397,81 @@ Transform:
m_Children: [] m_Children: []
m_Father: {fileID: 0} m_Father: {fileID: 0}
m_LocalEulerAnglesHint: {x: 0, y: 0, z: 0} m_LocalEulerAnglesHint: {x: 0, y: 0, z: 0}
--- !u!1 &1329164563
GameObject:
m_ObjectHideFlags: 0
m_CorrespondingSourceObject: {fileID: 0}
m_PrefabInstance: {fileID: 0}
m_PrefabAsset: {fileID: 0}
serializedVersion: 6
m_Component:
- component: {fileID: 1329164564}
- component: {fileID: 1329164566}
- component: {fileID: 1329164565}
m_Layer: 5
m_Name: Image
m_TagString: Untagged
m_Icon: {fileID: 0}
m_NavMeshLayer: 0
m_StaticEditorFlags: 0
m_IsActive: 0
--- !u!224 &1329164564
RectTransform:
m_ObjectHideFlags: 0
m_CorrespondingSourceObject: {fileID: 0}
m_PrefabInstance: {fileID: 0}
m_PrefabAsset: {fileID: 0}
m_GameObject: {fileID: 1329164563}
m_LocalRotation: {x: 0, y: 0, z: 0, w: 1}
m_LocalPosition: {x: 0, y: 0, z: 0}
m_LocalScale: {x: 1, y: 1, z: 1}
m_ConstrainProportionsScale: 0
m_Children: []
m_Father: {fileID: 1939570967}
m_LocalEulerAnglesHint: {x: 0, y: 0, z: 0}
m_AnchorMin: {x: 0.5, y: 0.5}
m_AnchorMax: {x: 0.5, y: 0.5}
m_AnchoredPosition: {x: 0, y: 0}
m_SizeDelta: {x: 1080, y: 1920}
m_Pivot: {x: 0.5, y: 0.5}
--- !u!114 &1329164565
MonoBehaviour:
m_ObjectHideFlags: 0
m_CorrespondingSourceObject: {fileID: 0}
m_PrefabInstance: {fileID: 0}
m_PrefabAsset: {fileID: 0}
m_GameObject: {fileID: 1329164563}
m_Enabled: 1
m_EditorHideFlags: 0
m_Script: {fileID: 11500000, guid: fe87c0e1cc204ed48ad3b37840f39efc, type: 3}
m_Name:
m_EditorClassIdentifier:
m_Material: {fileID: 0}
m_Color: {r: 1, g: 1, b: 1, a: 0.5803922}
m_RaycastTarget: 1
m_RaycastPadding: {x: 0, y: 0, z: 0, w: 0}
m_Maskable: 1
m_OnCullStateChanged:
m_PersistentCalls:
m_Calls: []
m_Sprite: {fileID: 21300000, guid: c2e8325577e88827495d112ded00f5f6, type: 3}
m_Type: 0
m_PreserveAspect: 0
m_FillCenter: 1
m_FillMethod: 4
m_FillAmount: 1
m_FillClockwise: 1
m_FillOrigin: 0
m_UseSpriteMesh: 0
m_PixelsPerUnitMultiplier: 1
--- !u!222 &1329164566
CanvasRenderer:
m_ObjectHideFlags: 0
m_CorrespondingSourceObject: {fileID: 0}
m_PrefabInstance: {fileID: 0}
m_PrefabAsset: {fileID: 0}
m_GameObject: {fileID: 1329164563}
m_CullTransparentMesh: 1
--- !u!1 &1386430227 --- !u!1 &1386430227
GameObject: GameObject:
m_ObjectHideFlags: 0 m_ObjectHideFlags: 0
@ -1616,7 +1766,7 @@ RectTransform:
m_AnchorMin: {x: 0, y: 0} m_AnchorMin: {x: 0, y: 0}
m_AnchorMax: {x: 1, y: 1} m_AnchorMax: {x: 1, y: 1}
m_AnchoredPosition: {x: 0, y: 0} m_AnchoredPosition: {x: 0, y: 0}
m_SizeDelta: {x: 100, y: 100} m_SizeDelta: {x: 0, y: 0}
m_Pivot: {x: 0.5, y: 0.5} m_Pivot: {x: 0.5, y: 0.5}
--- !u!222 &1391297539 --- !u!222 &1391297539
CanvasRenderer: CanvasRenderer:
@ -1892,6 +2042,44 @@ CanvasRenderer:
m_PrefabAsset: {fileID: 0} m_PrefabAsset: {fileID: 0}
m_GameObject: {fileID: 1510993347} m_GameObject: {fileID: 1510993347}
m_CullTransparentMesh: 1 m_CullTransparentMesh: 1
--- !u!1 &1563200506
GameObject:
m_ObjectHideFlags: 0
m_CorrespondingSourceObject: {fileID: 0}
m_PrefabInstance: {fileID: 0}
m_PrefabAsset: {fileID: 0}
serializedVersion: 6
m_Component:
- component: {fileID: 1563200507}
m_Layer: 5
m_Name: Footer (1)
m_TagString: Untagged
m_Icon: {fileID: 0}
m_NavMeshLayer: 0
m_StaticEditorFlags: 0
m_IsActive: 1
--- !u!224 &1563200507
RectTransform:
m_ObjectHideFlags: 0
m_CorrespondingSourceObject: {fileID: 0}
m_PrefabInstance: {fileID: 0}
m_PrefabAsset: {fileID: 0}
m_GameObject: {fileID: 1563200506}
m_LocalRotation: {x: 0, y: 0, z: 0, w: 1}
m_LocalPosition: {x: 0, y: 0, z: 0}
m_LocalScale: {x: 1, y: 1, z: 1}
m_ConstrainProportionsScale: 0
m_Children:
- {fileID: 1230984861}
- {fileID: 1023304015}
- {fileID: 1244346417}
m_Father: {fileID: 1939570967}
m_LocalEulerAnglesHint: {x: 0, y: 0, z: 0}
m_AnchorMin: {x: 0.5, y: 0.5}
m_AnchorMax: {x: 0.5, y: 0.5}
m_AnchoredPosition: {x: 0, y: 1}
m_SizeDelta: {x: 100, y: 100}
m_Pivot: {x: 0.5, y: 0.5}
--- !u!1 &1632988511 --- !u!1 &1632988511
GameObject: GameObject:
m_ObjectHideFlags: 0 m_ObjectHideFlags: 0
@ -1930,7 +2118,9 @@ MonoBehaviour:
_idleVideo: {fileID: 378183297} _idleVideo: {fileID: 378183297}
_userImager: {fileID: 1391297540} _userImager: {fileID: 1391297540}
_process: {fileID: 1701610193} _process: {fileID: 1701610193}
_faceGuide: {fileID: 226662305}
_hintText: {fileID: 1386430229} _hintText: {fileID: 1386430229}
_notifyText: {fileID: 334911201}
_notify: {fileID: 338289613} _notify: {fileID: 338289613}
_hint: {fileID: 80211465} _hint: {fileID: 80211465}
_loading: {fileID: 1087577779} _loading: {fileID: 1087577779}
@ -2006,7 +2196,7 @@ MonoBehaviour:
m_Name: m_Name:
m_EditorClassIdentifier: m_EditorClassIdentifier:
m_Material: {fileID: 0} m_Material: {fileID: 0}
m_Color: {r: 0.4339623, g: 0.2504109, b: 0.13305448, a: 0.7294118} m_Color: {r: 0, g: 0.6862745, b: 0, a: 0.7294118}
m_RaycastTarget: 1 m_RaycastTarget: 1
m_RaycastPadding: {x: 0, y: 0, z: 0, w: 0} m_RaycastPadding: {x: 0, y: 0, z: 0, w: 0}
m_Maskable: 1 m_Maskable: 1
@ -2208,9 +2398,9 @@ RectTransform:
- {fileID: 1796482201} - {fileID: 1796482201}
- {fileID: 282395848} - {fileID: 282395848}
- {fileID: 338289614} - {fileID: 338289614}
- {fileID: 162662061}
- {fileID: 879095298}
- {fileID: 80211466} - {fileID: 80211466}
- {fileID: 1329164564}
- {fileID: 1563200507}
m_Father: {fileID: 0} m_Father: {fileID: 0}
m_LocalEulerAnglesHint: {x: 0, y: 0, z: 0} m_LocalEulerAnglesHint: {x: 0, y: 0, z: 0}
m_AnchorMin: {x: 0, y: 0} m_AnchorMin: {x: 0, y: 0}

View File

@ -7,6 +7,7 @@ using UnityEngine;
using UnityEngine.Serialization; using UnityEngine.Serialization;
using UnityEngine.UI; using UnityEngine.UI;
using UnityEngine.Video; using UnityEngine.Video;
using GadGame.Network;
namespace GadGame.Scripts.Coffee namespace GadGame.Scripts.Coffee
{ {
@ -21,7 +22,9 @@ namespace GadGame.Scripts.Coffee
[SerializeField] private VideoPlayer _idleVideo; [SerializeField] private VideoPlayer _idleVideo;
[SerializeField] private LoadImageEncoded _userImager; [SerializeField] private LoadImageEncoded _userImager;
[SerializeField] private Image _process; [SerializeField] private Image _process;
[SerializeField] private Image _faceGuide;
[SerializeField] private TextMeshProUGUI _hintText; [SerializeField] private TextMeshProUGUI _hintText;
[SerializeField] private TextMeshProUGUI _notifyText;
[SerializeField] private GameObject _notify; [SerializeField] private GameObject _notify;
[SerializeField] private GameObject _hint; [SerializeField] private GameObject _hint;
[SerializeField] private Image _loading; [SerializeField] private Image _loading;
@ -35,8 +38,6 @@ namespace GadGame.Scripts.Coffee
private async void Awake() private async void Awake()
{ {
await P4PGraphqlManager.Instance.JoinPromotion();
await P4PGraphqlManager.Instance.SubmitGameSession(0);
_idleVideo.targetCameraAlpha = 1; _idleVideo.targetCameraAlpha = 1;
_loading.transform.DOLocalRotate(new Vector3(0, 0, 360), 10 / _loadingSpeed, RotateMode.FastBeyond360) _loading.transform.DOLocalRotate(new Vector3(0, 0, 360), 10 / _loadingSpeed, RotateMode.FastBeyond360)
.SetLoops(-1) .SetLoops(-1)
@ -45,6 +46,7 @@ namespace GadGame.Scripts.Coffee
_notify.SetActive(false); _notify.SetActive(false);
_hint.SetActive(false); _hint.SetActive(false);
_faceGuide.enabled = true;
} }
private void OnEnable() private void OnEnable()
@ -81,6 +83,13 @@ namespace GadGame.Scripts.Coffee
} }
// _hintText.text = _loadingTexts[_indexText]; // _hintText.text = _loadingTexts[_indexText];
} }
if (UdpSocket.Instance.DataReceived.AgeMax != 0)
{
_notifyText.text = UdpSocket.Instance.DataReceived.Gender < 0.5 ? "Hmmm, để xem kiếp trước bạn là ai nào, chàng trai" : "Hmmm, để xem kiếp trước bạn là ai nào, cô gái";
} else {
_notifyText.text = "Khuấy đều tay, kiếp trước của bạn sẽ hiện ra, chờ một chút nhé";
}
} }
private void Play(bool engage) { private void Play(bool engage) {
@ -94,8 +103,9 @@ namespace GadGame.Scripts.Coffee
_isLoading = false; _isLoading = false;
_loading.DOFade(0, 0.5f); _loading.DOFade(0, 0.5f);
// _hintText.text = desc; // _hintText.text = desc;
_notify.SetActive(false); // _notify.SetActive(false);
_hint.SetActive(false); _hint.SetActive(false);
_notifyText.text = UdpSocket.Instance.DataReceived.Description;
} }
private void OnGetEncodeImage(string filePath) private void OnGetEncodeImage(string filePath)
@ -111,6 +121,10 @@ namespace GadGame.Scripts.Coffee
// _hintText.text = _loadingTexts[_indexText]; // _hintText.text = _loadingTexts[_indexText];
_loading.DOFade(1, 1f); _loading.DOFade(1, 1f);
_hint.SetActive(true); _hint.SetActive(true);
// _notifyText.text = UdpSocket.Instance.DataReceived.Gender < 0.5 ? "Hmmm, có vẻ thú vị đấy chàng trai" : "Hmmm, có vẻ thú vị đấy cô gái";
// _notifyText.text = "Hmmm, tiền kiếp có vẻ thú vị đấy nhỉ!!!";
_faceGuide.enabled = false;
UdpSocket.Instance.SendDataToPython("Begin");
} }
private async void SetPlayVideo(bool value){ private async void SetPlayVideo(bool value){
@ -119,13 +133,22 @@ namespace GadGame.Scripts.Coffee
{ {
_idleVideo.targetCameraAlpha += Time.deltaTime * 3; _idleVideo.targetCameraAlpha += Time.deltaTime * 3;
await UniTask.Yield(); await UniTask.Yield();
if(_idleVideo == null){
return;
}
} }
_idleVideo.targetCameraAlpha = 1; _idleVideo.targetCameraAlpha = 1;
} else { } else {
while (_idleVideo.targetCameraAlpha > 0) while (_idleVideo.targetCameraAlpha > 0)
{ {
if(_idleVideo == null){
return;
}
_idleVideo.targetCameraAlpha -= Time.deltaTime * 3; _idleVideo.targetCameraAlpha -= Time.deltaTime * 3;
await UniTask.Yield(); await UniTask.Yield();
if(_idleVideo == null){
return;
}
} }
_idleVideo.targetCameraAlpha = 0; _idleVideo.targetCameraAlpha = 0;
} }
@ -135,6 +158,5 @@ namespace GadGame.Scripts.Coffee
// _hintText.text = _texts[1]; // _hintText.text = _texts[1];
_process.fillAmount = 1 - progress ; _process.fillAmount = 1 - progress ;
} }
} }
} }

View File

@ -24,14 +24,13 @@ namespace GadGame
{ {
base.Awake(); base.Awake();
DontDestroyOnLoad(gameObject); DontDestroyOnLoad(gameObject);
string _macAddress = GetMacAddressString();
Debug.Log(_macAddress);
await P4PGraphqlManager.Instance.LoginMachine(_macAddress);
await P4PGraphqlManager.Instance.CreateGuest();
} }
private async void Start() private async void Start()
{ {
string _macAddress = GetMacAddressString();
Debug.Log(_macAddress);
await P4PGraphqlManager.Instance.LoginMachine(_macAddress);
await LoadSceneManager.Instance.LoadSceneWithTransitionAsync(SceneFlowConfig.PassByScene.ScenePath); await LoadSceneManager.Instance.LoadSceneWithTransitionAsync(SceneFlowConfig.PassByScene.ScenePath);
SetState<IdleState>(); SetState<IdleState>();
} }

View File

@ -104,33 +104,33 @@ namespace GadGame.MiniGame
// var inputNormalize = new Vector2((inputData.x - 213.33f)/ 213.33f, inputData.y / 480); // var inputNormalize = new Vector2((inputData.x - 213.33f)/ 213.33f, inputData.y / 480);
// var inputNormalize = new Vector2(inputData.x/ 200, inputData.y / 480); // var inputNormalize = new Vector2(inputData.x/ 200, inputData.y / 480);
receivedData = UdpSocket.Instance.DataReceived.PosPoints; // receivedData = UdpSocket.Instance.DataReceived.PosPoints;
for (int i = 0; i < Objects.Length; i++) // for (int i = 0; i < Objects.Length; i++)
{ // {
var inputNormalize = new Vector2((receivedData[i].x - 213.33f)/ 213.33f, receivedData[i].y / 480); // var inputNormalize = new Vector2((receivedData[i].x - 213.33f)/ 213.33f, receivedData[i].y / 480);
if (i == 0) // if (i == 0)
{ // {
var input = new Vector2 // var input = new Vector2
{ // {
x = Mathf.Lerp(0, _canvas.pixelRect.width, inputNormalize.x), // x = Mathf.Lerp(0, _canvas.pixelRect.width, inputNormalize.x),
y = -Mathf.Lerp(0, _canvas.pixelRect.height, inputNormalize.y) // y = -Mathf.Lerp(0, _canvas.pixelRect.height, inputNormalize.y)
}; // };
if (input != Vector2.zero) // if (input != Vector2.zero)
{ // {
var mousePos = input; // var mousePos = input;
var pos = _camera.ScreenToWorldPoint(mousePos); // var pos = _camera.ScreenToWorldPoint(mousePos);
var currentPosition = _basket.Position; // var currentPosition = _basket.Position;
pos.x *= -1; // pos.x *= -1;
pos.y = currentPosition.y; // pos.y = currentPosition.y;
pos.z = 0; // pos.z = 0;
currentPosition= Vector3.Lerp(currentPosition, pos, _lerp * Time.deltaTime); // currentPosition= Vector3.Lerp(currentPosition, pos, _lerp * Time.deltaTime);
currentPosition.x = Mathf.Clamp(currentPosition.x, -2.25f, 2.25f); // currentPosition.x = Mathf.Clamp(currentPosition.x, -2.25f, 2.25f);
var dirMove = (_preFramePosition - currentPosition).normalized; // var dirMove = (_preFramePosition - currentPosition).normalized;
_basket.transform.DORotate(new Vector3(0, 0, 10 * dirMove.x), 0.2f); // _basket.transform.DORotate(new Vector3(0, 0, 10 * dirMove.x), 0.2f);
_basket.Position = currentPosition; // _basket.Position = currentPosition;
} // }
} // }
// var pos_pose = new Vector2(); // var pos_pose = new Vector2();
// var x = Mathf.Clamp01(receivedData[i].x / 640); // var x = Mathf.Clamp01(receivedData[i].x / 640);
@ -140,7 +140,7 @@ namespace GadGame.MiniGame
// pos_pose.y = y; // pos_pose.y = y;
// Objects[i].localPosition = pos_pose * -1; // Objects[i].localPosition = pos_pose * -1;
} // }
} }

View File

@ -121,7 +121,7 @@ namespace GadGame.Network
promotionId = _promotionId promotionId = _promotionId
} }
}); });
Debug.Log(_userAccessToken);
_graphApi.SetAuthToken(_userAccessToken); _graphApi.SetAuthToken(_userAccessToken);
var request = await _graphApi.Post(query); var request = await _graphApi.Post(query);
if (request.result == UnityWebRequest.Result.Success) if (request.result == UnityWebRequest.Result.Success)
@ -139,7 +139,7 @@ namespace GadGame.Network
public async Task<bool> SubmitGameSession(int gameScore) public async Task<bool> SubmitGameSession(int gameScore)
{ {
var endTime = DateTime.Now.AddSeconds(-1); var endTime = DateTime.Now;
var query = _graphApi.GetQueryByName("SubmitGameSession", GraphApi.Query.Type.Mutation); var query = _graphApi.GetQueryByName("SubmitGameSession", GraphApi.Query.Type.Mutation);
query.SetArgs(new query.SetArgs(new
{ {
@ -183,7 +183,8 @@ namespace GadGame.Network
var socket = await _graphApi.Subscribe(query); var socket = await _graphApi.Subscribe(query);
if (socket.State == WebSocketState.Open) if (socket.State == WebSocketState.Open)
{ {
var link = $"https://play4promo.online/brands/{_promotionId}/scan-qr?token={_userAccessToken}&img="; var imageNameFile = UdpSocket.Instance.DataReceived.FileName;
var link = $"https://play4promo.online/brands/{_promotionId}/scan-qr?token={_userAccessToken}&img={imageNameFile}";
Debug.Log(link); Debug.Log(link);
return EncodeTextToQrCode(link); return EncodeTextToQrCode(link);
} }

View File

@ -19,9 +19,10 @@ namespace GadGame.Network
public float Gender; public float Gender;
public int AgeMin; public int AgeMin;
public int AgeMax; public int AgeMax;
public Vector2[] PosPoints; // public Vector2[] PosPoints;
public string StreamingData; public string StreamingData;
[FormerlySerializedAs("Success")] public bool GenerateImageSuccess; [FormerlySerializedAs("Success")] public bool GenerateImageSuccess;
public string Description; public string Description;
public string FileName;
} }
} }

View File

@ -1,4 +1,5 @@
using System; using System;
using Cysharp.Threading.Tasks;
using GadGame.Event.Customs; using GadGame.Event.Customs;
using GadGame.Event.Type; using GadGame.Event.Type;
using GadGame.Network; using GadGame.Network;
@ -18,6 +19,7 @@ public class QRShowNewCTA : MonoBehaviour
{ {
_descText.text = UdpSocket.Instance.DataReceived.Description; _descText.text = UdpSocket.Instance.DataReceived.Description;
_rawImage.texture = await P4PGraphqlManager.Instance.GetQrLink(); _rawImage.texture = await P4PGraphqlManager.Instance.GetQrLink();
// _timer.SetDuration(60).Begin(); // _timer.SetDuration(60).Begin();
} }

View File

@ -14,19 +14,19 @@ namespace GadGame.State.MainFlowState
public override async void Enter() public override async void Enter()
{ {
await LoadSceneManager.Instance.LoadSceneAsync(Runner.SceneFlowConfig.CTASceneMale.ScenePath);
Runner.ScanSuccess.Register(OnScanSuccess);
_leaveTimer = 0; _leaveTimer = 0;
_scanSuccess = false;
Runner.ScanSuccess.Register(OnScanSuccess);
await LoadSceneManager.Instance.LoadSceneAsync(Runner.SceneFlowConfig.CTASceneMale.ScenePath);
} }
public override void Update(float time) public override void Update(float time)
{ {
Runner.EncodeImage.Raise(UdpSocket.Instance.DataReceived.StreamingData); Runner.EncodeImage.Raise(UdpSocket.Instance.DataReceived.StreamingData);
if(_scanSuccess) if(_scanSuccess || time >= 60)
{ {
UdpSocket.Instance.SendDataToPython("End");
Runner.SetState<IdleState>(); Runner.SetState<IdleState>();
UdpSocket.Instance.SendDataToPython("End");
return; return;
} }
@ -36,15 +36,16 @@ namespace GadGame.State.MainFlowState
if ( _leaveTimer >= 10) if ( _leaveTimer >= 10)
{ {
Runner.SetState<IdleState>(); Runner.SetState<IdleState>();
UdpSocket.Instance.SendDataToPython("End");
} }
} else { } else {
_leaveTimer = 0; _leaveTimer = 0;
} }
if (time >= 60) // if (time >= 30)
{ // {
Runner.SetState<IdleState>(); // Runner.SetState<IdleState>();
UdpSocket.Instance.SendDataToPython("End"); // UdpSocket.Instance.SendDataToPython("End");
} // }
} }
public override void Exit() public override void Exit()
@ -56,7 +57,6 @@ namespace GadGame.State.MainFlowState
private async void OnScanSuccess() { private async void OnScanSuccess() {
_scanSuccess = true; _scanSuccess = true;
await UniTask.Delay(TimeSpan.FromSeconds(10)); await UniTask.Delay(TimeSpan.FromSeconds(10));
Runner.SetState<IdleState>();
} }
private void LeaveComplete() private void LeaveComplete()

View File

@ -31,14 +31,14 @@ namespace GadGame.State.MainFlowState
return; return;
} }
if (!UdpSocket.Instance.DataReceived.Ready) _readyTimer = 3;
Runner.ReadyCountDown.Raise(_readyTimer / 3);
_readyTimer -= Time.deltaTime; _readyTimer -= Time.deltaTime;
if (_readyTimer <= 0) if (_readyTimer <= 0)
{ {
_readyTimer = 0; _readyTimer = 0;
Runner.SetState<WaitForImageState>(); Runner.SetState<WaitForImageState>();
} }
if (!UdpSocket.Instance.DataReceived.Ready) _readyTimer = 3;
Runner.ReadyCountDown.Raise(_readyTimer / 3);
if (time >= 2) if (time >= 2)
{ {
// Runner.ReadyCountDown(_readyTimer); // Runner.ReadyCountDown(_readyTimer);

View File

@ -7,8 +7,8 @@ namespace GadGame.State.MainFlowState
{ {
public override async void Enter() public override async void Enter()
{ {
await UniTask.Delay(1000); // await UniTask.Delay(1000);
Runner.PlayPassByAnim.Raise(false); // Runner.PlayPassByAnim.Raise(false);
Runner.PlayVideo.Raise(true); Runner.PlayVideo.Raise(true);
} }

View File

@ -10,7 +10,7 @@ namespace GadGame.State.MainFlowState
public override void Enter() public override void Enter()
{ {
// await LoadSceneManager.Instance.LoadSceneWithTransitionAsync(Runner.SceneFlowConfig.PassByScene.ScenePath); // await LoadSceneManager.Instance.LoadSceneWithTransitionAsync(Runner.SceneFlowConfig.PassByScene.ScenePath);
Runner.PlayPassByAnim.Raise(false); // Runner.PlayPassByAnim.Raise(false);
} }
public override void Update(float time) public override void Update(float time)

View File

@ -12,7 +12,7 @@ namespace GadGame.State.MainFlowState
public override void Update(float time) public override void Update(float time)
{ {
Runner.EncodeImage.Raise(UdpSocket.Instance.DataReceived.StreamingData); Runner.EncodeImage.Raise(UdpSocket.Instance.DataReceived.StreamingData);
if (time > 3.0f) if(time >= 2.0f)
{ {
Runner.SetState<CTAState>(); Runner.SetState<CTAState>();
} }

View File

@ -1,19 +1,23 @@
using GadGame.Network; using GadGame.Network;
namespace GadGame.State.MainFlowState namespace GadGame.State.MainFlowState
{ {
public class WaitForImageState : State<MainFlow> public class WaitForImageState : State<MainFlow>
{ {
public override void Enter() private bool _isLogin;
public async override void Enter()
{ {
_isLogin = false;
await P4PGraphqlManager.Instance.CreateGuest();
await P4PGraphqlManager.Instance.JoinPromotion();
await P4PGraphqlManager.Instance.SubmitGameSession(0);
_isLogin = true;
Runner.EngageReady.Raise(); Runner.EngageReady.Raise();
UdpSocket.Instance.SendDataToPython("Begin");
} }
public override void Update(float time) public override void Update(float time)
{ {
if (UdpSocket.Instance.DataReceived.GenerateImageSuccess) if (UdpSocket.Instance.DataReceived.GenerateImageSuccess && _isLogin)
{ {
Runner.SetState<ShowImageState>(); Runner.SetState<ShowImageState>();
} }

View File

@ -0,0 +1,18 @@
fileFormatVersion: 2
guid: 8ea96094d84e079a9b3d6d20df5b925d
VideoClipImporter:
externalObjects: {}
serializedVersion: 2
frameRange: 0
startFrame: -1
endFrame: -1
colorSpace: 0
deinterlace: 0
encodeAlpha: 0
flipVertical: 0
flipHorizontal: 0
importAudio: 1
targetSettings: {}
userData:
assetBundleName:
assetBundleVariant:

View File

@ -0,0 +1,18 @@
fileFormatVersion: 2
guid: adc516ee7ca030f02a69b62fcb3436e2
VideoClipImporter:
externalObjects: {}
serializedVersion: 2
frameRange: 0
startFrame: -1
endFrame: -1
colorSpace: 0
deinterlace: 0
encodeAlpha: 0
flipVertical: 0
flipHorizontal: 0
importAudio: 1
targetSettings: {}
userData:
assetBundleName:
assetBundleVariant:

Binary file not shown.

After

Width:  |  Height:  |  Size: 108 KiB

View File

@ -0,0 +1,140 @@
fileFormatVersion: 2
guid: c2e8325577e88827495d112ded00f5f6
TextureImporter:
internalIDToNameTable: []
externalObjects: {}
serializedVersion: 12
mipmaps:
mipMapMode: 0
enableMipMap: 0
sRGBTexture: 1
linearTexture: 0
fadeOut: 0
borderMipMap: 0
mipMapsPreserveCoverage: 0
alphaTestReferenceValue: 0.5
mipMapFadeDistanceStart: 1
mipMapFadeDistanceEnd: 3
bumpmap:
convertToNormalMap: 0
externalNormalMap: 0
heightScale: 0.25
normalMapFilter: 0
flipGreenChannel: 0
isReadable: 0
streamingMipmaps: 0
streamingMipmapsPriority: 0
vTOnly: 0
ignoreMipmapLimit: 0
grayScaleToAlpha: 0
generateCubemap: 6
cubemapConvolution: 0
seamlessCubemap: 0
textureFormat: 1
maxTextureSize: 2048
textureSettings:
serializedVersion: 2
filterMode: 1
aniso: 1
mipBias: 0
wrapU: 1
wrapV: 1
wrapW: 0
nPOTScale: 0
lightmap: 0
compressionQuality: 50
spriteMode: 1
spriteExtrude: 1
spriteMeshType: 1
alignment: 0
spritePivot: {x: 0.5, y: 0.5}
spritePixelsToUnits: 100
spriteBorder: {x: 0, y: 0, z: 0, w: 0}
spriteGenerateFallbackPhysicsShape: 1
alphaUsage: 1
alphaIsTransparency: 1
spriteTessellationDetail: -1
textureType: 8
textureShape: 1
singleChannelComponent: 0
flipbookRows: 1
flipbookColumns: 1
maxTextureSizeSet: 0
compressionQualitySet: 0
textureFormatSet: 0
ignorePngGamma: 0
applyGammaDecoding: 0
swizzle: 50462976
cookieLightType: 0
platformSettings:
- serializedVersion: 3
buildTarget: DefaultTexturePlatform
maxTextureSize: 2048
resizeAlgorithm: 0
textureFormat: -1
textureCompression: 1
compressionQuality: 50
crunchedCompression: 0
allowsAlphaSplitting: 0
overridden: 0
ignorePlatformSupport: 0
androidETC2FallbackOverride: 0
forceMaximumCompressionQuality_BC6H_BC7: 0
- serializedVersion: 3
buildTarget: Standalone
maxTextureSize: 2048
resizeAlgorithm: 0
textureFormat: -1
textureCompression: 1
compressionQuality: 50
crunchedCompression: 0
allowsAlphaSplitting: 0
overridden: 0
ignorePlatformSupport: 0
androidETC2FallbackOverride: 0
forceMaximumCompressionQuality_BC6H_BC7: 0
- serializedVersion: 3
buildTarget: Android
maxTextureSize: 2048
resizeAlgorithm: 0
textureFormat: -1
textureCompression: 1
compressionQuality: 50
crunchedCompression: 0
allowsAlphaSplitting: 0
overridden: 0
ignorePlatformSupport: 0
androidETC2FallbackOverride: 0
forceMaximumCompressionQuality_BC6H_BC7: 0
- serializedVersion: 3
buildTarget: Server
maxTextureSize: 2048
resizeAlgorithm: 0
textureFormat: -1
textureCompression: 1
compressionQuality: 50
crunchedCompression: 0
allowsAlphaSplitting: 0
overridden: 0
ignorePlatformSupport: 0
androidETC2FallbackOverride: 0
forceMaximumCompressionQuality_BC6H_BC7: 0
spriteSheet:
serializedVersion: 2
sprites: []
outline: []
physicsShape: []
bones: []
spriteID: 5e97eb03825dee720800000000000000
internalID: 0
vertices: []
indices:
edges: []
weights: []
secondaryTextures: []
nameFileIdTable: {}
mipmapLimitGroupName:
pSDRemoveMatte: 0
userData:
assetBundleName:
assetBundleVariant:

View File

@ -0,0 +1,7 @@
fileFormatVersion: 2
guid: 560a88da2bbc70140bed167f0ba7fe37
DefaultImporter:
externalObjects: {}
userData:
assetBundleName:
assetBundleVariant:

View File

@ -0,0 +1,7 @@
fileFormatVersion: 2
guid: fb01be13d6e88ca488dda82150319bfc
DefaultImporter:
externalObjects: {}
userData:
assetBundleName:
assetBundleVariant:

View File

@ -0,0 +1,7 @@
fileFormatVersion: 2
guid: 117dcc671050f5247bd8743b91ecaab7
DefaultImporter:
externalObjects: {}
userData:
assetBundleName:
assetBundleVariant:

View File

@ -0,0 +1,8 @@
fileFormatVersion: 2
guid: 1dac993f5da6b9bd3bbcb68bfd160650
folderAsset: yes
DefaultImporter:
externalObjects: {}
userData:
assetBundleName:
assetBundleVariant:

View File

@ -0,0 +1,75 @@
import requests
class FaceSwap:
def __init__(self):
self.swap_url = "https://faceswap3.p.rapidapi.com/faceswap/v1/image"
self.result_url = "https://faceswap3.p.rapidapi.com/result/"
self.image_save_path = "/image/result.jpg"
def download_image_from_url(self, url, output_file_path):
"""
Downloads an image from a URL and saves it to a local file.
:param url: The URL of the image to download.
:param output_file_path: The path where the downloaded image will be saved.
"""
try:
# Send a GET request to the URL
image_response = requests.get(url)
# Check if the request was successful
if image_response.status_code == 200:
# Save the image data to a file
with open(output_file_path, "wb") as image_file:
image_file.write(image_response.content)
print(f"Image saved as {output_file_path}")
else:
print(f"Failed to download image. HTTP status code: {image_response.status_code}")
except Exception as e:
print(f"An error occurred: {e}")
def swap_face(self, target_image):
payload = ("-----011000010111000001101001\r\nContent-Disposition: form-data; "
f"name=\"target_url\"\r\n\r\n{target_image}\r\n"
"-----011000010111000001101001\r\nContent-Disposition: form-data; "
"name=\"swap_url\"\r\n\r\nhttps://storage.gadgame.com/play4promo/sid/source_file.jpg\r\n"
"-----011000010111000001101001--\r\n\r\n")
headers = {
"x-rapidapi-key": "34c3c2de43msh9d754fc788c3d36p15c896jsn8796ea559bce",
"x-rapidapi-host": "faceswap3.p.rapidapi.com",
"Content-Type": "multipart/form-data; boundary=---011000010111000001101001"
}
response = requests.post(self.swap_url, data=payload, headers=headers)
result = response.json()
request_id = result["image_process_response"]["request_id"]
return request_id
def save_image_result(self, target_image):
image_path = "./image/merge_face.jpg"
request_id = self.swap_face(target_image)
payload = ("-----011000010111000001101001\r\nContent-Disposition: form-data; "
f"name=\"request_id\"\r\n\r\n{request_id}\r\n-----011000010111000001101001--\r\n\r\n")
headers = {
"x-rapidapi-key": "34c3c2de43msh9d754fc788c3d36p15c896jsn8796ea559bce",
"x-rapidapi-host": "faceswap3.p.rapidapi.com",
"Content-Type": "multipart/form-data; boundary=---011000010111000001101001"
}
response = requests.post(self.result_url, data=payload, headers=headers)
print(response.json())
url_image = response.json()["image_process_response"]["result_url"]
self.download_image_from_url(url_image, image_path)
return image_path

View File

@ -0,0 +1,7 @@
fileFormatVersion: 2
guid: 94e6bb77794d400b9adb912271e9941f
DefaultImporter:
externalObjects: {}
userData:
assetBundleName:
assetBundleVariant:

View File

@ -0,0 +1 @@
from .FaceSwapModule import FaceSwap

View File

@ -0,0 +1,7 @@
fileFormatVersion: 2
guid: 0ece29835e4ef583e80898ed95c61e8d
DefaultImporter:
externalObjects: {}
userData:
assetBundleName:
assetBundleVariant:

View File

@ -0,0 +1,8 @@
fileFormatVersion: 2
guid: 7a2fbb0cd5478c9dbaf60946dd7df1e5
folderAsset: yes
DefaultImporter:
externalObjects: {}
userData:
assetBundleName:
assetBundleVariant:

View File

@ -0,0 +1,7 @@
fileFormatVersion: 2
guid: 102ca0ca9634dc185a92ebd06c1cbad2
DefaultImporter:
externalObjects: {}
userData:
assetBundleName:
assetBundleVariant:

View File

@ -0,0 +1,7 @@
fileFormatVersion: 2
guid: 3783e214796cedbd0b506dc384c97dc9
DefaultImporter:
externalObjects: {}
userData:
assetBundleName:
assetBundleVariant:

View File

@ -0,0 +1,8 @@
fileFormatVersion: 2
guid: 000026e2db7ba8c49bc6d9c9a1e58c3b
folderAsset: yes
DefaultImporter:
externalObjects: {}
userData:
assetBundleName:
assetBundleVariant:

View File

@ -0,0 +1,8 @@
fileFormatVersion: 2
guid: 36a249d83335ab499b91133ebc3b8f21
folderAsset: yes
DefaultImporter:
externalObjects: {}
userData:
assetBundleName:
assetBundleVariant:

View File

@ -0,0 +1,7 @@
fileFormatVersion: 2
guid: 5c4431e3ffba678799d85e315420454c
DefaultImporter:
externalObjects: {}
userData:
assetBundleName:
assetBundleVariant:

View File

@ -0,0 +1,7 @@
fileFormatVersion: 2
guid: 68fe493878d64ffa680af32efd02f09d
DefaultImporter:
externalObjects: {}
userData:
assetBundleName:
assetBundleVariant:

View File

@ -0,0 +1,7 @@
fileFormatVersion: 2
guid: 66345c062d5312daaa8e392971691b6f
DefaultImporter:
externalObjects: {}
userData:
assetBundleName:
assetBundleVariant:

View File

@ -0,0 +1,189 @@
import joblib
import os
import sys
import torch
import torch.nn as nn
import numpy as np
import cv2
import copy
import scipy
import pathlib
import warnings
from math import sqrt
# sys.path.append(os.path.abspath(os.path.join(os.path.dirname("__file__"), '..')))
from Face_facial.models.common import Conv
from Face_facial.models.yolo import Model
from Face_facial.utils.datasets import letterbox
from Face_facial.utils.preprocess_utils import align_faces
from Face_facial.utils.general import check_img_size, non_max_suppression_face, \
scale_coords,scale_coords_landmarks,filter_boxes
class YoloDetector:
def __init__(self, weights_name='yolov5n_state_dict.pt', config_name='yolov5n.yaml', device='cuda:0', min_face=100, target_size=None, frontal=False):
"""
weights_name: name of file with network weights in weights/ folder.
config_name: name of .yaml cornfig with network configuration from models/ folder.
device : pytorch device. Use 'cuda:0', 'cuda:1', e.t.c to use gpu or 'cpu' to use cpu.
min_face : minimal face size in pixels.
target_size : target size of smaller image axis (choose lower for faster work). e.g. 480, 720, 1080. Choose None for original resolution.
frontal : if True tries to filter nonfontal faces by keypoints location. CURRENTRLY UNSUPPORTED.
"""
self._class_path = pathlib.Path(__file__).parent.absolute()#os.path.dirname(inspect.getfile(self.__class__))
self.device = device
self.target_size = target_size
self.min_face = min_face
self.frontal = frontal
if self.frontal:
print('Currently unavailable')
# self.anti_profile = joblib.load(os.path.join(self._class_path, 'models/anti_profile/anti_profile_xgb_new.pkl'))
self.detector = self.init_detector(weights_name,config_name)
def init_detector(self,weights_name,config_name):
print(self.device)
model_path = os.path.join(self._class_path,'weights/',weights_name)
print(model_path)
config_path = os.path.join(self._class_path,'models/',config_name)
state_dict = torch.load(model_path)
detector = Model(cfg=config_path)
detector.load_state_dict(state_dict)
detector = detector.to(self.device).float().eval()
for m in detector.modules():
if type(m) in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU]:
m.inplace = True # pytorch 1.7.0 compatibility
elif type(m) is Conv:
m._non_persistent_buffers_set = set() # pytorch 1.6.0 compatibility
return detector
def _preprocess(self,imgs):
"""
Preprocessing image before passing through the network. Resize and conversion to torch tensor.
"""
pp_imgs = []
for img in imgs:
h0, w0 = img.shape[:2] # orig hw
if self.target_size:
r = self.target_size / min(h0, w0) # resize image to img_size
if r < 1:
img = cv2.resize(img, (int(w0 * r), int(h0 * r)), interpolation=cv2.INTER_LINEAR)
imgsz = check_img_size(max(img.shape[:2]), s=self.detector.stride.max()) # check img_size
img = letterbox(img, new_shape=imgsz)[0]
pp_imgs.append(img)
pp_imgs = np.array(pp_imgs)
pp_imgs = pp_imgs.transpose(0, 3, 1, 2)
pp_imgs = torch.from_numpy(pp_imgs).to(self.device)
pp_imgs = pp_imgs.float() # uint8 to fp16/32
pp_imgs /= 255.0 # 0 - 255 to 0.0 - 1.0
return pp_imgs
def _postprocess(self, imgs, origimgs, pred, conf_thres, iou_thres):
"""
Postprocessing of raw pytorch model output.
Returns:
bboxes: list of arrays with 4 coordinates of bounding boxes with format x1,y1,x2,y2.
points: list of arrays with coordinates of 5 facial keypoints (eyes, nose, lips corners).
"""
bboxes = []
landmarks = []
pred = non_max_suppression_face(pred, conf_thres, iou_thres)
for i in range(len(origimgs)):
img_shape = origimgs[i].shape
h,w = img_shape[:2]
gn = torch.tensor(img_shape)[[1, 0, 1, 0]] # normalization gain whwh
gn_lks = torch.tensor(img_shape)[[1, 0, 1, 0, 1, 0, 1, 0, 1, 0]] # normalization gain landmarks
det = pred[i].cpu()
scaled_bboxes = scale_coords(imgs[i].shape[1:], det[:, :4], img_shape).round()
scaled_cords = scale_coords_landmarks(imgs[i].shape[1:], det[:, 5:15], img_shape).round()
for j in range(det.size()[0]):
box = (det[j, :4].view(1, 4) / gn).view(-1).tolist()
box = list(map(int,[box[0]*w,box[1]*h,box[2]*w,box[3]*h]))
if box[3] - box[1] < self.min_face:
continue
lm = (det[j, 5:15].view(1, 10) / gn_lks).view(-1).tolist()
lm = list(map(int,[i*w if j%2==0 else i*h for j,i in enumerate(lm)]))
lm = [lm[i:i+2] for i in range(0,len(lm),2)]
bboxes.append(box)
landmarks.append(lm)
return bboxes, landmarks
def get_frontal_predict(self, box, points):
'''
Make a decision whether face is frontal by keypoints.
Returns:
True if face is frontal, False otherwise.
'''
cur_points = points.astype('int')
x1, y1, x2, y2 = box[0:4]
w = x2-x1
h = y2-y1
diag = sqrt(w**2+h**2)
dist = scipy.spatial.distance.pdist(cur_points)/diag
predict = self.anti_profile.predict(dist.reshape(1, -1))[0]
if predict == 0:
return True
else:
return False
def align(self, img, points):
'''
Align faces, found on images.
Params:
img: Single image, used in predict method.
points: list of keypoints, produced in predict method.
Returns:
crops: list of croped and aligned faces of shape (112,112,3).
'''
crops = [align_faces(img,landmark=np.array(i)) for i in points]
return crops
def predict(self, imgs, conf_thres = 0.3, iou_thres = 0.5):
'''
Get bbox coordinates and keypoints of faces on original image.
Params:
imgs: image or list of images to detect faces on
conf_thres: confidence threshold for each prediction
iou_thres: threshold for NMS (filtering of intersecting bboxes)
Returns:
bboxes: list of arrays with 4 coordinates of bounding boxes with format x1,y1,x2,y2.
points: list of arrays with coordinates of 5 facial keypoints (eyes, nose, lips corners).
'''
one_by_one = False
# Pass input images through face detector
if type(imgs) != list:
images = [imgs]
else:
images = imgs
one_by_one = False
shapes = {arr.shape for arr in images}
if len(shapes) != 1:
one_by_one = True
warnings.warn(f"Can't use batch predict due to different shapes of input images. Using one by one strategy.")
origimgs = copy.deepcopy(images)
if one_by_one:
images = [self._preprocess([img]) for img in images]
bboxes = []
points = []
for num, img in enumerate(images):
with torch.inference_mode():
single_pred = self.detector(img)[0]
bb, pt = self._postprocess(img, [origimgs[num]], single_pred, conf_thres, iou_thres)
bboxes.extend(bb)
points.extend(pt)
else:
images = self._preprocess(images)
with torch.inference_mode(): # change this with torch.no_grad() for pytorch <1.8 compatibility
pred = self.detector(images)[0]
bboxes, points = self._postprocess(images, origimgs, pred, conf_thres, iou_thres)
return bboxes, points
def __call__(self,*args):
return self.predict(*args)
if __name__=='__main__':
a = YoloDetector()

View File

@ -0,0 +1,7 @@
fileFormatVersion: 2
guid: 4d5252cb7281e0814b171d5173c5aaf6
DefaultImporter:
externalObjects: {}
userData:
assetBundleName:
assetBundleVariant:

View File

@ -0,0 +1,8 @@
fileFormatVersion: 2
guid: 39fd43af2df239295a0b15c0cbe17213
folderAsset: yes
DefaultImporter:
externalObjects: {}
userData:
assetBundleName:
assetBundleVariant:

View File

@ -0,0 +1,7 @@
fileFormatVersion: 2
guid: c4793bd6798c55660aa8207e28ab9dee
DefaultImporter:
externalObjects: {}
userData:
assetBundleName:
assetBundleVariant:

View File

@ -0,0 +1,8 @@
fileFormatVersion: 2
guid: 2e9512f11eb38f7d4b8f854271b56770
folderAsset: yes
DefaultImporter:
externalObjects: {}
userData:
assetBundleName:
assetBundleVariant:

View File

@ -0,0 +1,7 @@
fileFormatVersion: 2
guid: 7ac03cdfdced751f1b9617ee875a8de4
DefaultImporter:
externalObjects: {}
userData:
assetBundleName:
assetBundleVariant:

View File

@ -0,0 +1,7 @@
fileFormatVersion: 2
guid: 25cfd8211daecb40aaf22c7e6ae79d25
DefaultImporter:
externalObjects: {}
userData:
assetBundleName:
assetBundleVariant:

View File

@ -0,0 +1,7 @@
fileFormatVersion: 2
guid: 3c77bcf298af2ccc4832b8a3d764e33a
DefaultImporter:
externalObjects: {}
userData:
assetBundleName:
assetBundleVariant:

View File

@ -0,0 +1,7 @@
fileFormatVersion: 2
guid: 03e3b9ba83c3574e6adfc218edb745a0
DefaultImporter:
externalObjects: {}
userData:
assetBundleName:
assetBundleVariant:

View File

@ -0,0 +1,7 @@
fileFormatVersion: 2
guid: 6b84f37da66f2fc03b1640f600fc85e3
DefaultImporter:
externalObjects: {}
userData:
assetBundleName:
assetBundleVariant:

View File

@ -0,0 +1,7 @@
fileFormatVersion: 2
guid: 58a4f03182fa5d888ba4494c5adbc4c3
DefaultImporter:
externalObjects: {}
userData:
assetBundleName:
assetBundleVariant:

View File

@ -0,0 +1,7 @@
fileFormatVersion: 2
guid: d9100d11c73f87916bf44536fd71993d
DefaultImporter:
externalObjects: {}
userData:
assetBundleName:
assetBundleVariant:

View File

@ -0,0 +1,7 @@
fileFormatVersion: 2
guid: bc38e7553aac7f07f8d8256153bbaaf4
DefaultImporter:
externalObjects: {}
userData:
assetBundleName:
assetBundleVariant:

View File

@ -0,0 +1,7 @@
fileFormatVersion: 2
guid: 0d90b6eec6467002699a9b2a72dd0d8b
DefaultImporter:
externalObjects: {}
userData:
assetBundleName:
assetBundleVariant:

View File

@ -0,0 +1,7 @@
fileFormatVersion: 2
guid: 8d834d24de0c9ec99b7030d59e4cf364
DefaultImporter:
externalObjects: {}
userData:
assetBundleName:
assetBundleVariant:

View File

@ -0,0 +1,7 @@
fileFormatVersion: 2
guid: 466634e04c74c7514afa0ea7abc77e44
DefaultImporter:
externalObjects: {}
userData:
assetBundleName:
assetBundleVariant:

View File

@ -0,0 +1,7 @@
fileFormatVersion: 2
guid: 4024281579d6736aabf61d9d6dadca6a
DefaultImporter:
externalObjects: {}
userData:
assetBundleName:
assetBundleVariant:

View File

@ -0,0 +1,7 @@
fileFormatVersion: 2
guid: 56e3d75e88cab38fa82a7caae27ad7a6
DefaultImporter:
externalObjects: {}
userData:
assetBundleName:
assetBundleVariant:

View File

@ -0,0 +1,371 @@
# This file contains modules common to various models
import math
import numpy as np
import requests
import torch
import torch.nn as nn
from PIL import Image, ImageDraw
from Face_facial.utils.datasets import letterbox
from Face_facial.utils.general import non_max_suppression, make_divisible, scale_coords, xyxy2xywh
from Face_facial.utils.plots import color_list
def autopad(k, p=None):  # kernel, padding
    """Return 'same' padding for kernel size ``k`` unless ``p`` is given explicitly."""
    if p is not None:
        return p
    # Auto-pad: half the kernel, element-wise when the kernel is a tuple/list.
    return k // 2 if isinstance(k, int) else [v // 2 for v in k]
def channel_shuffle(x, groups):
    """Interleave channels of ``x`` across ``groups`` (ShuffleNet-style channel shuffle)."""
    n, c, h, w = x.data.size()
    per_group = c // groups
    # Split channels into groups, swap the group axis with the per-group axis,
    # then flatten back so channels from different groups become interleaved.
    shuffled = x.view(n, groups, per_group, h, w)
    shuffled = torch.transpose(shuffled, 1, 2).contiguous()
    return shuffled.view(n, -1, h, w)
def DWConv(c1, c2, k=1, s=1, act=True):
    """Depthwise convolution wrapper around Conv (groups = gcd of in/out channels)."""
    groups = math.gcd(c1, c2)
    return Conv(c1, c2, k, s, g=groups, act=act)
class Conv(nn.Module):
    """Standard convolution block: Conv2d -> BatchNorm2d -> activation."""

    def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True):  # ch_in, ch_out, kernel, stride, padding, groups
        super(Conv, self).__init__()
        self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g, bias=False)
        self.bn = nn.BatchNorm2d(c2)
        # act=True -> SiLU; an nn.Module -> used as given; anything else -> identity.
        if act is True:
            self.act = nn.SiLU()
        elif isinstance(act, nn.Module):
            self.act = act
        else:
            self.act = nn.Identity()

    def forward(self, x):
        return self.act(self.bn(self.conv(x)))

    def fuseforward(self, x):
        # Used after BatchNorm has been fused into the convolution weights.
        return self.act(self.conv(x))
class StemBlock(nn.Module):
    """Stem block: a strided conv whose output feeds two parallel downsample branches."""

    def __init__(self, c1, c2, k=3, s=2, p=None, g=1, act=True):
        super(StemBlock, self).__init__()
        self.stem_1 = Conv(c1, c2, k, s, p, g, act)
        self.stem_2a = Conv(c2, c2 // 2, 1, 1, 0)
        self.stem_2b = Conv(c2 // 2, c2, 3, 2, 1)
        self.stem_2p = nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True)
        self.stem_3 = Conv(c2 * 2, c2, 1, 1, 0)

    def forward(self, x):
        base = self.stem_1(x)
        # Branch A: 1x1 squeeze then 3x3 stride-2 conv; branch B: 2x2 max-pool.
        conv_branch = self.stem_2b(self.stem_2a(base))
        pool_branch = self.stem_2p(base)
        return self.stem_3(torch.cat((conv_branch, pool_branch), 1))
class Bottleneck(nn.Module):
    """Standard bottleneck: 1x1 reduce -> 3x3 conv, with optional residual add."""

    def __init__(self, c1, c2, shortcut=True, g=1, e=0.5):  # ch_in, ch_out, shortcut, groups, expansion
        super(Bottleneck, self).__init__()
        hidden = int(c2 * e)  # hidden channels
        self.cv1 = Conv(c1, hidden, 1, 1)
        self.cv2 = Conv(hidden, c2, 3, 1, g=g)
        # Residual connection only when requested and channel counts match.
        self.add = shortcut and c1 == c2

    def forward(self, x):
        out = self.cv2(self.cv1(x))
        return x + out if self.add else out
class BottleneckCSP(nn.Module):
    """CSP Bottleneck (https://github.com/WongKinYiu/CrossStagePartialNetworks)."""

    def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):  # ch_in, ch_out, number, shortcut, groups, expansion
        super(BottleneckCSP, self).__init__()
        hidden = int(c2 * e)  # hidden channels
        self.cv1 = Conv(c1, hidden, 1, 1)
        self.cv2 = nn.Conv2d(c1, hidden, 1, 1, bias=False)
        self.cv3 = nn.Conv2d(hidden, hidden, 1, 1, bias=False)
        self.cv4 = Conv(2 * hidden, c2, 1, 1)
        self.bn = nn.BatchNorm2d(2 * hidden)  # applied to cat(cv2, cv3)
        self.act = nn.LeakyReLU(0.1, inplace=True)
        self.m = nn.Sequential(*[Bottleneck(hidden, hidden, shortcut, g, e=1.0) for _ in range(n)])

    def forward(self, x):
        # Main path runs the bottleneck stack; bypass path is a bare 1x1 conv.
        main = self.cv3(self.m(self.cv1(x)))
        bypass = self.cv2(x)
        return self.cv4(self.act(self.bn(torch.cat((main, bypass), dim=1))))
class C3(nn.Module):
    """CSP Bottleneck with 3 convolutions."""

    def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):  # ch_in, ch_out, number, shortcut, groups, expansion
        super(C3, self).__init__()
        hidden = int(c2 * e)  # hidden channels
        self.cv1 = Conv(c1, hidden, 1, 1)
        self.cv2 = Conv(c1, hidden, 1, 1)
        self.cv3 = Conv(2 * hidden, c2, 1)  # act=FReLU(c2)
        self.m = nn.Sequential(*[Bottleneck(hidden, hidden, shortcut, g, e=1.0) for _ in range(n)])

    def forward(self, x):
        main = self.m(self.cv1(x))
        bypass = self.cv2(x)
        return self.cv3(torch.cat((main, bypass), dim=1))
class ShuffleV2Block(nn.Module):
    """ShuffleNetV2 unit: split/concat channel branches followed by a channel shuffle."""

    def __init__(self, inp, oup, stride):
        super(ShuffleV2Block, self).__init__()
        if not (1 <= stride <= 3):
            raise ValueError('illegal stride value')
        self.stride = stride
        branch_features = oup // 2
        # With stride 1 the input is split in half, so inp must equal 2 * branch_features.
        assert (self.stride != 1) or (inp == branch_features << 1)
        if self.stride > 1:
            # Downsampling path applied to the whole input tensor.
            self.branch1 = nn.Sequential(
                self.depthwise_conv(inp, inp, kernel_size=3, stride=self.stride, padding=1),
                nn.BatchNorm2d(inp),
                nn.Conv2d(inp, branch_features, kernel_size=1, stride=1, padding=0, bias=False),
                nn.BatchNorm2d(branch_features),
                nn.SiLU(),
            )
        else:
            self.branch1 = nn.Sequential()
        in2 = inp if (self.stride > 1) else branch_features
        self.branch2 = nn.Sequential(
            nn.Conv2d(in2, branch_features, kernel_size=1, stride=1, padding=0, bias=False),
            nn.BatchNorm2d(branch_features),
            nn.SiLU(),
            self.depthwise_conv(branch_features, branch_features, kernel_size=3, stride=self.stride, padding=1),
            nn.BatchNorm2d(branch_features),
            nn.Conv2d(branch_features, branch_features, kernel_size=1, stride=1, padding=0, bias=False),
            nn.BatchNorm2d(branch_features),
            nn.SiLU(),
        )

    @staticmethod
    def depthwise_conv(i, o, kernel_size, stride=1, padding=0, bias=False):
        # groups=i makes this a depthwise convolution.
        return nn.Conv2d(i, o, kernel_size, stride, padding, bias=bias, groups=i)

    def forward(self, x):
        if self.stride == 1:
            left, right = x.chunk(2, dim=1)
            merged = torch.cat((left, self.branch2(right)), dim=1)
        else:
            merged = torch.cat((self.branch1(x), self.branch2(x)), dim=1)
        return channel_shuffle(merged, 2)
class SPP(nn.Module):
    """Spatial pyramid pooling layer used in YOLOv3-SPP."""

    def __init__(self, c1, c2, k=(5, 9, 13)):
        super(SPP, self).__init__()
        hidden = c1 // 2  # hidden channels
        self.cv1 = Conv(c1, hidden, 1, 1)
        self.cv2 = Conv(hidden * (len(k) + 1), c2, 1, 1)
        # One max-pool per kernel size; stride 1 with 'same' padding keeps spatial size.
        self.m = nn.ModuleList([nn.MaxPool2d(kernel_size=ks, stride=1, padding=ks // 2) for ks in k])

    def forward(self, x):
        x = self.cv1(x)
        pooled = [pool(x) for pool in self.m]
        return self.cv2(torch.cat([x] + pooled, 1))
class Focus(nn.Module):
    """Focus wh information into c-space: space-to-depth slicing followed by a conv."""

    def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True):  # ch_in, ch_out, kernel, stride, padding, groups
        super(Focus, self).__init__()
        self.conv = Conv(c1 * 4, c2, k, s, p, g, act)
        # self.contract = Contract(gain=2)

    def forward(self, x):  # x(b,c,w,h) -> y(b,4c,w/2,h/2)
        # Sample every second pixel at four phase offsets and stack along channels.
        patches = [x[..., ::2, ::2], x[..., 1::2, ::2], x[..., ::2, 1::2], x[..., 1::2, 1::2]]
        return self.conv(torch.cat(patches, 1))
        # return self.conv(self.contract(x))
class Contract(nn.Module):
    """Contract width-height into channels, e.g. x(1,64,80,80) -> x(1,256,40,40)."""

    def __init__(self, gain=2):
        super().__init__()
        self.gain = gain

    def forward(self, x):
        N, C, H, W = x.size()  # assumes H and W are divisible by gain
        s = self.gain
        out = x.view(N, C, H // s, s, W // s, s)  # x(1,64,40,2,40,2)
        out = out.permute(0, 3, 5, 1, 2, 4).contiguous()  # x(1,2,2,64,40,40)
        return out.view(N, C * s * s, H // s, W // s)  # x(1,256,40,40)
class Expand(nn.Module):
    """Expand channels into width-height, e.g. x(1,64,80,80) -> x(1,16,160,160)."""

    def __init__(self, gain=2):
        super().__init__()
        self.gain = gain

    def forward(self, x):
        N, C, H, W = x.size()  # assumes C is divisible by gain**2
        s = self.gain
        out = x.view(N, s, s, C // s ** 2, H, W)  # x(1,2,2,16,80,80)
        out = out.permute(0, 3, 4, 1, 5, 2).contiguous()  # x(1,16,80,2,80,2)
        return out.view(N, C // s ** 2, H * s, W * s)  # x(1,16,160,160)
class Concat(nn.Module):
    """Concatenate a list of tensors along a fixed dimension."""

    def __init__(self, dimension=1):
        super(Concat, self).__init__()
        self.d = dimension  # concatenation axis

    def forward(self, x):
        # x is a list/tuple of tensors with matching shapes outside axis self.d.
        return torch.cat(x, self.d)
class NMS(nn.Module):
    # Non-Maximum Suppression (NMS) module: wraps non_max_suppression so it can be
    # appended as the last layer of a model (see Model.nms()).
    conf = 0.25  # confidence threshold
    iou = 0.45  # IoU threshold
    classes = None  # (optional list) filter by class

    def __init__(self):
        super(NMS, self).__init__()

    def forward(self, x):
        # x[0] is the concatenated inference output from Detect; returns a list of
        # per-image detection tensors after suppression.
        return non_max_suppression(x[0], conf_thres=self.conf, iou_thres=self.iou, classes=self.classes)
class autoShape(nn.Module):
    # input-robust model wrapper for passing cv2/np/PIL/torch inputs. Includes preprocessing, inference and NMS
    img_size = 640  # inference size (pixels)
    conf = 0.25  # NMS confidence threshold
    iou = 0.45  # NMS IoU threshold
    classes = None  # (optional list) filter by class

    def __init__(self, model):
        super(autoShape, self).__init__()
        self.model = model.eval()  # wrapped model, kept in eval mode

    def autoshape(self):
        # Idempotent: wrapping an already-wrapped model is a no-op.
        print('autoShape already enabled, skipping... ')  # model already converted to model.autoshape()
        return self

    def forward(self, imgs, size=640, augment=False, profile=False):
        # Inference from various sources. For height=720, width=1280, RGB images example inputs are:
        #   filename:   imgs = 'data/samples/zidane.jpg'
        #   URI:             = 'https://github.com/ultralytics/yolov5/releases/download/v1.0/zidane.jpg'
        #   OpenCV:          = cv2.imread('image.jpg')[:,:,::-1]  # HWC BGR to RGB x(720,1280,3)
        #   PIL:             = Image.open('image.jpg')  # HWC x(720,1280,3)
        #   numpy:           = np.zeros((720,1280,3))  # HWC
        #   torch:           = torch.zeros(16,3,720,1280)  # BCHW
        #   multiple:        = [Image.open('image1.jpg'), Image.open('image2.jpg'), ...]  # list of images
        p = next(self.model.parameters())  # for device and type
        if isinstance(imgs, torch.Tensor):  # torch
            # Already a tensor: skip preprocessing/NMS and run the model directly.
            return self.model(imgs.to(p.device).type_as(p), augment, profile)  # inference

        # Pre-process: normalize every supported input into a list of HWC uint8 arrays.
        n, imgs = (len(imgs), imgs) if isinstance(imgs, list) else (1, [imgs])  # number of images, list of images
        shape0, shape1 = [], []  # image and inference shapes
        for i, im in enumerate(imgs):
            if isinstance(im, str):  # filename or uri
                im = Image.open(requests.get(im, stream=True).raw if im.startswith('http') else im)  # open
            im = np.array(im)  # to numpy
            if im.shape[0] < 5:  # image in CHW
                im = im.transpose((1, 2, 0))  # reverse dataloader .transpose(2, 0, 1)
            im = im[:, :, :3] if im.ndim == 3 else np.tile(im[:, :, None], 3)  # enforce 3ch input
            s = im.shape[:2]  # HWC
            shape0.append(s)  # image shape
            g = (size / max(s))  # gain
            shape1.append([y * g for y in s])
            imgs[i] = im  # update
        # NOTE(review): relies on self.stride being copied onto this wrapper from the
        # wrapped model (see copy_attr in Model.autoshape) — confirm it is always set.
        shape1 = [make_divisible(x, int(self.stride.max())) for x in np.stack(shape1, 0).max(0)]  # inference shape
        x = [letterbox(im, new_shape=shape1, auto=False)[0] for im in imgs]  # pad
        x = np.stack(x, 0) if n > 1 else x[0][None]  # stack
        x = np.ascontiguousarray(x.transpose((0, 3, 1, 2)))  # BHWC to BCHW
        x = torch.from_numpy(x).to(p.device).type_as(p) / 255.  # uint8 to fp16/32

        # Inference
        with torch.no_grad():
            y = self.model(x, augment, profile)[0]  # forward
        y = non_max_suppression(y, conf_thres=self.conf, iou_thres=self.iou, classes=self.classes)  # NMS

        # Post-process: rescale boxes back to the original image coordinates.
        for i in range(n):
            scale_coords(shape1, y[i][:, :4], shape0[i])

        # NOTE(review): self.names is not set in __init__; it is presumably copied
        # from the wrapped model via copy_attr — verify against Model.autoshape.
        return Detections(imgs, y, self.names)
class Detections:
    """Container for YOLOv5 inference results with print/show/save/render helpers."""

    def __init__(self, imgs, pred, names=None):
        super(Detections, self).__init__()
        d = pred[0].device  # device
        # Per-image normalization gains: [w, h, w, h, 1, 1] matching xyxy+conf+cls columns.
        gn = [torch.tensor([*[im.shape[i] for i in [1, 0, 1, 0]], 1., 1.], device=d) for im in imgs]  # normalizations
        self.imgs = imgs  # list of images as numpy arrays
        self.pred = pred  # list of tensors pred[0] = (xyxy, conf, cls)
        self.names = names  # class names
        self.xyxy = pred  # xyxy pixels
        self.xywh = [xyxy2xywh(x) for x in pred]  # xywh pixels
        self.xyxyn = [x / g for x, g in zip(self.xyxy, gn)]  # xyxy normalized
        self.xywhn = [x / g for x, g in zip(self.xywh, gn)]  # xywh normalized
        self.n = len(self.pred)  # number of images

    def display(self, pprint=False, show=False, save=False, render=False):
        """Draw boxes on each image; optionally print a summary, show, save, or render in place."""
        colors = color_list()
        for i, (img, pred) in enumerate(zip(self.imgs, self.pred)):
            # FIX: renamed local from `str`, which shadowed the built-in str().
            summary = f'Image {i + 1}/{len(self.pred)}: {img.shape[0]}x{img.shape[1]} '
            if pred is not None:
                for c in pred[:, -1].unique():
                    n = (pred[:, -1] == c).sum()  # detections per class
                    summary += f'{n} {self.names[int(c)]}s, '  # add to string
                if show or save or render:
                    img = Image.fromarray(img.astype(np.uint8)) if isinstance(img, np.ndarray) else img  # from np
                    for *box, conf, cls in pred:  # xyxy, confidence, class
                        # str += '%s %.2f, ' % (names[int(cls)], conf)  # label
                        ImageDraw.Draw(img).rectangle(box, width=4, outline=colors[int(cls) % 10])  # plot
            if pprint:
                print(summary)
            if show:
                img.show(f'Image {i}')  # show
            if save:
                f = f'results{i}.jpg'
                summary += f"saved to '{f}'"
                img.save(f)  # save
            if render:
                self.imgs[i] = np.asarray(img)

    def print(self):
        self.display(pprint=True)  # print results

    def show(self):
        self.display(show=True)  # show results

    def save(self):
        self.display(save=True)  # save results

    def render(self):
        self.display(render=True)  # render results
        return self.imgs

    def __len__(self):
        return self.n

    def tolist(self):
        # return a list of Detections objects, i.e. 'for result in results.tolist():'
        x = [Detections([self.imgs[i]], [self.pred[i]], self.names) for i in range(self.n)]
        for d in x:
            for k in ['imgs', 'pred', 'xyxy', 'xyxyn', 'xywh', 'xywhn']:
                setattr(d, k, getattr(d, k)[0])  # pop out of list
        return x
class Classify(nn.Module):
    """Classification head: global average pool -> conv -> flatten, x(b,c1,20,20) to x(b,c2)."""

    def __init__(self, c1, c2, k=1, s=1, p=None, g=1):  # ch_in, ch_out, kernel, stride, padding, groups
        super(Classify, self).__init__()
        self.aap = nn.AdaptiveAvgPool2d(1)  # to x(b,c1,1,1)
        self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g)  # to x(b,c2,1,1)
        self.flat = nn.Flatten()

    def forward(self, x):
        inputs = x if isinstance(x, list) else [x]
        # Pool each input to 1x1 and concatenate along channels before the conv.
        pooled = torch.cat([self.aap(t) for t in inputs], 1)
        return self.flat(self.conv(pooled))  # flatten to x(b,c2)

View File

@ -0,0 +1,7 @@
fileFormatVersion: 2
guid: 2123ff669ad858341a6a3c4d08f20035
DefaultImporter:
externalObjects: {}
userData:
assetBundleName:
assetBundleVariant:

View File

@ -0,0 +1,133 @@
# This file contains experimental modules
import numpy as np
import torch
import torch.nn as nn
from Face_facial.models.common import Conv, DWConv
from Face_facial.utils.google_utils import attempt_download
class CrossConv(nn.Module):
    """Cross convolution downsample: a (1,k) conv followed by a (k,1) conv."""

    def __init__(self, c1, c2, k=3, s=1, g=1, e=1.0, shortcut=False):
        # ch_in, ch_out, kernel, stride, groups, expansion, shortcut
        super(CrossConv, self).__init__()
        hidden = int(c2 * e)  # hidden channels
        self.cv1 = Conv(c1, hidden, (1, k), (1, s))
        self.cv2 = Conv(hidden, c2, (k, 1), (s, 1), g=g)
        # Residual connection only when requested and channel counts match.
        self.add = shortcut and c1 == c2

    def forward(self, x):
        out = self.cv2(self.cv1(x))
        return x + out if self.add else out
class Sum(nn.Module):
    """Weighted sum of 2 or more layers (https://arxiv.org/abs/1911.09070)."""

    def __init__(self, n, weight=False):  # n: number of inputs
        super(Sum, self).__init__()
        self.weight = weight  # apply weights boolean
        self.iter = range(n - 1)  # iter object
        if weight:
            # Learnable per-input weights (the first input is always unweighted).
            self.w = nn.Parameter(-torch.arange(1., n) / 2, requires_grad=True)  # layer weights

    def forward(self, x):
        total = x[0]
        if self.weight:
            w = torch.sigmoid(self.w) * 2
            for i in self.iter:
                total = total + x[i + 1] * w[i]
        else:
            for i in self.iter:
                total = total + x[i + 1]
        return total
class GhostConv(nn.Module):
    """Ghost Convolution (https://github.com/huawei-noah/ghostnet)."""

    def __init__(self, c1, c2, k=1, s=1, g=1, act=True):  # ch_in, ch_out, kernel, stride, groups
        super(GhostConv, self).__init__()
        hidden = c2 // 2  # hidden channels
        self.cv1 = Conv(c1, hidden, k, s, None, g, act)
        # Cheap operation: depthwise 5x5 conv generating the "ghost" feature maps.
        self.cv2 = Conv(hidden, hidden, 5, 1, None, hidden, act)

    def forward(self, x):
        primary = self.cv1(x)
        return torch.cat([primary, self.cv2(primary)], 1)
class GhostBottleneck(nn.Module):
    """Ghost Bottleneck (https://github.com/huawei-noah/ghostnet)."""

    def __init__(self, c1, c2, k, s):
        super(GhostBottleneck, self).__init__()
        hidden = c2 // 2
        self.conv = nn.Sequential(
            GhostConv(c1, hidden, 1, 1),  # pointwise
            DWConv(hidden, hidden, k, s, act=False) if s == 2 else nn.Identity(),  # depthwise
            GhostConv(hidden, c2, 1, 1, act=False),  # pointwise-linear
        )
        # Shortcut downsamples when stride 2, otherwise passes the input through.
        self.shortcut = nn.Sequential(
            DWConv(c1, c1, k, s, act=False),
            Conv(c1, c2, 1, 1, act=False),
        ) if s == 2 else nn.Identity()

    def forward(self, x):
        return self.conv(x) + self.shortcut(x)
class MixConv2d(nn.Module):
    """Mixed depthwise conv (https://arxiv.org/abs/1907.09595): parallel convs with different kernel sizes."""

    def __init__(self, c1, c2, k=(1, 3), s=1, equal_ch=True):
        super(MixConv2d, self).__init__()
        groups = len(k)
        if equal_ch:  # equal channels per group
            idx = torch.linspace(0, groups - 1E-6, c2).floor()  # c2 indices
            c_ = [(idx == g).sum() for g in range(groups)]  # intermediate channels
        else:  # equal weight.numel() per group
            b = [c2] + [0] * groups
            a = np.eye(groups + 1, groups, k=-1)
            a -= np.roll(a, 1, axis=1)
            a *= np.array(k) ** 2
            a[0] = 1
            c_ = np.linalg.lstsq(a, b, rcond=None)[0].round()  # solve for equal weight indices, ax = b
        self.m = nn.ModuleList(
            [nn.Conv2d(c1, int(c_[g]), k[g], s, k[g] // 2, bias=False) for g in range(groups)])
        self.bn = nn.BatchNorm2d(c2)
        self.act = nn.LeakyReLU(0.1, inplace=True)

    def forward(self, x):
        # Residual add requires matching input/output channel counts.
        return x + self.act(self.bn(torch.cat([m(x) for m in self.m], 1)))
class Ensemble(nn.ModuleList):
    """Ensemble of models whose detection outputs are concatenated for joint NMS."""

    def __init__(self):
        super(Ensemble, self).__init__()

    def forward(self, x, augment=False):
        outputs = [module(x, augment)[0] for module in self]
        # y = torch.stack(outputs).max(0)[0]  # max ensemble
        # y = torch.stack(outputs).mean(0)  # mean ensemble
        y = torch.cat(outputs, 1)  # nms ensemble
        return y, None  # inference, train output
def attempt_load(weights, map_location=None):
    # Loads an ensemble of models weights=[a,b,c] or a single model weights=[a] or weights=a
    #
    # NOTE(review): torch.load unpickles arbitrary objects and can execute code;
    # only load checkpoints from trusted sources.
    model = Ensemble()
    for w in weights if isinstance(weights, list) else [weights]:
        attempt_download(w)  # fetch the checkpoint if it is not present locally
        model.append(torch.load(w, map_location=map_location)['model'].float().fuse().eval())  # load FP32 model

    # Compatibility updates for checkpoints produced by older PyTorch versions.
    for m in model.modules():
        if type(m) in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU]:
            m.inplace = True  # pytorch 1.7.0 compatibility
        elif type(m) is Conv:
            m._non_persistent_buffers_set = set()  # pytorch 1.6.0 compatibility

    if len(model) == 1:
        return model[-1]  # return model
    else:
        # Multiple checkpoints: return the ensemble, borrowing metadata from the last model.
        print('Ensemble created with %s\n' % weights)
        for k in ['names', 'stride']:
            setattr(model, k, getattr(model[-1], k))
        return model  # return ensemble

View File

@ -0,0 +1,7 @@
fileFormatVersion: 2
guid: 058e0fcc2acd773a3829a4a656cc4683
DefaultImporter:
externalObjects: {}
userData:
assetBundleName:
assetBundleVariant:

View File

@ -0,0 +1,71 @@
"""Exports a YOLOv5 *.pt model to ONNX and TorchScript formats
Usage:
$ export PYTHONPATH="$PWD" && python models/export.py --weights ./weights/yolov5s.pt --img 640 --batch 1
"""
import argparse
import sys
import time
sys.path.append('./') # to run '$ python *.py' files in subdirectories
import torch
import torch.nn as nn
from yoloface.models.experimental import attempt_load
from yoloface.models.common import Conv
from yoloface.utils.activations import Hardswish, SiLU
from yoloface.utils.general import set_logging, check_img_size
import onnx
if __name__ == '__main__':
    # CLI entry point: export a YOLOv5 *.pt checkpoint to ONNX.
    parser = argparse.ArgumentParser()
    parser.add_argument('--weights', type=str, default='./yolov5s.pt', help='weights path')  # from yolov5/models/
    parser.add_argument('--img-size', nargs='+', type=int, default=[640, 640], help='image size')  # height, width
    parser.add_argument('--batch-size', type=int, default=1, help='batch size')
    opt = parser.parse_args()
    opt.img_size *= 2 if len(opt.img_size) == 1 else 1  # expand a single size to [s, s]
    print(opt)
    set_logging()
    t = time.time()

    # Load PyTorch model
    model = attempt_load(opt.weights, map_location=torch.device('cpu'))  # load FP32 model
    model.eval()
    labels = model.names

    # Checks: image sizes must be multiples of the model's maximum stride.
    gs = int(max(model.stride))  # grid size (max stride)
    opt.img_size = [check_img_size(x, gs) for x in opt.img_size]  # verify img_size are gs-multiples

    # Input: a zero tensor is enough for tracing the graph.
    img = torch.zeros(opt.batch_size, 3, *opt.img_size)  # image size(1,3,320,192) iDetection

    # Update model: swap activations for export-friendly equivalents.
    for k, m in model.named_modules():
        m._non_persistent_buffers_set = set()  # pytorch 1.6.0 compatibility
        if isinstance(m, Conv):  # assign export-friendly activations
            if isinstance(m.act, nn.Hardswish):
                m.act = Hardswish()
            elif isinstance(m.act, nn.SiLU):
                m.act = SiLU()
        # elif isinstance(m, models.yolo.Detect):
        #     m.forward = m.forward_export  # assign forward (optional)
    model.model[-1].export = True  # set Detect() layer export=True
    y = model(img)  # dry run

    # ONNX export
    print('\nStarting ONNX export with onnx %s...' % onnx.__version__)
    f = opt.weights.replace('.pt', '.onnx')  # filename
    model.fuse()  # only for ONNX
    torch.onnx.export(model, img, f, verbose=False, opset_version=12, input_names=['data'],
                      output_names=['stride_' + str(int(x)) for x in model.stride])

    # Checks: validate the exported graph.
    onnx_model = onnx.load(f)  # load onnx model
    onnx.checker.check_model(onnx_model)  # check onnx model
    # print(onnx.helper.printable_graph(onnx_model.graph))  # print a human readable model
    print('ONNX export success, saved as %s' % f)

    # Finish
    print('\nExport complete (%.2fs). Visualize with https://github.com/lutzroeder/netron.' % (time.time() - t))

View File

@ -0,0 +1,7 @@
fileFormatVersion: 2
guid: 1511d937c41421e678812f05811fbee4
DefaultImporter:
externalObjects: {}
userData:
assetBundleName:
assetBundleVariant:

View File

@ -0,0 +1,307 @@
import argparse
import logging
import math
import sys
from copy import deepcopy
from pathlib import Path
import torch
import torch.nn as nn
from Face_facial.models.common import Conv, Bottleneck, SPP, DWConv, Focus, BottleneckCSP, C3, ShuffleV2Block, Concat, NMS, autoShape, StemBlock
from Face_facial.models.experimental import MixConv2d, CrossConv
from Face_facial.utils.autoanchor import check_anchor_order
from Face_facial.utils.general import make_divisible, check_file, set_logging
from Face_facial.utils.torch_utils import time_synchronized, fuse_conv_and_bn, model_info, scale_img, initialize_weights, \
select_device, copy_attr
try:
import thop # for FLOPS computation
except ImportError:
thop = None
class Detect(nn.Module):
    # Detection head: per anchor it predicts 4 box values, 1 objectness score,
    # 10 landmark coordinates (5 points, channels 5:15) and class scores.
    stride = None  # strides computed during build
    export = False  # onnx export

    def __init__(self, nc=80, anchors=(), ch=()):  # detection layer
        super(Detect, self).__init__()
        self.nc = nc  # number of classes
        #self.no = nc + 5 # number of outputs per anchor
        self.no = nc + 5 + 10  # number of outputs per anchor (+10 for 5 landmark x,y pairs)
        self.nl = len(anchors)  # number of detection layers
        self.na = len(anchors[0]) // 2  # number of anchors
        self.grid = [torch.zeros(1)] * self.nl  # init grid
        a = torch.tensor(anchors).float().view(self.nl, -1, 2)
        self.register_buffer('anchors', a)  # shape(nl,na,2)
        self.register_buffer('anchor_grid', a.clone().view(self.nl, 1, -1, 1, 1, 2))  # shape(nl,1,na,1,1,2)
        self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch)  # output conv

    def forward(self, x):
        # x = x.copy()  # for profiling
        z = []  # inference output
        # self.training |= self.export
        if self.export:
            # ONNX export path: return raw conv outputs without any decoding.
            for i in range(self.nl):
                x[i] = self.m[i](x[i])
            return x
        for i in range(self.nl):
            x[i] = self.m[i](x[i])  # conv
            bs, _, ny, nx = x[i].shape  # x(bs,255,20,20) to x(bs,3,20,20,85)
            x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous()

            if not self.training:  # inference
                if self.grid[i].shape[2:4] != x[i].shape[2:4]:
                    self.grid[i] = self._make_grid(nx, ny).to(x[i].device)

                # Sigmoid only box/objectness/class channels; landmark offsets (5:15)
                # stay linear. NOTE(review): index 15 is the class channel only when
                # nc == 1 (face detection) — confirm for other class counts.
                y = torch.full_like(x[i], 0)
                y[..., [0,1,2,3,4,15]] = x[i][..., [0,1,2,3,4,15]].sigmoid()
                y[..., 5:15] = x[i][..., 5:15]
                #y = x[i].sigmoid()

                # Decode box center and size from grid offsets and anchor sizes.
                y[..., 0:2] = (y[..., 0:2] * 2. - 0.5 + self.grid[i].to(x[i].device)) * self.stride[i]  # xy
                y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i]  # wh

                #y[..., 5:15] = y[..., 5:15] * 8 - 4
                # Decode each landmark: anchor-scaled offset plus grid-cell origin in pixels.
                y[..., 5:7] = y[..., 5:7] * self.anchor_grid[i] + self.grid[i].to(x[i].device) * self.stride[i]  # landmark x1 y1
                y[..., 7:9] = y[..., 7:9] * self.anchor_grid[i] + self.grid[i].to(x[i].device) * self.stride[i]  # landmark x2 y2
                y[..., 9:11] = y[..., 9:11] * self.anchor_grid[i] + self.grid[i].to(x[i].device) * self.stride[i]  # landmark x3 y3
                y[..., 11:13] = y[..., 11:13] * self.anchor_grid[i] + self.grid[i].to(x[i].device) * self.stride[i]  # landmark x4 y4
                y[..., 13:15] = y[..., 13:15] * self.anchor_grid[i] + self.grid[i].to(x[i].device) * self.stride[i]  # landmark x5 y5

                #y[..., 5:7] = (y[..., 5:7] * 2 -1) * self.anchor_grid[i]  # landmark x1 y1
                #y[..., 7:9] = (y[..., 7:9] * 2 -1) * self.anchor_grid[i]  # landmark x2 y2
                #y[..., 9:11] = (y[..., 9:11] * 2 -1) * self.anchor_grid[i]  # landmark x3 y3
                #y[..., 11:13] = (y[..., 11:13] * 2 -1) * self.anchor_grid[i]  # landmark x4 y4
                #y[..., 13:15] = (y[..., 13:15] * 2 -1) * self.anchor_grid[i]  # landmark x5 y5

                z.append(y.view(bs, -1, self.no))

        # Training: raw per-level maps; inference: (decoded detections, raw maps).
        return x if self.training else (torch.cat(z, 1), x)

    @staticmethod
    def _make_grid(nx=20, ny=20):
        # Grid of (x, y) cell coordinates with shape (1, 1, ny, nx, 2).
        yv, xv = torch.meshgrid([torch.arange(ny), torch.arange(nx)], indexing='ij')
        return torch.stack((xv, yv), 2).view((1, 1, ny, nx, 2)).float()
class Model(nn.Module):
    # YOLOv5-face model assembled from a YAML config: builds the layer list via
    # parse_model, then computes strides/anchors for the trailing Detect head.

    def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None):  # model, input channels, number of classes
        super(Model, self).__init__()
        if isinstance(cfg, dict):
            self.yaml = cfg  # model dict
        else:  # is *.yaml
            import yaml  # for torch hub
            self.yaml_file = Path(cfg).name
            with open(cfg) as f:
                self.yaml = yaml.load(f, Loader=yaml.FullLoader)  # model dict

        # Define model
        ch = self.yaml['ch'] = self.yaml.get('ch', ch)  # input channels
        if nc and nc != self.yaml['nc']:
            # logger.info('Overriding model.yaml nc=%g with nc=%g' % (self.yaml['nc'], nc))
            self.yaml['nc'] = nc  # override yaml value
        self.model, self.save = parse_model(deepcopy(self.yaml), ch=[ch])  # model, savelist
        self.names = [str(i) for i in range(self.yaml['nc'])]  # default names
        # print([x.shape for x in self.forward(torch.zeros(1, ch, 64, 64))])

        # Build strides, anchors: a dummy forward pass measures each detection
        # layer's downsampling factor, then anchors are rescaled to grid units.
        m = self.model[-1]  # Detect()
        if isinstance(m, Detect):
            s = 128  # 2x min stride
            m.stride = torch.tensor([s / x.shape[-2] for x in self.forward(torch.zeros(1, ch, s, s))])  # forward
            m.anchors /= m.stride.view(-1, 1, 1)
            check_anchor_order(m)
            self.stride = m.stride
            self._initialize_biases()  # only run once
            # print('Strides: %s' % m.stride.tolist())

        # Init weights, biases
        initialize_weights(self)
        self.info()
        # logger.info('')

    def forward(self, x, augment=False, profile=False):
        # augment=True runs test-time augmentation (multi-scale + left/right flip)
        # and merges the de-scaled/de-flipped predictions.
        if augment:
            img_size = x.shape[-2:]  # height, width
            s = [1, 0.83, 0.67]  # scales
            f = [None, 3, None]  # flips (2-ud, 3-lr)
            y = []  # outputs
            for si, fi in zip(s, f):
                xi = scale_img(x.flip(fi) if fi else x, si)
                yi = self.forward_once(xi)[0]  # forward
                # cv2.imwrite('img%g.jpg' % s, 255 * xi[0].numpy().transpose((1, 2, 0))[:, :, ::-1])  # save
                yi[..., :4] /= si  # de-scale
                if fi == 2:
                    yi[..., 1] = img_size[0] - yi[..., 1]  # de-flip ud
                elif fi == 3:
                    yi[..., 0] = img_size[1] - yi[..., 0]  # de-flip lr
                y.append(yi)
            return torch.cat(y, 1), None  # augmented inference, train
        else:
            return self.forward_once(x, profile)  # single-scale inference, train

    def forward_once(self, x, profile=False):
        # Sequentially run every layer; y caches outputs of layers listed in
        # self.save so later layers can consume them via their 'from' index.
        y, dt = [], []  # outputs
        for m in self.model:
            if m.f != -1:  # if not from previous layer
                x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f]  # from earlier layers

            if profile:
                # Measure FLOPS (if thop is installed) and wall time per layer.
                o = thop.profile(m, inputs=(x,), verbose=False)[0] / 1E9 * 2 if thop else 0  # FLOPS
                t = time_synchronized()
                for _ in range(10):
                    _ = m(x)
                dt.append((time_synchronized() - t) * 100)
                print('%10.1f%10.0f%10.1fms %-40s' % (o, m.np, dt[-1], m.type))

            x = m(x)  # run
            y.append(x if m.i in self.save else None)  # save output

        if profile:
            print('%.1fms total' % sum(dt))
        return x

    def _initialize_biases(self, cf=None):  # initialize biases into Detect(), cf is class frequency
        # https://arxiv.org/abs/1708.02002 section 3.3
        # cf = torch.bincount(torch.tensor(np.concatenate(dataset.labels, 0)[:, 0]).long(), minlength=nc) + 1.
        m = self.model[-1]  # Detect() module
        for mi, s in zip(m.m, m.stride):  # from
            b = mi.bias.view(m.na, -1)  # conv.bias(255) to (3,85)
            b.data[:, 4] += math.log(8 / (640 / s) ** 2)  # obj (8 objects per 640 image)
            b.data[:, 5:] += math.log(0.6 / (m.nc - 0.99)) if cf is None else torch.log(cf / cf.sum())  # cls
            mi.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)

    def _print_biases(self):
        # Debug helper: dump per-anchor bias statistics of the Detect convs.
        m = self.model[-1]  # Detect() module
        for mi in m.m:  # from
            b = mi.bias.detach().view(m.na, -1).T  # conv.bias(255) to (3,85)
            print(('%6g Conv2d.bias:' + '%10.3g' * 6) % (mi.weight.shape[1], *b[:5].mean(1).tolist(), b[5:].mean()))

    # def _print_weights(self):
    #     for m in self.model.modules():
    #         if type(m) is Bottleneck:
    #             print('%10.3g' % (m.w.detach().sigmoid() * 2))  # shortcut weights

    def fuse(self):  # fuse model Conv2d() + BatchNorm2d() layers
        # Merging BatchNorm into the preceding conv speeds up inference.
        print('Fusing layers... ')
        for m in self.model.modules():
            if type(m) is Conv and hasattr(m, 'bn'):
                m.conv = fuse_conv_and_bn(m.conv, m.bn)  # update conv
                delattr(m, 'bn')  # remove batchnorm
                m.forward = m.fuseforward  # update forward
        self.info()
        return self

    def nms(self, mode=True):  # add or remove NMS module
        present = type(self.model[-1]) is NMS  # last layer is NMS
        if mode and not present:
            print('Adding NMS... ')
            m = NMS()  # module
            m.f = -1  # from
            m.i = self.model[-1].i + 1  # index
            self.model.add_module(name='%s' % m.i, module=m)  # add
            self.eval()
        elif not mode and present:
            print('Removing NMS... ')
            self.model = self.model[:-1]  # remove
        return self

    def autoshape(self):  # add autoShape module
        # Wrap the model so it accepts cv2/np/PIL/torch inputs end-to-end.
        print('Adding autoShape... ')
        m = autoShape(self)  # wrap model
        copy_attr(m, self, include=('yaml', 'nc', 'hyp', 'names', 'stride'), exclude=())  # copy attributes
        return m

    def info(self, verbose=False, img_size=640):  # print model information
        model_info(self, verbose, img_size)
def parse_model(d, ch):  # model_dict, input_channels(3)
    """Build an nn.Sequential model from a parsed model-YAML dict.

    Args:
        d: model dict with 'anchors', 'nc', 'depth_multiple',
           'width_multiple', 'backbone' and 'head' entries.
        ch: list of channel counts, e.g. [3] for RGB input; extended in
            place with each layer's output channel count.

    Returns:
        Tuple of (nn.Sequential model, sorted list of layer indices
        whose outputs must be cached for later skip connections).
    """
    anchors, nc, gd, gw = d['anchors'], d['nc'], d['depth_multiple'], d['width_multiple']
    na = (len(anchors[0]) // 2) if isinstance(anchors, list) else anchors  # number of anchors
    no = na * (nc + 5)  # number of outputs = anchors * (classes + 5)
    layers, save, c2 = [], [], ch[-1]  # layers, savelist, ch out
    for i, (f, n, m, args) in enumerate(d['backbone'] + d['head']):  # from, number, module, args
        m = eval(m) if isinstance(m, str) else m  # resolve module class from its YAML name
        for j, a in enumerate(args):
            try:
                args[j] = eval(a) if isinstance(a, str) else a  # resolve string args, e.g. 'nc', 'None'
            except Exception:
                # non-evaluable strings (e.g. 'nearest') are kept verbatim;
                # was a bare `except:` that also swallowed KeyboardInterrupt
                pass
        n = max(round(n * gd), 1) if n > 1 else n  # depth gain
        if m in [Conv, Bottleneck, SPP, DWConv, MixConv2d, Focus, CrossConv, BottleneckCSP, C3,
                 ShuffleV2Block, StemBlock]:
            c1, c2 = ch[f], args[0]
            # width gain, keeping channels divisible by 8 (final outputs excepted)
            c2 = make_divisible(c2 * gw, 8) if c2 != no else c2
            args = [c1, c2, *args[1:]]
            if m in [BottleneckCSP, C3]:
                args.insert(2, n)  # repeat count is handled inside the CSP/C3 block
                n = 1
        elif m is nn.BatchNorm2d:
            args = [ch[f]]
        elif m is Concat:
            # ch carries a leading input-channels entry, hence the x + 1 offset
            c2 = sum(ch[-1 if x == -1 else x + 1] for x in f)
        elif m is Detect:
            args.append([ch[x + 1] for x in f])
            if isinstance(args[1], int):  # anchors given as a count, not coordinates
                args[1] = [list(range(args[1] * 2))] * len(f)
        else:
            c2 = ch[f]
        m_ = nn.Sequential(*(m(*args) for _ in range(n))) if n > 1 else m(*args)  # module
        t = str(m)[8:-2].replace('__main__.', '')  # module type name
        n_params = sum(x.numel() for x in m_.parameters())  # renamed from `np` to avoid shadowing numpy's alias
        m_.i, m_.f, m_.type, m_.np = i, f, t, n_params  # attach index, 'from' index, type, param count
        save.extend(x % i for x in ([f] if isinstance(f, int) else f) if x != -1)  # append to savelist
        layers.append(m_)
        ch.append(c2)
    return nn.Sequential(*layers), sorted(save)
from thop import profile
from thop import clever_format
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--cfg', type=str, default='yolov5s.yaml', help='model.yaml')
    parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    opt = parser.parse_args()
    opt.cfg = check_file(opt.cfg)  # verify the config file exists
    set_logging()
    device = select_device(opt.device)

    # Create model
    model = Model(opt.cfg).to(device)
    stride = model.stride.max()
    # Profiling resolution must be divisible by the model's max stride.
    height = 480 if stride == 32 else 512
    # torch.zeros instead of torch.Tensor: an uninitialized tensor can hold
    # NaN/inf garbage, which can skew op profiling. Renamed from `input`,
    # which shadowed the builtin.
    sample = torch.zeros(1, 3, height, 640, device=device)
    model.train()
    # print(model)

    # FLOPs / parameter count via thop
    flops, params = profile(model, inputs=(sample,))
    flops, params = clever_format([flops, params], "%.3f")
    print('Flops:', flops, ',Params:', params)

View File

@ -0,0 +1,7 @@
fileFormatVersion: 2
guid: d067765dcc553892b964579ec590708f
DefaultImporter:
externalObjects: {}
userData:
assetBundleName:
assetBundleVariant:

View File

@ -0,0 +1,46 @@
# parameters
nc: 1 # number of classes
depth_multiple: 1.0 # model depth multiple
width_multiple: 0.5 # layer channel multiple
# anchors
anchors:
- [4,5, 8,10, 13,16] # P3/8
- [23,29, 43,55, 73,105] # P4/16
- [146,217, 231,300, 335,433] # P5/32
# YOLOv5 backbone
backbone:
# [from, number, module, args]
[[-1, 1, StemBlock, [32, 3, 2]], # 0-P2/4
[-1, 1, ShuffleV2Block, [128, 2]], # 1-P3/8
[-1, 3, ShuffleV2Block, [128, 1]], # 2
[-1, 1, ShuffleV2Block, [256, 2]], # 3-P4/16
[-1, 7, ShuffleV2Block, [256, 1]], # 4
[-1, 1, ShuffleV2Block, [512, 2]], # 5-P5/32
[-1, 3, ShuffleV2Block, [512, 1]], # 6
]
# YOLOv5 head
head:
[[-1, 1, Conv, [128, 1, 1]],
[-1, 1, nn.Upsample, [None, 2, 'nearest']],
[[-1, 4], 1, Concat, [1]], # cat backbone P4
[-1, 1, C3, [128, False]], # 10
[-1, 1, Conv, [128, 1, 1]],
[-1, 1, nn.Upsample, [None, 2, 'nearest']],
[[-1, 2], 1, Concat, [1]], # cat backbone P3
[-1, 1, C3, [128, False]], # 14 (P3/8-small)
[-1, 1, Conv, [128, 3, 2]],
[[-1, 11], 1, Concat, [1]], # cat head P4
[-1, 1, C3, [128, False]], # 17 (P4/16-medium)
[-1, 1, Conv, [128, 3, 2]],
[[-1, 7], 1, Concat, [1]], # cat head P5
[-1, 1, C3, [128, False]], # 20 (P5/32-large)
[[14, 17, 20], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)
]

View File

@ -0,0 +1,7 @@
fileFormatVersion: 2
guid: f4fde361175c025209814266160b9a6a
DefaultImporter:
externalObjects: {}
userData:
assetBundleName:
assetBundleVariant:

View File

@ -0,0 +1,47 @@
# parameters
nc: 1 # number of classes
depth_multiple: 1.0 # model depth multiple
width_multiple: 1.0 # layer channel multiple
# anchors
anchors:
- [4,5, 8,10, 13,16] # P3/8
- [23,29, 43,55, 73,105] # P4/16
- [146,217, 231,300, 335,433] # P5/32
# YOLOv5 backbone
backbone:
# [from, number, module, args]
[[-1, 1, StemBlock, [64, 3, 2]], # 0-P1/2
[-1, 3, C3, [128]],
[-1, 1, Conv, [256, 3, 2]], # 2-P3/8
[-1, 9, C3, [256]],
[-1, 1, Conv, [512, 3, 2]], # 4-P4/16
[-1, 9, C3, [512]],
[-1, 1, Conv, [1024, 3, 2]], # 6-P5/32
[-1, 1, SPP, [1024, [3,5,7]]],
[-1, 3, C3, [1024, False]], # 8
]
# YOLOv5 head
head:
[[-1, 1, Conv, [512, 1, 1]],
[-1, 1, nn.Upsample, [None, 2, 'nearest']],
[[-1, 5], 1, Concat, [1]], # cat backbone P4
[-1, 3, C3, [512, False]], # 12
[-1, 1, Conv, [256, 1, 1]],
[-1, 1, nn.Upsample, [None, 2, 'nearest']],
[[-1, 3], 1, Concat, [1]], # cat backbone P3
[-1, 3, C3, [256, False]], # 16 (P3/8-small)
[-1, 1, Conv, [256, 3, 2]],
[[-1, 13], 1, Concat, [1]], # cat head P4
[-1, 3, C3, [512, False]], # 19 (P4/16-medium)
[-1, 1, Conv, [512, 3, 2]],
[[-1, 9], 1, Concat, [1]], # cat head P5
[-1, 3, C3, [1024, False]], # 22 (P5/32-large)
[[16, 19, 22], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)
]

View File

@ -0,0 +1,7 @@
fileFormatVersion: 2
guid: c57cd8641b985a56d89f703243f66c8c
DefaultImporter:
externalObjects: {}
userData:
assetBundleName:
assetBundleVariant:

View File

@ -0,0 +1,60 @@
# parameters
nc: 1 # number of classes
depth_multiple: 1.0 # model depth multiple
width_multiple: 1.0 # layer channel multiple
# anchors
anchors:
- [6,7, 9,11, 13,16] # P3/8
- [18,23, 26,33, 37,47] # P4/16
- [54,67, 77,104, 112,154] # P5/32
- [174,238, 258,355, 445,568] # P6/64
# YOLOv5 backbone
backbone:
# [from, number, module, args]
[ [ -1, 1, StemBlock, [ 64, 3, 2 ] ], # 0-P1/2
[ -1, 3, C3, [ 128 ] ],
[ -1, 1, Conv, [ 256, 3, 2 ] ], # 2-P3/8
[ -1, 9, C3, [ 256 ] ],
[ -1, 1, Conv, [ 512, 3, 2 ] ], # 4-P4/16
[ -1, 9, C3, [ 512 ] ],
[ -1, 1, Conv, [ 768, 3, 2 ] ], # 6-P5/32
[ -1, 3, C3, [ 768 ] ],
[ -1, 1, Conv, [ 1024, 3, 2 ] ], # 8-P6/64
[ -1, 1, SPP, [ 1024, [ 3, 5, 7 ] ] ],
[ -1, 3, C3, [ 1024, False ] ], # 10
]
# YOLOv5 head
head:
[ [ -1, 1, Conv, [ 768, 1, 1 ] ],
[ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ],
[ [ -1, 7 ], 1, Concat, [ 1 ] ], # cat backbone P5
[ -1, 3, C3, [ 768, False ] ], # 14
[ -1, 1, Conv, [ 512, 1, 1 ] ],
[ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ],
[ [ -1, 5 ], 1, Concat, [ 1 ] ], # cat backbone P4
[ -1, 3, C3, [ 512, False ] ], # 18
[ -1, 1, Conv, [ 256, 1, 1 ] ],
[ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ],
[ [ -1, 3 ], 1, Concat, [ 1 ] ], # cat backbone P3
[ -1, 3, C3, [ 256, False ] ], # 22 (P3/8-small)
[ -1, 1, Conv, [ 256, 3, 2 ] ],
[ [ -1, 19 ], 1, Concat, [ 1 ] ], # cat head P4
[ -1, 3, C3, [ 512, False ] ], # 25 (P4/16-medium)
[ -1, 1, Conv, [ 512, 3, 2 ] ],
[ [ -1, 15 ], 1, Concat, [ 1 ] ], # cat head P5
[ -1, 3, C3, [ 768, False ] ], # 28 (P5/32-large)
[ -1, 1, Conv, [ 768, 3, 2 ] ],
[ [ -1, 11 ], 1, Concat, [ 1 ] ], # cat head P6
[ -1, 3, C3, [ 1024, False ] ], # 31 (P6/64-xlarge)
[ [ 22, 25, 28, 31 ], 1, Detect, [ nc, anchors ] ], # Detect(P3, P4, P5, P6)
]

View File

@ -0,0 +1,7 @@
fileFormatVersion: 2
guid: 756e950ece318f9af8c5f25b19a3b6e4
DefaultImporter:
externalObjects: {}
userData:
assetBundleName:
assetBundleVariant:

View File

@ -0,0 +1,47 @@
# parameters
nc: 1 # number of classes
depth_multiple: 0.67 # model depth multiple
width_multiple: 0.75 # layer channel multiple
# anchors
anchors:
- [4,5, 8,10, 13,16] # P3/8
- [23,29, 43,55, 73,105] # P4/16
- [146,217, 231,300, 335,433] # P5/32
# YOLOv5 backbone
backbone:
# [from, number, module, args]
[[-1, 1, StemBlock, [64, 3, 2]], # 0-P1/2
[-1, 3, C3, [128]],
[-1, 1, Conv, [256, 3, 2]], # 2-P3/8
[-1, 9, C3, [256]],
[-1, 1, Conv, [512, 3, 2]], # 4-P4/16
[-1, 9, C3, [512]],
[-1, 1, Conv, [1024, 3, 2]], # 6-P5/32
[-1, 1, SPP, [1024, [3,5,7]]],
[-1, 3, C3, [1024, False]], # 8
]
# YOLOv5 head
head:
[[-1, 1, Conv, [512, 1, 1]],
[-1, 1, nn.Upsample, [None, 2, 'nearest']],
[[-1, 5], 1, Concat, [1]], # cat backbone P4
[-1, 3, C3, [512, False]], # 12
[-1, 1, Conv, [256, 1, 1]],
[-1, 1, nn.Upsample, [None, 2, 'nearest']],
[[-1, 3], 1, Concat, [1]], # cat backbone P3
[-1, 3, C3, [256, False]], # 16 (P3/8-small)
[-1, 1, Conv, [256, 3, 2]],
[[-1, 13], 1, Concat, [1]], # cat head P4
[-1, 3, C3, [512, False]], # 19 (P4/16-medium)
[-1, 1, Conv, [512, 3, 2]],
[[-1, 9], 1, Concat, [1]], # cat head P5
[-1, 3, C3, [1024, False]], # 22 (P5/32-large)
[[16, 19, 22], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)
]

View File

@ -0,0 +1,7 @@
fileFormatVersion: 2
guid: 5560f216fee11280082f727c7c7ef80d
DefaultImporter:
externalObjects: {}
userData:
assetBundleName:
assetBundleVariant:

Some files were not shown because too many files have changed in this diff Show More