Commit 69c590e
Parent(s): 90aecd1

with private models
Files changed:

- .idea/.gitignore +3 -0
- .idea/AFFA-face-swap.iml +15 -0
- .idea/inspectionProfiles/Project_Default.xml +38 -0
- .idea/inspectionProfiles/profiles_settings.xml +6 -0
- .idea/misc.xml +4 -0
- .idea/modules.xml +8 -0
- .idea/vcs.xml +6 -0
- app.py +105 -0
- networks/__pycache__/generator.cpython-37.pyc +0 -0
- networks/__pycache__/generator.cpython-38.pyc +0 -0
- networks/__pycache__/layers.cpython-37.pyc +0 -0
- networks/__pycache__/layers.cpython-38.pyc +0 -0
- networks/generator.py +321 -0
- networks/layers.py +0 -0
- options/__pycache__/swap_options.cpython-37.pyc +0 -0
- options/__pycache__/swap_options.cpython-38.pyc +0 -0
- options/swap_options.py +43 -0
- requirements.txt +6 -0
- retinaface/__pycache__/anchor.cpython-37.pyc +0 -0
- retinaface/__pycache__/anchor.cpython-38.pyc +0 -0
- retinaface/__pycache__/models.cpython-37.pyc +0 -0
- retinaface/__pycache__/models.cpython-38.pyc +0 -0
- retinaface/__pycache__/ops.cpython-37.pyc +0 -0
- retinaface/anchor.py +296 -0
- retinaface/models.py +301 -0
- retinaface/ops.py +27 -0
- utils/__pycache__/utils.cpython-38.pyc +0 -0
- utils/utils.py +377 -0
    	
.idea/.gitignore
ADDED
@@ -0,0 +1,3 @@
+# Default ignored files
+/shelf/
+/workspace.xml
    	
.idea/AFFA-face-swap.iml
ADDED
@@ -0,0 +1,15 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<module type="PYTHON_MODULE" version="4">
+  <component name="NewModuleRootManager">
+    <content url="file://$MODULE_DIR$" />
+    <orderEntry type="inheritedJdk" />
+    <orderEntry type="sourceFolder" forTests="false" />
+  </component>
+  <component name="PyDocumentationSettings">
+    <option name="format" value="PLAIN" />
+    <option name="myDocStringFormat" value="Plain" />
+  </component>
+  <component name="TestRunnerService">
+    <option name="PROJECT_TEST_RUNNER" value="pytest" />
+  </component>
+</module>
    	
.idea/inspectionProfiles/Project_Default.xml
ADDED
@@ -0,0 +1,38 @@
+<component name="InspectionProjectProfileManager">
+  <profile version="1.0">
+    <option name="myName" value="Project Default" />
+    <inspection_tool class="PyPackageRequirementsInspection" enabled="true" level="WARNING" enabled_by_default="true">
+      <option name="ignoredPackages">
+        <value>
+          <list size="3">
+            <item index="0" class="java.lang.String" itemvalue="ipython" />
+            <item index="1" class="java.lang.String" itemvalue="Cython" />
+            <item index="2" class="java.lang.String" itemvalue="tensorflow-gpu" />
+          </list>
+        </value>
+      </option>
+    </inspection_tool>
+    <inspection_tool class="PyPep8Inspection" enabled="true" level="WEAK WARNING" enabled_by_default="true">
+      <option name="ignoredErrors">
+        <list>
+          <option value="E402" />
+        </list>
+      </option>
+    </inspection_tool>
+    <inspection_tool class="PyPep8NamingInspection" enabled="true" level="WEAK WARNING" enabled_by_default="true">
+      <option name="ignoredErrors">
+        <list>
+          <option value="N806" />
+          <option value="N812" />
+        </list>
+      </option>
+    </inspection_tool>
+    <inspection_tool class="PyUnresolvedReferencesInspection" enabled="true" level="WARNING" enabled_by_default="true">
+      <option name="ignoredIdentifiers">
+        <list>
+          <option value="torch.backends.cudnn" />
+        </list>
+      </option>
+    </inspection_tool>
+  </profile>
+</component>
    	
.idea/inspectionProfiles/profiles_settings.xml
ADDED
@@ -0,0 +1,6 @@
+<component name="InspectionProjectProfileManager">
+  <settings>
+    <option name="USE_PROJECT_PROFILE" value="false" />
+    <version value="1.0" />
+  </settings>
+</component>
    	
.idea/misc.xml
ADDED
@@ -0,0 +1,4 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project version="4">
+  <component name="ProjectRootManager" version="2" project-jdk-name="Python 3.8 (base)" project-jdk-type="Python SDK" />
+</project>
    	
.idea/modules.xml
ADDED
@@ -0,0 +1,8 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project version="4">
+  <component name="ProjectModuleManager">
+    <modules>
+      <module fileurl="file://$PROJECT_DIR$/.idea/AFFA-face-swap.iml" filepath="$PROJECT_DIR$/.idea/AFFA-face-swap.iml" />
+    </modules>
+  </component>
+</project>
    	
.idea/vcs.xml
ADDED
@@ -0,0 +1,6 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project version="4">
+  <component name="VcsDirectoryMappings">
+    <mapping directory="$PROJECT_DIR$" vcs="Git" />
+  </component>
+</project>
    	
app.py
ADDED
@@ -0,0 +1,105 @@
+import gradio
+
+import tensorflow as tf
+from huggingface_hub import Repository
+repo = Repository()
+
+from utils.utils import norm_crop, estimate_norm, inverse_estimate_norm, transform_landmark_points, get_lm, load_model_internal
+from networks.generator import get_generator
+import numpy as np
+import cv2
+from scipy.ndimage import gaussian_filter
+
+from tensorflow.keras.models import load_model
+from retinaface.models import *
+from options.swap_options import SwapOptions
+
+
+opt = SwapOptions().parse()
+
+
+#gpus = tf.config.experimental.list_physical_devices('GPU')
+#tf.config.set_visible_devices(gpus[opt.device_id], 'GPU')
+
+RetinaFace = load_model(opt.retina_path,
+                        custom_objects={"FPN": FPN,
+                                        "SSH": SSH,
+                                        "BboxHead": BboxHead,
+                                        "LandmarkHead": LandmarkHead,
+                                        "ClassHead": ClassHead})
+ArcFace = load_model(opt.arcface_path)
+
+G = load_model_internal(opt.chkp_dir + opt.log_name + "/gen/", "gen", opt.load)
+
+blend_mask_base = np.zeros(shape=(256, 256, 1))
+blend_mask_base[100:240, 32:224] = 1
+blend_mask_base = gaussian_filter(blend_mask_base, sigma=7)
+
+
+def run_inference(target, source):
+    source = np.array(source)
+    target = np.array(target)
+
+    # Prepare to load video
+    source_a = RetinaFace(np.expand_dims(source, axis=0)).numpy()[0]
+    source_h, source_w, _ = source.shape
+    source_lm = get_lm(source_a, source_w, source_h)
+    source_aligned = norm_crop(source, source_lm, image_size=256)
+    source_z = ArcFace.predict(np.expand_dims(tf.image.resize(source_aligned, [112, 112]) / 255.0, axis=0))
+
+    # read frame
+    im = target
+    im_h, im_w, _ = im.shape
+    im_shape = (im_w, im_h)
+
+    detection_scale = im_w // 640 if im_w > 640 else 1
+
+    faces = RetinaFace(np.expand_dims(cv2.resize(im,
+                                                 (im_w // detection_scale,
+                                                  im_h // detection_scale)), axis=0)).numpy()
+
+    total_img = im / 255.0
+    for annotation in faces:
+        lm_align = np.array([[annotation[4] * im_w, annotation[5] * im_h],
+                             [annotation[6] * im_w, annotation[7] * im_h],
+                             [annotation[8] * im_w, annotation[9] * im_h],
+                             [annotation[10] * im_w, annotation[11] * im_h],
+                             [annotation[12] * im_w, annotation[13] * im_h]],
+                            dtype=np.float32)
+
+        # align the detected face
+        M, pose_index = estimate_norm(lm_align, 256, "arcface", shrink_factor=1.0)
+        im_aligned = cv2.warpAffine(im, M, (256, 256), borderValue=0.0)
+
+        # face swap
+        changed_face_cage = G.predict([np.expand_dims((im_aligned - 127.5) / 127.5, axis=0),
+                                       source_z])
+        changed_face = (changed_face_cage[0] + 1) / 2
+
+        # get inverse transformation landmarks
+        transformed_lmk = transform_landmark_points(M, lm_align)
+
+        # warp image back
+        iM, _ = inverse_estimate_norm(lm_align, transformed_lmk, 256, "arcface", shrink_factor=1.0)
+        iim_aligned = cv2.warpAffine(changed_face, iM, im_shape, borderValue=0.0)
+
+        # blend swapped face with target image
+        blend_mask = cv2.warpAffine(blend_mask_base, iM, im_shape, borderValue=0.0)
+        blend_mask = np.expand_dims(blend_mask, axis=-1)
+        total_img = (iim_aligned * blend_mask + total_img * (1 - blend_mask))
+
+    if opt.compare:
+        total_img = np.concatenate((im / 255.0, total_img), axis=1)
+
+    total_img = np.clip(total_img, 0, 1)
+    total_img *= 255.0
+    total_img = total_img.astype('uint8')
+
+    return total_img
+
+
+iface = gradio.Interface(run_inference,
+                         [gradio.inputs.Image(shape=None),
+                          gradio.inputs.Image(shape=None)],
+                         gradio.outputs.Image())
+iface.launch()
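app.py wires the whole pipeline together: RetinaFace detects faces and their five landmarks, ArcFace embeds the source identity, the generator G swaps each aligned face, and a Gaussian-blurred mask warps the swapped crop back and blends it into the target frame before Gradio serves the demo. Below is a minimal sketch of calling `run_inference` without the web UI; it assumes app.py's globals (models and options) are already loaded, and the image file names are hypothetical:

```python
# Hypothetical driver for run_inference from app.py above.
# "target.jpg" / "source.jpg" are placeholder file names, not part of this commit.
import cv2

target = cv2.cvtColor(cv2.imread("target.jpg"), cv2.COLOR_BGR2RGB)  # frame whose face gets replaced
source = cv2.cvtColor(cv2.imread("source.jpg"), cv2.COLOR_BGR2RGB)  # identity donor

result = run_inference(target, source)  # uint8 RGB; original|swap side by side when --compare is true
cv2.imwrite("result.jpg", cv2.cvtColor(result, cv2.COLOR_RGB2BGR))
```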
    	
networks/__pycache__/generator.cpython-37.pyc
ADDED
Binary file (7.03 kB)
    	
networks/__pycache__/generator.cpython-38.pyc
ADDED
Binary file (6.54 kB)
    	
networks/__pycache__/layers.cpython-37.pyc
ADDED
Binary file (69.1 kB)
    	
networks/__pycache__/layers.cpython-38.pyc
ADDED
Binary file (63.7 kB)
    	
networks/generator.py
ADDED
@@ -0,0 +1,321 @@
+from tensorflow.keras.layers import *
+from tensorflow.keras.models import Model
+from tensorflow_addons.layers import InstanceNormalization
+from networks.layers import AdaIN, AdaptiveAttention
+
+import numpy as np
+
+
+def residual_down_block(inputs, filters, resample=True):
+    x = inputs
+
+    r = Conv2D(filters=filters, kernel_size=1, strides=1, padding='same')(x)
+    if resample:
+        r = AveragePooling2D()(r)
+
+    x = InstanceNormalization()(x)
+    x = LeakyReLU(0.2)(x)
+    x = Conv2D(filters=filters, kernel_size=3, strides=1, padding='same')(x)
+
+    if resample:
+        x = AveragePooling2D()(x)
+
+    x = Add()([x, r])
+
+    return x
+
+
+def residual_up_block(inputs, filters, resample=True, name=None):
+    x, z_id = inputs
+
+    r = Conv2D(filters=filters, kernel_size=1, strides=1, padding='same')(x)
+    if resample:
+        r = UpSampling2D(interpolation='bilinear')(r)
+
+    x = InstanceNormalization()(x)
+    x = AdaIN()([x, z_id])
+    x = LeakyReLU(0.2)(x)
+    x = Conv2D(filters=filters, kernel_size=3, strides=1, padding='same')(x)
+
+    if resample:
+        x = UpSampling2D(interpolation='bilinear')(x)
+
+    x = Add()([x, r])
+
+    return x
+
+
+def adaptive_attention(inputs, filters, name=None):
+    x_t, x_s = inputs
+
+    m = Concatenate(axis=-1)([x_t, x_s])
+    m = Conv2D(filters=filters // 4, kernel_size=3, strides=1, padding='same')(m)
+    m = LeakyReLU(0.2)(m)
+    m = InstanceNormalization()(m)
+    m = Conv2D(filters=filters, kernel_size=1, strides=1, padding='same', activation='sigmoid', name=name)(m)
+
+    x = AdaptiveAttention()([m, x_t, x_s])
+
+    return x
+
+
+def adaptive_fusion_up_block(inputs, filters, resample=True, name=None):
+    x_t, x_s, z_id = inputs
+
+    x = adaptive_attention([x_t, x_s], x_t.shape[-1], name=name)
+
+    r = Conv2D(filters=filters, kernel_size=1, strides=1, padding='same')(x)
+    if resample:
+        r = UpSampling2D(interpolation='bilinear')(r)
+
+    x = InstanceNormalization()(x)
+    x = AdaIN()([x, z_id])
+    x = LeakyReLU(0.2)(x)
+    x = Conv2D(filters=filters, kernel_size=3, strides=1, padding='same')(x)
+
+    if resample:
+        x = UpSampling2D(interpolation='bilinear')(x)
+
+    x = Add()([x, r])
+
+    return x
+
+
+def dual_adaptive_fusion_up_block(inputs, filters, resample=True, name=None):
+    x_t, x_s, z_id = inputs
+
+    x = adaptive_attention([x_t, x_s], x_t.shape[-1], name=name + '_0')
+    x = adaptive_attention([x_t, x], x_t.shape[-1], name=name + '_1')
+
+    r = Conv2D(filters=filters, kernel_size=1, strides=1, padding='same')(x)
+    if resample:
+        r = UpSampling2D(interpolation='bilinear')(r)
+
+    x = InstanceNormalization()(x)
+    x = AdaIN()([x, z_id])
+    x = LeakyReLU(0.2)(x)
+    x = Conv2D(filters=filters, kernel_size=3, strides=1, padding='same')(x)
+
+    if resample:
+        x = UpSampling2D(interpolation='bilinear')(x)
+
+    x = Add()([x, r])
+
+    return x
+
+
+def adaptive_fusion_up_block_no_add(inputs, filters, resample=True, name=None):
+    x_t, x_s, z_id = inputs
+
+    x = adaptive_attention([x_t, x_s], x_t.shape[-1], name=name)
+
+    x = InstanceNormalization()(x)
+    x = AdaIN()([x, z_id])
+    x = LeakyReLU(0.2)(x)
+    x = Conv2D(filters=filters, kernel_size=3, strides=1, padding='same')(x)
+
+    return x
+
+
+def adaptive_fusion_up_block_concat_baseline(inputs, filters, resample=True, name=None):
+    x_t, x_s, z_id = inputs
+
+    x = Concatenate(axis=-1)([x_t, x_s])
+
+    r = Conv2D(filters=filters, kernel_size=1, strides=1, padding='same')(x)
+    if resample:
+        r = UpSampling2D(interpolation='bilinear')(r)
+
+    x = InstanceNormalization()(x)
+    x = AdaIN()([x, z_id])
+    x = LeakyReLU(0.2)(x)
+    x = Conv2D(filters=filters, kernel_size=3, strides=1, padding='same')(x)
+
+    if resample:
+        x = UpSampling2D(interpolation='bilinear')(x)
+
+    x = Add(name=name if name == 'final' else None)([x, r])
+
+    return x
+
+
+def adaptive_fusion_up_block_add_baseline(inputs, filters, resample=True, name=None):
+    x_t, x_s, z_id = inputs
+
+    x = Add()([x_t, x_s])
+
+    r = Conv2D(filters=filters, kernel_size=1, strides=1, padding='same')(x)
+    if resample:
+        r = UpSampling2D(interpolation='bilinear')(r)
+
+    x = InstanceNormalization()(x)
+    x = AdaIN()([x, z_id])
+    x = LeakyReLU(0.2)(x)
+    x = Conv2D(filters=filters, kernel_size=3, strides=1, padding='same')(x)
+
+    if resample:
+        x = UpSampling2D(interpolation='bilinear')(x)
+
+    x = Add()([x, r])
+
+    return x
+
+
+def get_generator_original(mapping_depth=4, mapping_size=256):
+    x_target = Input(shape=(256, 256, 3))
+    z_source = Input(shape=(512,))
+
+    z_id = z_source
+    for m in range(np.max([mapping_depth - 1, 0])):
+        z_id = Dense(mapping_size)(z_id)
+        z_id = LeakyReLU(0.2)(z_id)
+    if mapping_depth >= 1:
+        z_id = Dense(mapping_size)(z_id)
+
+    x_0 = Conv2D(filters=64, kernel_size=3, strides=1, padding='same')(x_target)    # 256
+
+    x_1 = residual_down_block(x_0, 128)                                             # 128
+
+    x_2 = residual_down_block(x_1, 256)                                             # 64
+
+    x_3 = residual_down_block(x_2, 512)
+
+    x_4 = residual_down_block(x_3, 512)
+
+    x_5 = residual_down_block(x_4, 512)
+
+    x_6 = residual_down_block(x_5, 512, resample=False)
+
+    u_5 = residual_up_block([x_6, z_id], 512, resample=False)
+
+    u_4 = residual_up_block([u_5, z_id], 512)
+
+    u_3 = residual_up_block([u_4, z_id], 512)
+
+    u_2 = residual_up_block([u_3, z_id], 256)                   # 64
+
+    u_1 = adaptive_fusion_up_block([x_2, u_2, z_id], 128, name='aff_attention_64x64')       # 128
+
+    u_0 = adaptive_fusion_up_block([x_1, u_1, z_id], 64, name='aff_attention_128x128')        # 256
+
+    out = adaptive_fusion_up_block([x_0, u_0, z_id], 3, resample=False, name='aff_attention_256x256')
+
+    gen_model = Model([x_target, z_source], out)
+    gen_model.summary()
+
+    return gen_model
+
+
+def make_layer(l_type, inputs, filters, resample, name=None):
+    if l_type == 'affa':
+        return adaptive_fusion_up_block(inputs, filters, resample=resample, name=name)
+    if l_type == 'd_affa':
+        return dual_adaptive_fusion_up_block(inputs, filters, resample=resample, name=name)
+    elif l_type == 'concat':
+        return adaptive_fusion_up_block_concat_baseline(inputs, filters, resample=resample, name=name)
+    elif l_type == 'no_skip':
+        return residual_up_block(inputs[1:], filters, resample=resample)
+
+
+def get_generator(up_types=None, mapping_depth=4, mapping_size=256):
+
+    if up_types is None:
+        up_types = ['no_skip', 'no_skip', 'd_affa', 'd_affa', 'd_affa', 'concat']
+
+    x_target = Input(shape=(256, 256, 3))
+    z_source = Input(shape=(512,))
+
+    z_id = z_source
+    for m in range(np.max([mapping_depth - 1, 0])):
+        z_id = Dense(mapping_size)(z_id)
+        z_id = LeakyReLU(0.2)(z_id)
+    if mapping_depth >= 1:
+        z_id = Dense(mapping_size)(z_id)
+
+    x_0 = Conv2D(filters=64, kernel_size=3, strides=1, padding='same')(x_target)    # 256
+
+    x_1 = residual_down_block(x_0, 128)                                             # 128
+
+    x_2 = residual_down_block(x_1, 256)                                             # 64
+
+    x_3 = residual_down_block(x_2, 512)
+
+    x_4 = residual_down_block(x_3, 512)
+
+    x_5 = residual_down_block(x_4, 512)
+
+    x_6 = residual_down_block(x_5, 512, resample=False)
+
+    u_5 = residual_up_block([x_6, z_id], 512, resample=False)
+
+    u_4 = make_layer(up_types[0], [x_5, u_5, z_id], 512, resample=True, name='16x16')
+
+    u_3 = make_layer(up_types[1], [x_4, u_4, z_id], 512, resample=True, name='32x32')
+
+    u_2 = make_layer(up_types[2], [x_3, u_3, z_id], 256, resample=True, name='64x64')
+
+    u_1 = make_layer(up_types[3], [x_2, u_2, z_id], 128, resample=True, name='128x128')
+
+    u_0 = make_layer(up_types[4], [x_1, u_1, z_id], 64, resample=True, name='256x256')
+
+    out = make_layer(up_types[5], [x_0, u_0, z_id], 3, resample=False, name='final')
+
+    gen_model = Model([x_target, z_source], out)
+    gen_model.summary()
+
+    return gen_model
+
+
+def get_generator_large(up_types=None, mapping_depth=4, mapping_size=512):
+
+    if up_types is None:
+        up_types = ['no_skip', 'no_skip', 'affa', 'affa', 'affa', 'concat']
+
+    x_target = Input(shape=(256, 256, 3))
+    z_source = Input(shape=(512,))
+
+    z_id = z_source
+    for m in range(np.max([mapping_depth - 1, 0])):
+        z_id = Dense(mapping_size)(z_id)
+        z_id = LeakyReLU(0.2)(z_id)
+    if mapping_depth >= 1:
+        z_id = Dense(mapping_size)(z_id)
+
+    x_0 = Conv2D(filters=64, kernel_size=3, strides=1, padding='same')(x_target)    # 256
+
+    x_1 = residual_down_block(x_0, 128)                                             # 128
+
+    x_2 = residual_down_block(x_1, 256)                                             # 64
+
+    x_3 = residual_down_block(x_2, 512)
+
+    x_4 = residual_down_block(x_3, 512)
+
+    x_5 = residual_down_block(x_4, 512)
+
+    b_0 = residual_up_block([x_5, z_id], 512, resample=False)
+
+    b_1 = residual_up_block([b_0, z_id], 512, resample=False)
+
+    b_2 = residual_up_block([b_1, z_id], 512, resample=False)
+
+    u_5 = residual_up_block([b_2, z_id], 512, resample=False)
+
+    u_4 = make_layer(up_types[0], [x_5, u_5, z_id], 512, resample=True, name='16x16')
+
+    u_3 = make_layer(up_types[1], [x_4, u_4, z_id], 512, resample=True, name='32x32')
+
+    u_2 = make_layer(up_types[2], [x_3, u_3, z_id], 256, resample=True, name='64x64')
+
+    u_1 = make_layer(up_types[3], [x_2, u_2, z_id], 128, resample=True, name='128x128')
+
+    u_0 = make_layer(up_types[4], [x_1, u_1, z_id], 64, resample=True, name='256x256')
+
+    out = make_layer(up_types[5], [x_0, u_0, z_id], 3, resample=False, name='final')
+
+    gen_model = Model([x_target, z_source], out)
+    gen_model.summary()
+
+    return gen_model
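networks/generator.py defines a U-Net-style encoder/decoder: the `residual_down_block`s take the 256x256 target down to 8x8, the identity embedding is injected at every decoder stage through AdaIN, and `make_layer` selects how each skip connection is fused ('affa' or 'd_affa' adaptive attention, plain 'concat', or 'no_skip'). A quick shape check, as a sketch that assumes networks/layers.py from this commit is importable:

```python
# Sketch: build the default AFFA generator and push a dummy batch through it.
# Requires AdaIN and AdaptiveAttention from networks/layers.py in this commit.
import numpy as np
from networks.generator import get_generator

G = get_generator()  # up_types defaults to ['no_skip', 'no_skip', 'd_affa', 'd_affa', 'd_affa', 'concat']

face = np.random.uniform(-1.0, 1.0, size=(1, 256, 256, 3)).astype("float32")  # target face in [-1, 1]
z_id = np.random.normal(size=(1, 512)).astype("float32")                      # ArcFace identity vector

print(G.predict([face, z_id]).shape)  # (1, 256, 256, 3)
```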
    	
networks/layers.py
ADDED
The diff for this file is too large to render.
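Although networks/layers.py is not rendered, generator.py only needs two layers from it: `AdaIN`, which modulates feature maps with the identity vector, and `AdaptiveAttention`, which mixes target and swapped features through a learned mask (conventionally `m * x_t + (1 - m) * x_s`). For orientation only, here is a minimal sketch of what an AdaIN layer with the same call pattern conventionally computes; it is not the actual implementation from this file:

```python
# NOT the contents of networks/layers.py; a conventional AdaIN sketch
# matching the call pattern AdaIN()([x, z_id]) used in generator.py.
import tensorflow as tf
from tensorflow.keras.layers import Dense, Layer


class AdaINSketch(Layer):
    def build(self, input_shape):
        channels = input_shape[0][-1]
        self.to_gamma = Dense(channels)  # per-channel scale predicted from the identity vector
        self.to_beta = Dense(channels)   # per-channel shift predicted from the identity vector

    def call(self, inputs):
        x, z = inputs
        mean, var = tf.nn.moments(x, axes=[1, 2], keepdims=True)  # instance (per-sample, per-channel) stats
        x_norm = (x - mean) / tf.sqrt(var + 1e-6)
        gamma = self.to_gamma(z)[:, None, None, :]
        beta = self.to_beta(z)[:, None, None, :]
        return gamma * x_norm + beta
```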
    	
options/__pycache__/swap_options.cpython-37.pyc
ADDED
Binary file (6.21 kB)
    	
options/__pycache__/swap_options.cpython-38.pyc
ADDED
Binary file (1.66 kB)
    	
options/swap_options.py
ADDED
@@ -0,0 +1,43 @@
+import argparse
+
+
+class SwapOptions():
+    def __init__(self):
+        self.parser = argparse.ArgumentParser()
+        self.initialized = False
+
+    def initialize(self):
+        # paths (data, models, etc...)
+        self.parser.add_argument('--arcface_path', type=str,
+                                 default="arcface_model/arcface/arc_res50.h5",
+                                 help='path to arcface model. Used to extract identity from source.')
+
+        # Video/Image necessary models
+        self.parser.add_argument('--retina_path', type=str,
+                                 default="retinaface/retinaface_res50.h5",
+                                 help='path to retinaface model.')
+        self.parser.add_argument('--compare', type=bool,
+                                 default=True,
+                                 help='If true, concatenates the frame with the manipulated frame')
+
+        self.parser.add_argument('--load', type=int,
+                                 default=30,
+                                 help='int of number to load checkpoint weights.')
+        self.parser.add_argument('--device_id', type=int, default=0,
+                                 help='which device to use')
+
+        # logging and checkpointing
+        self.parser.add_argument('--log_dir', type=str, default='logs/runs/',
+                                 help='logging directory')
+        self.parser.add_argument('--log_name', type=str, default='affa_f',
+                                 help='name of the run, change this to track several experiments')
+
+        self.parser.add_argument('--chkp_dir', type=str, default='checkpoints/',
+                                 help='checkpoint directory (will use same name as log_name!)')
+        self.initialized = True
+
+    def parse(self):
+        if not self.initialized:
+            self.initialize()
+        self.opt = self.parser.parse_args()
+        return self.opt
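SwapOptions is a thin argparse wrapper, so every path and checkpoint number above can be overridden on the command line when launching the app. A sketch, injecting argv explicitly so it also works outside a terminal:

```python
# Equivalent of `python app.py --log_name affa_f --load 30`;
# parse() reads sys.argv, so the arguments are injected explicitly here.
import sys
from options.swap_options import SwapOptions

sys.argv = ["app.py", "--log_name", "affa_f", "--load", "30"]
opt = SwapOptions().parse()
print(opt.retina_path, opt.chkp_dir + opt.log_name + "/gen/")
```

One caveat: `--compare` is declared with `type=bool`, and argparse applies that conversion to the raw string, so `--compare False` still yields True (`bool("False")` is truthy); only an empty string argument produces False.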
    	
requirements.txt
ADDED
@@ -0,0 +1,6 @@
+tensorflow
+tensorflow-addons
+opencv-python-headless
+scipy
+pillow
+scikit-image
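The unpinned dependency list installs with `pip install -r requirements.txt`. Two things stand out: `tensorflow-addons` is here because generator.py uses its `InstanceNormalization` layer, while `gradio` and `huggingface_hub`, both imported by app.py, are absent, presumably because the Hugging Face Spaces runtime hosting the demo provides them.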
    	
retinaface/__pycache__/anchor.cpython-37.pyc
ADDED
Binary file (10.3 kB)
    	
retinaface/__pycache__/anchor.cpython-38.pyc
ADDED
Binary file (10.4 kB)
    	
retinaface/__pycache__/models.cpython-37.pyc
ADDED
Binary file (10.7 kB)
    	
retinaface/__pycache__/models.cpython-38.pyc
ADDED
Binary file (10.4 kB)
    	
retinaface/__pycache__/ops.cpython-37.pyc
ADDED
Binary file (1.02 kB)
    	
        retinaface/anchor.py
    ADDED
    
    | @@ -0,0 +1,296 @@ | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | 
|  | |
| 1 | 
            +
            """Anchor utils modified from https://github.com/biubug6/Pytorch_Retinaface"""
         | 
| 2 | 
            +
            import math
         | 
| 3 | 
            +
            import tensorflow as tf
         | 
| 4 | 
            +
            import numpy as np
         | 
| 5 | 
            +
            from itertools import product as product
         | 
| 6 | 
            +
             | 
| 7 | 
            +
             | 
| 8 | 
            +
            ###############################################################################
         | 
| 9 | 
            +
            #   Tensorflow / Numpy Priors                                                 #
         | 
| 10 | 
            +
###############################################################################
def prior_box(image_sizes, min_sizes, steps, clip=False):
    """Generate prior (anchor) boxes in (cx, cy, w, h) form, numpy version."""
    feature_maps = [
        [math.ceil(image_sizes[0] / step), math.ceil(image_sizes[1] / step)]
        for step in steps]

    anchors = []
    for k, f in enumerate(feature_maps):
        for i, j in product(range(f[0]), range(f[1])):
            for min_size in min_sizes[k]:
                s_kx = min_size / image_sizes[1]
                s_ky = min_size / image_sizes[0]
                cx = (j + 0.5) * steps[k] / image_sizes[1]
                cy = (i + 0.5) * steps[k] / image_sizes[0]
                anchors += [cx, cy, s_kx, s_ky]

    output = np.asarray(anchors).reshape([-1, 4])

    if clip:
        output = np.clip(output, 0, 1)

    return output


def prior_box_tf(image_sizes, min_sizes, steps, clip=False):
    """Generate prior (anchor) boxes in (cx, cy, w, h) form, TensorFlow version."""
    image_sizes = tf.cast(tf.convert_to_tensor(image_sizes), tf.float32)
    feature_maps = tf.math.ceil(
        tf.reshape(image_sizes, [1, 2]) /
        tf.reshape(tf.cast(steps, tf.float32), [-1, 1]))

    anchors = []
    for k in range(len(min_sizes)):
        grid_x, grid_y = _meshgrid_tf(tf.range(feature_maps[k][1]),
                                      tf.range(feature_maps[k][0]))
        cx = (grid_x + 0.5) * steps[k] / image_sizes[1]
        cy = (grid_y + 0.5) * steps[k] / image_sizes[0]
        cxcy = tf.stack([cx, cy], axis=-1)
        cxcy = tf.reshape(cxcy, [-1, 2])
        cxcy = tf.repeat(cxcy, repeats=tf.shape(min_sizes[k])[0], axis=0)

        sx = min_sizes[k] / image_sizes[1]
        sy = min_sizes[k] / image_sizes[0]
        sxsy = tf.stack([sx, sy], 1)
        sxsy = tf.repeat(sxsy[tf.newaxis],
                         repeats=tf.shape(grid_x)[0] * tf.shape(grid_x)[1],
                         axis=0)
        sxsy = tf.reshape(sxsy, [-1, 2])

        anchors.append(tf.concat([cxcy, sxsy], 1))

    output = tf.concat(anchors, axis=0)

    if clip:
        output = tf.clip_by_value(output, 0, 1)

    return output


def _meshgrid_tf(x, y):
    """Workaround for the tf.meshgrid() issue:
       https://github.com/tensorflow/tensorflow/issues/34470"""
    grid_shape = [tf.shape(y)[0], tf.shape(x)[0]]
    grid_x = tf.broadcast_to(tf.reshape(x, [1, -1]), grid_shape)
    grid_y = tf.broadcast_to(tf.reshape(y, [-1, 1]), grid_shape)
    return grid_x, grid_y

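Both versions produce the same anchor grid. As a quick sanity check (mine, not part of the source), the anchor count for a 640x640 input with the min_sizes/steps hard-coded in retinaface/ops.py below works out as follows:

    import math

    min_sizes = [[16, 32], [64, 128], [256, 512]]  # two sizes per pyramid level
    steps = [8, 16, 32]                            # feature-map strides
    # feature maps for 640x640: 80x80, 40x40, 20x20
    total = sum(math.ceil(640 / s) ** 2 * len(m)
                for s, m in zip(steps, min_sizes))
    print(total)  # 16800 = 80*80*2 + 40*40*2 + 20*20*2
    # prior_box((640, 640), min_sizes, steps).shape == (16800, 4)
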
###############################################################################
#   Tensorflow Encoding                                                       #
###############################################################################
def encode_tf(labels, priors, match_thresh, ignore_thresh,
              variances=[0.1, 0.2]):
    """Encode ground-truth boxes and landmarks into per-prior targets."""
    assert ignore_thresh <= match_thresh
    priors = tf.cast(priors, tf.float32)
    bbox = labels[:, :4]
    landm = labels[:, 4:-1]
    landm_valid = labels[:, -1]  # 1: with landm, 0: w/o landm.

    # jaccard index
    overlaps = _jaccard(bbox, _point_form(priors))

    # (Bipartite Matching)
    # [num_objects] best prior for each ground truth
    best_prior_overlap, best_prior_idx = tf.math.top_k(overlaps, k=1)
    best_prior_overlap = best_prior_overlap[:, 0]
    best_prior_idx = best_prior_idx[:, 0]

    # [num_priors] best ground truth for each prior
    overlaps_t = tf.transpose(overlaps)
    best_truth_overlap, best_truth_idx = tf.math.top_k(overlaps_t, k=1)
    best_truth_overlap = best_truth_overlap[:, 0]
    best_truth_idx = best_truth_idx[:, 0]

    # ensure best prior
    def _loop_body(i, bt_idx, bt_overlap):
        bp_mask = tf.one_hot(best_prior_idx[i], tf.shape(bt_idx)[0])
        bp_mask_int = tf.cast(bp_mask, tf.int32)
        new_bt_idx = bt_idx * (1 - bp_mask_int) + bp_mask_int * i
        bp_mask_float = tf.cast(bp_mask, tf.float32)
        new_bt_overlap = bt_overlap * (1 - bp_mask_float) + bp_mask_float * 2
        return tf.cond(best_prior_overlap[i] > match_thresh,
                       lambda: (i + 1, new_bt_idx, new_bt_overlap),
                       lambda: (i + 1, bt_idx, bt_overlap))
    _, best_truth_idx, best_truth_overlap = tf.while_loop(
        lambda i, bt_idx, bt_overlap: tf.less(i, tf.shape(best_prior_idx)[0]),
        _loop_body, [tf.constant(0), best_truth_idx, best_truth_overlap])

    matches_bbox = tf.gather(bbox, best_truth_idx)  # [num_priors, 4]
    matches_landm = tf.gather(landm, best_truth_idx)  # [num_priors, 10]
    matches_landm_v = tf.gather(landm_valid, best_truth_idx)  # [num_priors]

    loc_t = _encode_bbox(matches_bbox, priors, variances)
    landm_t = _encode_landm(matches_landm, priors, variances)
    landm_valid_t = tf.cast(matches_landm_v > 0, tf.float32)
    conf_t = tf.cast(best_truth_overlap > match_thresh, tf.float32)
    conf_t = tf.where(
        tf.logical_and(best_truth_overlap < match_thresh,
                       best_truth_overlap > ignore_thresh),
        tf.ones_like(conf_t) * -1, conf_t)    # 1: pos, 0: neg, -1: ignore

    return tf.concat([loc_t, landm_t, landm_valid_t[..., tf.newaxis],
                      conf_t[..., tf.newaxis]], axis=1)

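A hedged sketch of the layouts involved (the thresholds here are assumed for illustration, not read from this repo's configs): each input label row is [x1, y1, x2, y2, five (lx, ly) landmark pairs, landm_valid], and each output row stacks 4 loc + 10 landm + 1 landm_valid + 1 conf values:

    import tensorflow as tf

    labels = tf.constant([[0.45, 0.45, 0.65, 0.61] + [0.5] * 10 + [1.0]])
    priors = prior_box_tf((640, 640), [[16, 32], [64, 128], [256, 512]],
                          [8, 16, 32])
    targets = encode_tf(labels, priors, match_thresh=0.45, ignore_thresh=0.3)
    print(targets.shape)  # (16800, 16)
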
def _encode_bbox(matched, priors, variances):
    """Encode the variances from the priorbox layers into the ground truth
    boxes we have matched (based on jaccard overlap) with the prior boxes.
    Args:
        matched: (tensor) Coords of ground truth for each prior in point-form
            Shape: [num_priors, 4].
        priors: (tensor) Prior boxes in center-offset form
            Shape: [num_priors, 4].
        variances: (list[float]) Variances of priorboxes
    Return:
        encoded boxes (tensor), Shape: [num_priors, 4]
    """

    # dist b/t match center and prior's center
    g_cxcy = (matched[:, :2] + matched[:, 2:]) / 2 - priors[:, :2]
    # encode variance
    g_cxcy /= (variances[0] * priors[:, 2:])
    # match wh / prior wh
    g_wh = (matched[:, 2:] - matched[:, :2]) / priors[:, 2:]
    g_wh = tf.math.log(g_wh) / variances[1]
    # return target for smooth_l1_loss
    return tf.concat([g_cxcy, g_wh], 1)  # [num_priors, 4]


def _encode_landm(matched, priors, variances):
    """Encode the variances from the priorbox layers into the ground truth
    landmarks we have matched (based on jaccard overlap) with the prior boxes.
    Args:
        matched: (tensor) Coords of ground truth landmarks for each prior
            Shape: [num_priors, 10].
        priors: (tensor) Prior boxes in center-offset form
            Shape: [num_priors, 4].
        variances: (list[float]) Variances of priorboxes
    Return:
        encoded landm (tensor), Shape: [num_priors, 10]
    """

    # dist b/t match center and prior's center
    matched = tf.reshape(matched, [tf.shape(matched)[0], 5, 2])
    priors = tf.broadcast_to(
        tf.expand_dims(priors, 1), [tf.shape(matched)[0], 5, 4])
    g_cxcy = matched[:, :, :2] - priors[:, :, :2]
    # encode variance
    g_cxcy /= (variances[0] * priors[:, :, 2:])
    # g_cxcy /= priors[:, :, 2:]
    g_cxcy = tf.reshape(g_cxcy, [tf.shape(g_cxcy)[0], -1])
    # return target for smooth_l1_loss
    return g_cxcy

def _point_form(boxes):
    """Convert prior boxes to (xmin, ymin, xmax, ymax)
    representation for comparison to point-form ground truth data.
    Args:
        boxes: (tensor) center-size default boxes from priorbox layers.
    Return:
        boxes: (tensor) Converted xmin, ymin, xmax, ymax form of boxes.
    """
    return tf.concat((boxes[:, :2] - boxes[:, 2:] / 2,
                      boxes[:, :2] + boxes[:, 2:] / 2), axis=1)


def _intersect(box_a, box_b):
    """We resize both tensors to [A, B, 2]:
    [A, 2] -> [A, 1, 2] -> [A, B, 2]
    [B, 2] -> [1, B, 2] -> [A, B, 2]
    Then we compute the area of intersect between box_a and box_b.
    Args:
      box_a: (tensor) bounding boxes, Shape: [A, 4].
      box_b: (tensor) bounding boxes, Shape: [B, 4].
    Return:
      (tensor) intersection area, Shape: [A, B].
    """
    A = tf.shape(box_a)[0]
    B = tf.shape(box_b)[0]
    max_xy = tf.minimum(
        tf.broadcast_to(tf.expand_dims(box_a[:, 2:], 1), [A, B, 2]),
        tf.broadcast_to(tf.expand_dims(box_b[:, 2:], 0), [A, B, 2]))
    min_xy = tf.maximum(
        tf.broadcast_to(tf.expand_dims(box_a[:, :2], 1), [A, B, 2]),
        tf.broadcast_to(tf.expand_dims(box_b[:, :2], 0), [A, B, 2]))
    inter = tf.maximum((max_xy - min_xy), tf.zeros_like(max_xy - min_xy))
    return inter[:, :, 0] * inter[:, :, 1]


def _jaccard(box_a, box_b):
    """Compute the jaccard overlap of two sets of boxes.  The jaccard overlap
    is simply the intersection over union of two boxes.  Here we operate on
    ground truth boxes and default boxes.
    E.g.:
        A ∩ B / A ∪ B = A ∩ B / (area(A) + area(B) - A ∩ B)
    Args:
        box_a: (tensor) Ground truth bounding boxes, Shape: [num_objects, 4]
        box_b: (tensor) Prior boxes from priorbox layers, Shape: [num_priors, 4]
    Return:
        jaccard overlap: (tensor) Shape: [box_a.size(0), box_b.size(0)]
    """
    inter = _intersect(box_a, box_b)
    area_a = tf.broadcast_to(
        tf.expand_dims(
            (box_a[:, 2] - box_a[:, 0]) * (box_a[:, 3] - box_a[:, 1]), 1),
        tf.shape(inter))  # [A, B]
    area_b = tf.broadcast_to(
        tf.expand_dims(
            (box_b[:, 2] - box_b[:, 0]) * (box_b[:, 3] - box_b[:, 1]), 0),
        tf.shape(inter))  # [A, B]
    union = area_a + area_b - inter
    return inter / union  # [A, B]

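A small hand-checked example (mine, not from the source) of the IoU computed by _jaccard:

    import tensorflow as tf

    box_a = tf.constant([[0.0, 0.0, 2.0, 2.0]])  # area 4
    box_b = tf.constant([[1.0, 1.0, 3.0, 3.0]])  # area 4, intersection area 1
    print(_jaccard(box_a, box_b).numpy())        # [[0.1428...]] = 1 / (4 + 4 - 1)
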
###############################################################################
#   Tensorflow Decoding                                                       #
###############################################################################
def decode_tf(labels, priors, variances=[0.1, 0.2]):
    """Decode per-prior predictions back into boxes and landmarks."""
    bbox = _decode_bbox(labels[:, :4], priors, variances)
    landm = _decode_landm(labels[:, 4:14], priors, variances)
    landm_valid = labels[:, 14][:, tf.newaxis]
    conf = labels[:, 15][:, tf.newaxis]

    return tf.concat([bbox, landm, landm_valid, conf], axis=1)


def _decode_bbox(pre, priors, variances=[0.1, 0.2]):
    """Decode locations from predictions using priors to undo
    the encoding we did for offset regression at train time.
    Args:
        pre (tensor): location predictions for loc layers,
            Shape: [num_priors, 4]
        priors (tensor): Prior boxes in center-offset form.
            Shape: [num_priors, 4].
        variances: (list[float]) Variances of priorboxes
    Return:
        decoded bounding box predictions
    """
    centers = priors[:, :2] + pre[:, :2] * variances[0] * priors[:, 2:]
    sides = priors[:, 2:] * tf.math.exp(pre[:, 2:] * variances[1])

    return tf.concat([centers - sides / 2, centers + sides / 2], axis=1)


def _decode_landm(pre, priors, variances=[0.1, 0.2]):
    """Decode landmarks from predictions using priors to undo
    the encoding we did for offset regression at train time.
    Args:
        pre (tensor): landm predictions for loc layers,
            Shape: [num_priors, 10]
        priors (tensor): Prior boxes in center-offset form.
            Shape: [num_priors, 4].
        variances: (list[float]) Variances of priorboxes
    Return:
        decoded landm predictions
    """
    landms = tf.concat(
        [priors[:, :2] + pre[:, :2] * variances[0] * priors[:, 2:],
         priors[:, :2] + pre[:, 2:4] * variances[0] * priors[:, 2:],
         priors[:, :2] + pre[:, 4:6] * variances[0] * priors[:, 2:],
         priors[:, :2] + pre[:, 6:8] * variances[0] * priors[:, 2:],
         priors[:, :2] + pre[:, 8:10] * variances[0] * priors[:, 2:]], axis=1)
    return landms
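As a sanity check on the variance convention, a minimal round-trip sketch (values invented, assuming the module's private helpers are in scope) showing that _decode_bbox inverts _encode_bbox:

    import tensorflow as tf

    priors = tf.constant([[0.5, 0.5, 0.2, 0.2]])  # (cx, cy, w, h)
    gt = tf.constant([[0.45, 0.45, 0.65, 0.61]])  # (x1, y1, x2, y2)
    loc = _encode_bbox(gt, priors, variances=[0.1, 0.2])
    print(_decode_bbox(loc, priors).numpy())      # ~[[0.45, 0.45, 0.65, 0.61]]
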
    	
        retinaface/models.py
    ADDED
    
@@ -0,0 +1,301 @@
import tensorflow as tf
from tensorflow.keras import Model
from tensorflow.keras.applications import MobileNetV2, ResNet50
from tensorflow.keras.layers import Input, Conv2D, ReLU, LeakyReLU
from retinaface.anchor import decode_tf, prior_box_tf


def _regularizer(weights_decay):
    """l2 regularizer"""
    return tf.keras.regularizers.l2(weights_decay)


def _kernel_init(scale=1.0, seed=None):
    """He normal initializer (scale and seed are currently unused)"""
    return tf.keras.initializers.he_normal()


class BatchNormalization(tf.keras.layers.BatchNormalization):
    """Make trainable=False actually freeze BN, unlike the stock Keras version.
       ref: https://github.com/zzh8829/yolov3-tf2
    """
    def __init__(self, axis=-1, momentum=0.9, epsilon=1e-5, center=True,
                 scale=True, name=None, **kwargs):
        super(BatchNormalization, self).__init__(
            axis=axis, momentum=momentum, epsilon=epsilon, center=center,
            scale=scale, name=name, **kwargs)

    def call(self, x, training=False):
        if training is None:
            training = tf.constant(False)
        training = tf.logical_and(training, self.trainable)

        return super().call(x, training)

def Backbone(backbone_type='ResNet50', use_pretrain=True):
    """Backbone Model"""
    weights = None
    if use_pretrain:
        weights = 'imagenet'

    def backbone(x):
        # pick_layer1/2/3 select the stride-8/16/32 feature maps
        if backbone_type == 'ResNet50':
            extractor = ResNet50(
                input_shape=x.shape[1:], include_top=False, weights=weights)
            pick_layer1 = 80  # [80, 80, 512]
            pick_layer2 = 142  # [40, 40, 1024]
            pick_layer3 = 174  # [20, 20, 2048]
            preprocess = tf.keras.applications.resnet.preprocess_input
        elif backbone_type == 'MobileNetV2':
            extractor = MobileNetV2(
                input_shape=x.shape[1:], include_top=False, weights=weights)
            pick_layer1 = 54  # [80, 80, 32]
            pick_layer2 = 116  # [40, 40, 96]
            pick_layer3 = 143  # [20, 20, 160]
            preprocess = tf.keras.applications.mobilenet_v2.preprocess_input
        else:
            raise NotImplementedError(
                'Backbone type {} is not recognized.'.format(backbone_type))

        return Model(extractor.input,
                     (extractor.layers[pick_layer1].output,
                      extractor.layers[pick_layer2].output,
                      extractor.layers[pick_layer3].output),
                     name=backbone_type + '_extrator')(preprocess(x))  # (sic)

    return backbone

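A hedged shape check (the 640x640 input size is assumed, and the hard-coded layer indices depend on the Keras version, per the comments above):

    x = tf.zeros([1, 640, 640, 3])
    c1, c2, c3 = Backbone('ResNet50', use_pretrain=False)(x)
    print(c1.shape, c2.shape, c3.shape)
    # expected: (1, 80, 80, 512) (1, 40, 40, 1024) (1, 20, 20, 2048)
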
class ConvUnit(tf.keras.layers.Layer):
    """Conv + BN + Act"""
    def __init__(self, f, k, s, wd, act=None, **kwargs):
        super(ConvUnit, self).__init__(**kwargs)
        self.conv = Conv2D(filters=f, kernel_size=k, strides=s, padding='same',
                           kernel_initializer=_kernel_init(),
                           kernel_regularizer=_regularizer(wd),
                           use_bias=False)
        self.bn = BatchNormalization()

        if act is None:
            self.act_fn = tf.identity
        elif act == 'relu':
            self.act_fn = ReLU()
        elif act == 'lrelu':
            self.act_fn = LeakyReLU(0.1)
        else:
            raise NotImplementedError(
                'Activation function type {} is not recognized.'.format(act))

    def call(self, x):
        return self.act_fn(self.bn(self.conv(x)))

class FPN(tf.keras.layers.Layer):
    """Feature Pyramid Network"""
    def __init__(self, out_ch, wd, **kwargs):
        super(FPN, self).__init__(**kwargs)
        act = 'relu'
        self.out_ch = out_ch
        self.wd = wd
        if out_ch <= 64:
            act = 'lrelu'

        self.output1 = ConvUnit(f=out_ch, k=1, s=1, wd=wd, act=act)
        self.output2 = ConvUnit(f=out_ch, k=1, s=1, wd=wd, act=act)
        self.output3 = ConvUnit(f=out_ch, k=1, s=1, wd=wd, act=act)
        self.merge1 = ConvUnit(f=out_ch, k=3, s=1, wd=wd, act=act)
        self.merge2 = ConvUnit(f=out_ch, k=3, s=1, wd=wd, act=act)

    def call(self, x):
        output1 = self.output1(x[0])  # [80, 80, out_ch]
        output2 = self.output2(x[1])  # [40, 40, out_ch]
        output3 = self.output3(x[2])  # [20, 20, out_ch]

        # top-down pathway: upsample each coarser level and merge it in
        up_h, up_w = tf.shape(output2)[1], tf.shape(output2)[2]
        up3 = tf.image.resize(output3, [up_h, up_w], method='nearest')
        output2 = output2 + up3
        output2 = self.merge2(output2)

        up_h, up_w = tf.shape(output1)[1], tf.shape(output1)[2]
        up2 = tf.image.resize(output2, [up_h, up_w], method='nearest')
        output1 = output1 + up2
        output1 = self.merge1(output1)

        return output1, output2, output3

    def get_config(self):
        config = {
            'out_ch': self.out_ch,
            'wd': self.wd,
        }
        base_config = super(FPN, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))

class SSH(tf.keras.layers.Layer):
    """Single Stage Headless Layer"""
    def __init__(self, out_ch, wd, **kwargs):
        super(SSH, self).__init__(**kwargs)
        assert out_ch % 4 == 0
        self.out_ch = out_ch
        self.wd = wd
        act = 'relu'
        if out_ch <= 64:
            act = 'lrelu'

        self.conv_3x3 = ConvUnit(f=out_ch // 2, k=3, s=1, wd=wd, act=None)

        # the 5x5 and 7x7 receptive fields are built from stacked 3x3 convs
        self.conv_5x5_1 = ConvUnit(f=out_ch // 4, k=3, s=1, wd=wd, act=act)
        self.conv_5x5_2 = ConvUnit(f=out_ch // 4, k=3, s=1, wd=wd, act=None)

        self.conv_7x7_2 = ConvUnit(f=out_ch // 4, k=3, s=1, wd=wd, act=act)
        self.conv_7x7_3 = ConvUnit(f=out_ch // 4, k=3, s=1, wd=wd, act=None)

        self.relu = ReLU()

    def call(self, x):
        conv_3x3 = self.conv_3x3(x)

        conv_5x5_1 = self.conv_5x5_1(x)
        conv_5x5 = self.conv_5x5_2(conv_5x5_1)

        conv_7x7_2 = self.conv_7x7_2(conv_5x5_1)
        conv_7x7 = self.conv_7x7_3(conv_7x7_2)

        output = tf.concat([conv_3x3, conv_5x5, conv_7x7], axis=3)
        output = self.relu(output)

        return output

    def get_config(self):
        config = {
            'out_ch': self.out_ch,
            'wd': self.wd,
        }
        base_config = super(SSH, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))

class BboxHead(tf.keras.layers.Layer):
    """Bbox Head Layer"""
    def __init__(self, num_anchor, wd, **kwargs):
        super(BboxHead, self).__init__(**kwargs)
        self.num_anchor = num_anchor
        self.wd = wd
        self.conv = Conv2D(filters=num_anchor * 4, kernel_size=1, strides=1)

    def call(self, x):
        h, w = tf.shape(x)[1], tf.shape(x)[2]
        x = self.conv(x)

        return tf.reshape(x, [-1, h * w * self.num_anchor, 4])

    def get_config(self):
        config = {
            'num_anchor': self.num_anchor,
            'wd': self.wd,
        }
        base_config = super(BboxHead, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))


class LandmarkHead(tf.keras.layers.Layer):
    """Landmark Head Layer"""
    def __init__(self, num_anchor, wd, name='LandmarkHead', **kwargs):
        super(LandmarkHead, self).__init__(name=name, **kwargs)
        self.num_anchor = num_anchor
        self.wd = wd
        self.conv = Conv2D(filters=num_anchor * 10, kernel_size=1, strides=1)

    def call(self, x):
        h, w = tf.shape(x)[1], tf.shape(x)[2]
        x = self.conv(x)

        return tf.reshape(x, [-1, h * w * self.num_anchor, 10])

    def get_config(self):
        config = {
            'num_anchor': self.num_anchor,
            'wd': self.wd,
        }
        base_config = super(LandmarkHead, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))


class ClassHead(tf.keras.layers.Layer):
    """Class Head Layer"""
    def __init__(self, num_anchor, wd, name='ClassHead', **kwargs):
        super(ClassHead, self).__init__(name=name, **kwargs)
        self.num_anchor = num_anchor
        self.wd = wd
        self.conv = Conv2D(filters=num_anchor * 2, kernel_size=1, strides=1)

    def call(self, x):
        h, w = tf.shape(x)[1], tf.shape(x)[2]
        x = self.conv(x)

        return tf.reshape(x, [-1, h * w * self.num_anchor, 2])

    def get_config(self):
        config = {
            'num_anchor': self.num_anchor,
            'wd': self.wd,
        }
        base_config = super(ClassHead, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))

def RetinaFaceModel(cfg, training=False, iou_th=0.4, score_th=0.02,
                    name='RetinaFaceModel'):
    """Retina Face Model"""
    input_size = cfg['input_size'] if training else None
    wd = cfg['weights_decay']
    out_ch = cfg['out_channel']
    num_anchor = len(cfg['min_sizes'][0])
    backbone_type = cfg['backbone_type']

    # define model
    x = inputs = Input([input_size, input_size, 3], name='input_image')

    x = Backbone(backbone_type=backbone_type)(x)

    fpn = FPN(out_ch=out_ch, wd=wd)(x)

    features = [SSH(out_ch=out_ch, wd=wd)(f) for f in fpn]

    bbox_regressions = tf.concat(
        [BboxHead(num_anchor, wd=wd)(f) for f in features], axis=1)
    landm_regressions = tf.concat(
        [LandmarkHead(num_anchor, wd=wd, name=f'LandmarkHead_{i}')(f)
         for i, f in enumerate(features)], axis=1)
    classifications = tf.concat(
        [ClassHead(num_anchor, wd=wd, name=f'ClassHead_{i}')(f)
         for i, f in enumerate(features)], axis=1)

    classifications = tf.keras.layers.Softmax(axis=-1)(classifications)

    if training:
        out = (bbox_regressions, landm_regressions, classifications)
    else:
        # only for batch size 1
        preds = tf.concat(  # [bboxes, landms, landms_valid, conf]
            [bbox_regressions[0],
             landm_regressions[0],
             tf.ones_like(classifications[0, :, 0][..., tf.newaxis]),
             classifications[0, :, 1][..., tf.newaxis]], 1)
        priors = prior_box_tf((tf.shape(inputs)[1], tf.shape(inputs)[2]),
                              cfg['min_sizes'], cfg['steps'], cfg['clip'])
        decode_preds = decode_tf(preds, priors, cfg['variances'])

        selected_indices = tf.image.non_max_suppression(
            boxes=decode_preds[:, :4],
            scores=decode_preds[:, -1],
            max_output_size=tf.shape(decode_preds)[0],
            iou_threshold=iou_th,
            score_threshold=score_th)

        out = tf.gather(decode_preds, selected_indices)

    return (Model(inputs, out, name=name),
            Model(inputs, [bbox_regressions, landm_regressions,
                           classifications], name=name + '_bb_only'))
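A hedged sketch of a plausible cfg dict for building the inference-time pair returned above (the field values here are assumptions for illustration, not read from this repo's option files; min_sizes/steps/variances match the defaults hard-coded in retinaface/ops.py below):

    cfg = {
        'input_size': 640,
        'weights_decay': 5e-4,
        'out_channel': 256,
        'min_sizes': [[16, 32], [64, 128], [256, 512]],
        'steps': [8, 16, 32],
        'variances': [0.1, 0.2],
        'clip': False,
        'backbone_type': 'ResNet50',
    }
    model, bb_model = RetinaFaceModel(cfg, training=False)
    # model(img[None]) -> [num_detections, 16] after NMS;
    # bb_model returns the raw (bbox, landm, class) head outputs.
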
    	
        retinaface/ops.py
    ADDED
    
@@ -0,0 +1,27 @@
from retinaface.anchor import decode_tf, prior_box_tf
import tensorflow as tf


def extract_detections(bbox_regressions, landm_regressions, classifications,
                       image_sizes, iou_th=0.4, score_th=0.02):
    """Decode raw head outputs for a single image and run NMS."""
    min_sizes = [[16, 32], [64, 128], [256, 512]]
    steps = [8, 16, 32]
    variances = [0.1, 0.2]
    preds = tf.concat(  # [bboxes, landms, landms_valid, conf]
        [bbox_regressions,
         landm_regressions,
         tf.ones_like(classifications[:, 0][..., tf.newaxis]),
         classifications[:, 1][..., tf.newaxis]], 1)
    priors = prior_box_tf(image_sizes, min_sizes, steps, False)
    decode_preds = decode_tf(preds, priors, variances)

    selected_indices = tf.image.non_max_suppression(
        boxes=decode_preds[:, :4],
        scores=decode_preds[:, -1],
        max_output_size=tf.shape(decode_preds)[0],
        iou_threshold=iou_th,
        score_threshold=score_th)

    out = tf.gather(decode_preds, selected_indices)

    return out
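A hedged usage sketch (variable names assumed): this reproduces the in-graph NMS branch of RetinaFaceModel for the raw '_bb_only' outputs of a single image:

    bboxes, landms, classes = bb_model(tf.expand_dims(img, 0))
    dets = extract_detections(bboxes[0], landms[0], classes[0],
                              image_sizes=(tf.shape(img)[0], tf.shape(img)[1]))
    # dets: [num_faces, 16] = 4 bbox + 10 landmark + valid flag + confidence
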
    	
        utils/__pycache__/utils.cpython-38.pyc
    ADDED
    
Binary file (11.6 kB)
    	
        utils/utils.py
    ADDED
    
@@ -0,0 +1,377 @@
import json
from tensorflow.keras.models import model_from_json
from networks.layers import AdaIN, AdaptiveAttention
import tensorflow as tf

import numpy as np
import cv2
import math
from skimage import transform as trans
from scipy.signal import convolve2d
from skimage.color import rgb2yuv, yuv2rgb

from PIL import Image


def save_model_internal(model, path, name, num):
    json_model = model.to_json()
    with open(path + name + '.json', "w") as json_file:
        json_file.write(json_model)

    model.save_weights(path + name + '_' + str(num) + '.h5')


def load_model_internal(path, name, num):
    with open(path + name + '.json', 'r') as json_file:
        model_dict = json_file.read()

    mod = model_from_json(model_dict, custom_objects={
        'AdaIN': AdaIN, 'AdaptiveAttention': AdaptiveAttention})
    mod.load_weights(path + name + '_' + str(num) + '.h5')

    return mod


def save_training_meta(state_dict, path, num):
    with open(path + str(num) + '.json', 'w') as json_file:
        json.dump(state_dict, json_file, indent=2)


def load_training_meta(path, num):
    with open(path + str(num) + '.json', 'r') as json_file:
        state_dict = json.load(json_file)
    return state_dict


def log_info(sw, results_dict, iteration):
    with sw.as_default():
        for key in results_dict.keys():
            tf.summary.scalar(key, results_dict[key], step=iteration)

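A brief usage sketch (the path and model name are assumed stand-ins): the helpers above store the architecture once as JSON next to numbered weight files.

    save_model_internal(generator, 'checkpoints/', 'generator', num=10)
    # writes checkpoints/generator.json and checkpoints/generator_10.h5
    generator = load_model_internal('checkpoints/', 'generator', num=10)
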
# Five-point landmark templates at 112-pixel scale, ordered from left
# profile to right profile.
src1 = np.array([[51.642, 50.115], [57.617, 49.990], [35.740, 69.007],
                 [51.157, 89.050], [57.025, 89.702]],
                dtype=np.float32)
# <--left
src2 = np.array([[45.031, 50.118], [65.568, 50.872], [39.677, 68.111],
                 [45.177, 86.190], [64.246, 86.758]],
                dtype=np.float32)

# ---frontal
src3 = np.array([[39.730, 51.138], [72.270, 51.138], [56.000, 68.493],
                 [42.463, 87.010], [69.537, 87.010]],
                dtype=np.float32)

# -->right
src4 = np.array([[46.845, 50.872], [67.382, 50.118], [72.737, 68.111],
                 [48.167, 86.758], [67.236, 86.190]],
                dtype=np.float32)

# -->right profile
src5 = np.array([[54.796, 49.990], [60.771, 50.115], [76.673, 69.007],
                 [55.388, 89.702], [61.257, 89.050]],
                dtype=np.float32)

src = np.array([src1, src2, src3, src4, src5])
src_map = {112: src, 224: src * 2}

# Left eye, right eye, nose, left mouth, right mouth
arcface_src = np.array(
    [[38.2946, 51.6963], [73.5318, 51.5014], [56.0252, 71.7366],
     [41.5493, 92.3655], [70.7299, 92.2041]],
    dtype=np.float32)

arcface_src = np.expand_dims(arcface_src, axis=0)

| 86 | 
            +
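
# Shape note (added for clarity): src stacks five pose-specific landmark
# templates, so src.shape == (5, 5, 2), while arcface_src.shape == (1, 5, 2)
# after the expand_dims; both are defined for a 112x112 reference crop.
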
def extract_face(img, bb, absolute_center, mode='arcface', extention_rate=0.05, debug=False):
    """Extract a face from the image given a bounding box and the face center."""
    # Bounding box, shifted by the 60-pixel padding applied below
    x1, y1, x2, y2 = bb + 60
    adjusted_absolute_center = (absolute_center[0] + 60, absolute_center[1] + 60)
    if debug:
        print(bb + 60)
        x1, y1, x2, y2 = bb
        cv2.rectangle(img, (x1, y1), (x2, y2), (0, 255, 0), 3)
        cv2.circle(img, absolute_center, 1, (255, 0, 255), 2)
        Image.fromarray(img).show()
        x1, y1, x2, y2 = bb + 60
    # Pad the image in case the face is partially out of frame
    # (assumes a 128x128 input, hence the 248x248 canvas)
    padded_img = np.zeros(shape=(248, 248, 3), dtype=np.uint8)
    padded_img[60:-60, 60:-60, :] = img

    if debug:
        cv2.rectangle(padded_img, (x1, y1), (x2, y2), (0, 255, 255), 3)
        cv2.circle(padded_img, adjusted_absolute_center, 1, (255, 255, 255), 2)
        Image.fromarray(padded_img).show()

    y_len = abs(y1 - y2)
    x_len = abs(x1 - x2)

    # Square crop whose side is the mean of the box sides
    new_len = (y_len + x_len) // 2

    extension = int(new_len * extention_rate)

    x_adjust = (x_len - new_len) // 2
    y_adjust = (y_len - new_len) // 2

    x_1_adjusted = x1 + x_adjust - extension
    x_2_adjusted = x2 - x_adjust + extension

    if mode == 'arcface':
        y_1_adjusted = y1 - extension
        y_2_adjusted = y2 - 2 * y_adjust + extension
    else:
        y_1_adjusted = y1 + 2 * y_adjust - extension
        y_2_adjusted = y2 + extension

    # Re-center the crop on the detected face center
    move_x = adjusted_absolute_center[0] - (x_1_adjusted + x_2_adjusted) // 2
    move_y = adjusted_absolute_center[1] - (y_1_adjusted + y_2_adjusted) // 2

    x_1_adjusted = x_1_adjusted + move_x
    x_2_adjusted = x_2_adjusted + move_x
    y_1_adjusted = y_1_adjusted + move_y
    y_2_adjusted = y_2_adjusted + move_y

    return padded_img[y_1_adjusted:y_2_adjusted, x_1_adjusted:x_2_adjusted]

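
# Usage sketch (hypothetical variable names): extract_face is meant to run
# on the output of align_face below, e.g.
#
#   aligned, center = align_face(img_128, (nose, right_eye, left_eye))
#   face = extract_face(aligned, bb, center, mode='arcface')
#
# where img_128 is a 128x128 RGB crop and bb the detector's bounding box.
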

def distance(a, b):
    return np.sqrt((a[0] - b[0]) ** 2 + (a[1] - b[1]) ** 2)


def euclidean_distance(a, b):
    # Same metric as distance() above
    x1, y1 = a[0], a[1]
    x2, y2 = b[0], b[1]
    return np.sqrt(((x2 - x1) * (x2 - x1)) + ((y2 - y1) * (y2 - y1)))


def align_face(img, landmarks, debug=False):
    nose, right_eye, left_eye = landmarks

    left_eye_x = left_eye[0]
    left_eye_y = left_eye[1]

    right_eye_x = right_eye[0]
    right_eye_y = right_eye[1]

    center_eye = ((left_eye[0] + right_eye[0]) // 2, (left_eye[1] + right_eye[1]) // 2)

    # Third point of the right triangle spanned by the two eyes;
    # direction encodes whether to rotate clockwise or counter-clockwise
    if left_eye_y < right_eye_y:
        point_3rd = (right_eye_x, left_eye_y)
        direction = -1
    else:
        point_3rd = (left_eye_x, right_eye_y)
        direction = 1

    if debug:
        cv2.circle(img, point_3rd, 1, (255, 0, 0), 1)
        cv2.circle(img, center_eye, 1, (255, 0, 0), 1)

        cv2.line(img, right_eye, left_eye, (0, 0, 0), 1)
        cv2.line(img, left_eye, point_3rd, (0, 0, 0), 1)
        cv2.line(img, right_eye, point_3rd, (0, 0, 0), 1)

    a = euclidean_distance(left_eye, point_3rd)
    b = euclidean_distance(right_eye, left_eye)
    c = euclidean_distance(right_eye, point_3rd)

    # Law of cosines, then convert the roll angle to degrees
    cos_a = (b * b + c * c - a * a) / (2 * b * c)

    angle = np.arccos(cos_a)
    angle = (angle * 180) / np.pi

    if direction == -1:
        angle = 90 - angle
        ang = math.radians(direction * angle)
    else:
        ang = math.radians(direction * angle)
        angle = 0 - angle

    # Rotate around the image center (assumes a 128x128 input)
    M = cv2.getRotationMatrix2D((64, 64), angle, 1)
    new_img = cv2.warpAffine(img, M, (128, 128),
                             flags=cv2.INTER_CUBIC)

    # Apply the same rotation to the nose and eye-center landmarks
    rotated_nose = (int((nose[0] - 64) * np.cos(ang) - (nose[1] - 64) * np.sin(ang) + 64),
                    int((nose[0] - 64) * np.sin(ang) + (nose[1] - 64) * np.cos(ang) + 64))

    rotated_center_eye = (int((center_eye[0] - 64) * np.cos(ang) - (center_eye[1] - 64) * np.sin(ang) + 64),
                          int((center_eye[0] - 64) * np.sin(ang) + (center_eye[1] - 64) * np.cos(ang) + 64))

    absolute_center = (rotated_center_eye[0], (rotated_nose[1] + rotated_center_eye[1]) // 2)

    if debug:
        cv2.circle(new_img, rotated_nose, 1, (0, 0, 255), 1)
        cv2.circle(new_img, rotated_center_eye, 1, (0, 0, 255), 1)
        cv2.circle(new_img, absolute_center, 1, (0, 0, 255), 1)

    return new_img, absolute_center


def estimate_norm(lmk, image_size=112, mode='arcface', shrink_factor=1.0):
    """Estimate the similarity transform that best maps lmk onto a source template."""
    assert lmk.shape == (5, 2)
    tform = trans.SimilarityTransform()
    lmk_tran = np.insert(lmk, 2, values=np.ones(5), axis=1)
    min_M = []
    min_index = []
    min_error = float('inf')
    src_factor = image_size / 112
    if mode == 'arcface':
        # Shrinking pulls the template toward the image center (56, 56),
        # which keeps more context around the face in the crop
        src = arcface_src * shrink_factor + (1 - shrink_factor) * 56
        src = src * src_factor
    else:
        src = src_map[image_size] * src_factor
    # Try every pose template and keep the transform with the lowest error
    for i in np.arange(src.shape[0]):
        tform.estimate(lmk, src[i])
        M = tform.params[0:2, :]
        results = np.dot(M, lmk_tran.T)
        results = results.T
        error = np.sum(np.sqrt(np.sum((results - src[i]) ** 2, axis=1)))
        if error < min_error:
            min_error = error
            min_M = M
            min_index = i
    return min_M, min_index


def inverse_estimate_norm(lmk, t_lmk, image_size=112, mode='arcface', shrink_factor=1.0):
    """Estimate the similarity transform that maps the aligned landmarks t_lmk back onto lmk."""
    assert lmk.shape == (5, 2)
    tform = trans.SimilarityTransform()
    lmk_tran = np.insert(lmk, 2, values=np.ones(5), axis=1)
    min_M = []
    min_index = []
    min_error = float('inf')
    src_factor = image_size / 112
    if mode == 'arcface':
        src = arcface_src * shrink_factor + (1 - shrink_factor) * 56
        src = src * src_factor
    else:
        src = src_map[image_size] * src_factor
    # The estimated transform does not depend on i; the loop only selects the
    # pose template index with the lowest reprojection error
    for i in np.arange(src.shape[0]):
        tform.estimate(t_lmk, lmk)
        M = tform.params[0:2, :]
        results = np.dot(M, lmk_tran.T)
        results = results.T
        error = np.sum(np.sqrt(np.sum((results - src[i]) ** 2, axis=1)))
        if error < min_error:
            min_error = error
            min_M = M
            min_index = i
    return min_M, min_index


def norm_crop(img, landmark, image_size=112, mode='arcface', shrink_factor=1.0):
    """
    Align and crop the image based on the facial landmarks in the image. The alignment is done with
    a similarity transformation based on source coordinates.
    :param img: Image to transform.
    :param landmark: Five landmark coordinates in the image.
    :param image_size: Desired output size after transformation.
    :param mode: 'arcface' aligns the face for use with the Arcface facial recognition model. Useful for
    both facial recognition tasks and face swapping tasks.
    :param shrink_factor: Shrink factor that shrinks the source landmark coordinates. This includes more border
    information around the face, which is useful when you want more background context in face swaps.
    The lower the shrink factor, the more of the face (and its surroundings) is included. The default value of
    1.0 aligns the image ready for the Arcface recognition model, but usually omits part of the chin. A value of
    0.0 would collapse all source points onto the middle of the image, rendering the alignment procedure useless.

    If you process the image with a shrink factor of 0.85 and then want to extract the identity embedding with
    Arcface, a central crop of factor 0.85 yields the same crop as using shrink factor 1.0. Since this reduces
    the resolution, the recommendation is to process images to output resolutions higher than 112 when using
    Arcface, so that no information is lost when resampling after the central crop.
    :return: The transformed image.
    """
    M, pose_index = estimate_norm(landmark, image_size, mode, shrink_factor=shrink_factor)
    warped = cv2.warpAffine(img, M, (image_size, image_size), borderValue=0.0)
    return warped

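
# Usage sketch (hypothetical array names): aligning a face for Arcface vs.
# a looser crop for swapping. 'frame' is an RGB image and 'lm' a (5, 2)
# landmark array, e.g. from get_lm below.
#
#   aligned_112 = norm_crop(frame, lm, image_size=112, shrink_factor=1.0)
#   swap_crop = norm_crop(frame, lm, image_size=256, shrink_factor=0.85)
#
# Per the docstring above, a centered 0.85 crop of swap_crop matches the
# framing of a shrink_factor=1.0 alignment at the reduced size.
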

def transform_landmark_points(M, points):
    # Apply the 2x3 affine matrix M to five (x, y) landmark points
    lmk_tran = np.insert(points, 2, values=np.ones(5), axis=1)
    transformed_lmk = np.dot(M, lmk_tran.T)
    transformed_lmk = transformed_lmk.T

    return transformed_lmk


def multi_convolver(image, kernel, iterations):
    if kernel == "Sharpen":
        kernel = np.array([[0, -1, 0],
                           [-1, 5, -1],
                           [0, -1, 0]])
    elif kernel == "Unsharp_mask":
        # Standard 5x5 unsharp-masking kernel
        kernel = np.array([[1, 4, 6, 4, 1],
                           [4, 16, 24, 16, 4],
                           [6, 24, -476, 24, 6],
                           [4, 16, 24, 16, 4],
                           [1, 4, 6, 4, 1]]) * (-1 / 256)
    elif kernel == "Blur":
        kernel = (1 / 16.0) * np.array([[1., 2., 1.],
                                        [2., 4., 2.],
                                        [1., 2., 1.]])
    for i in range(iterations):
        image = convolve2d(image, kernel, 'same', boundary='fill', fillvalue=0)
    return image


def convolve_rgb(image, kernel, iterations=1):
    # Convolve only the luma (Y) channel so colors are preserved
    img_yuv = rgb2yuv(image)
    img_yuv[:, :, 0] = multi_convolver(img_yuv[:, :, 0], kernel,
                                       iterations)
    final_image = yuv2rgb(img_yuv)

    return final_image.astype('float32')

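
# Usage sketch (hypothetical input): sharpen an RGB image with one pass over
# its luma channel.
#
#   sharpened = convolve_rgb(img, "Sharpen", iterations=1)
#
# The result is float32 in roughly the [0, 1] range, since rgb2yuv/yuv2rgb
# operate on float images.
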
def generate_mask_from_landmarks(lms, im_size):
    """Draw a soft blending mask by placing filled circles over the five landmarks."""
    blend_mask_lm = np.zeros(shape=(im_size, im_size, 3), dtype='float32')

    # EYES
    blend_mask_lm = cv2.circle(blend_mask_lm,
                               (int(lms[0][0]), int(lms[0][1])), 12, (255, 255, 255), 30)
    blend_mask_lm = cv2.circle(blend_mask_lm,
                               (int(lms[1][0]), int(lms[1][1])), 12, (255, 255, 255), 30)
    blend_mask_lm = cv2.circle(blend_mask_lm,
                               (int((lms[0][0] + lms[1][0]) / 2), int((lms[0][1] + lms[1][1]) / 2)),
                               16, (255, 255, 255), 65)

    # NOSE
    blend_mask_lm = cv2.circle(blend_mask_lm,
                               (int(lms[2][0]), int(lms[2][1])), 5, (255, 255, 255), 5)
    blend_mask_lm = cv2.circle(blend_mask_lm,
                               (int((lms[0][0] + lms[1][0]) / 2), int(lms[2][1])), 16, (255, 255, 255), 100)

    # MOUTH
    blend_mask_lm = cv2.circle(blend_mask_lm,
                               (int(lms[3][0]), int(lms[3][1])), 6, (255, 255, 255), 30)
    blend_mask_lm = cv2.circle(blend_mask_lm,
                               (int(lms[4][0]), int(lms[4][1])), 6, (255, 255, 255), 30)

    blend_mask_lm = cv2.circle(blend_mask_lm,
                               (int((lms[3][0] + lms[4][0]) / 2), int((lms[3][1] + lms[4][1]) / 2)),
                               16, (255, 255, 255), 40)
    return blend_mask_lm


def display_distance_text(im, distance, lms, im_w, im_h, scale=2):
    # Draw the distance twice: a thick dark pass as an outline, then a thinner
    # lighter pass on top (colors are float RGB values)
    blended_insert = cv2.putText(im, str(distance)[:4],
                                 (int(lms[4] * im_w * 0.5), int(lms[5] * im_h * 0.8)),
                                 cv2.FONT_HERSHEY_SIMPLEX, scale * 0.5, (0.08, 0.16, 0.08), int(scale * 2))
    blended_insert = cv2.putText(blended_insert, str(distance)[:4],
                                 (int(lms[4] * im_w * 0.5), int(lms[5] * im_h * 0.8)),
                                 cv2.FONT_HERSHEY_SIMPLEX, scale * 0.5, (0.3, 0.7, 0.32), int(scale * 1))
    return blended_insert


def get_lm(annotation, im_w, im_h):
    # Scale the landmark annotation (assumed normalized to [0, 1]) to pixels
    lm_align = np.array([[annotation[4] * im_w, annotation[5] * im_h],
                         [annotation[6] * im_w, annotation[7] * im_h],
                         [annotation[8] * im_w, annotation[9] * im_h],
                         [annotation[10] * im_w, annotation[11] * im_h],
                         [annotation[12] * im_w, annotation[13] * im_h]],
                        dtype=np.float32)
    return lm_align