
xgb_au

Model Description

xgb_au combines histogram of oriented gradients (HOG) feature extraction with gradient boosting (XGBoost) to predict facial action units from single-frame images.
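In practice the Py-Feat Detector extracts the HOG features and facial landmarks for you. For context only, here is a minimal, illustrative sketch of how HOG features might be computed from a pre-cropped grayscale face image using scikit-image; the function name and HOG parameters below are assumptions for illustration and are not necessarily the settings used to train xgb_au.

import numpy as np
from skimage.feature import hog
from skimage.transform import resize

def extract_hog_features(face_crop: np.ndarray) -> np.ndarray:
    """Illustrative only: resize a grayscale face crop and return a (1, n_features) HOG vector."""
    face = resize(face_crop, (112, 112), anti_aliasing=True)
    features = hog(
        face,
        orientations=8,          # assumed value, not necessarily the training setting
        pixels_per_cell=(8, 8),  # assumed value
        cells_per_block=(2, 2),  # assumed value
        feature_vector=True,
    )
    return features.reshape(1, -1)  # one row per face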

Model Details

  • Model Type: Gradient Boosting (XGB)
  • Framework: sklearn

Model Sources

Citation

If you use the xgb_au model in your research or application, please cite the following paper:

Cheong, J.H., Jolly, E., Xie, T. et al. Py-Feat: Python Facial Expression Analysis Toolbox. Affec Sci 4, 781–796 (2023). https://doi.org/10.1007/s42761-023-00191-4

@article{cheong2023py,
  title={Py-feat: Python facial expression analysis toolbox},
  author={Cheong, Jin Hyun and Jolly, Eshin and Xie, Tiankang and Byrne, Sophie and Kenney, Matthew and Chang, Luke J},
  journal={Affective Science},
  volume={4},
  number={4},
  pages={781--796},
  year={2023},
  publisher={Springer}
}

Example Usage

import numpy as np
from skops.io import load, get_untrusted_types
from huggingface_hub import hf_hub_download

class XGBClassifier:
    def __init__(self) -> None:

        self.au_keys = [
                "AU1", "AU2", "AU4", "AU5", "AU6", "AU7", "AU9", "AU10", "AU11", "AU12",
                "AU14", "AU15", "AU17", "AU20", "AU23", "AU24", "AU25", "AU26", "AU28", "AU43"
            ]
        self.weights_loaded = False
        
    def load_weights(self, scaler_upper=None, pca_model_upper=None, scaler_lower=None, pca_model_lower=None, scaler_full=None, pca_model_full=None, classifiers=None):
        self.scaler_upper = scaler_upper
        self.pca_model_upper = pca_model_upper
        self.scaler_lower = scaler_lower
        self.pca_model_lower = pca_model_lower
        self.scaler_full = scaler_full
        self.pca_model_full = pca_model_full
        self.classifiers = classifiers
        self.weights_loaded = True
        
    def pca_transform(self, frame, scaler, pca_model, landmarks):
        if not self.weights_loaded:
            raise ValueError('Need to load weights before running pca_transform')
        else:
            transformed_frame = pca_model.transform(scaler.transform(frame))
            return np.concatenate((transformed_frame, landmarks), axis=1)

    def detect_au(self, frame, landmarks):
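        """
        Detect action units. Note that `frame` here is the HOG feature
        representation of the image, not raw pixels.
        """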
        if not self.weights_loaded:
            raise ValueError('Need to load weights before running detect_au')
        else:
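            # Flatten per-face landmark arrays into rows of concatenated (x, y) coordinates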
            landmarks = np.concatenate(landmarks)
            landmarks = landmarks.reshape(-1, landmarks.shape[1] * landmarks.shape[2])
            
            pca_transformed_upper = self.pca_transform(frame, self.scaler_upper, self.pca_model_upper, landmarks)
            pca_transformed_lower = self.pca_transform(frame, self.scaler_lower, self.pca_model_lower, landmarks)
            pca_transformed_full = self.pca_transform(frame, self.scaler_full, self.pca_model_full, landmarks)
    
            pred_aus = []
            for key in self.au_keys:
                classifier = self.classifiers[key]
    
                if key in ["AU1", "AU2", "AU7"]:
                    au_pred = classifier.predict_proba(pca_transformed_upper)[:, 1]
                elif key in ["AU11", "AU14", "AU17", "AU23", "AU24", "AU26"]:
                    au_pred = classifier.predict_proba(pca_transformed_lower)[:, 1]
                else:
                    au_pred = classifier.predict_proba(pca_transformed_full)[:, 1]
    
                pred_aus.append(au_pred)
    
            return np.array(pred_aus).T

# Load model and weights
au_model = XGBClassifier()
model_path = hf_hub_download(repo_id="py-feat/xgb_au", filename="xgb_au_classifier.skops")
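# skops requires the custom types stored in the file to be explicitly trusted before loading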
unknown_types = get_untrusted_types(file=model_path)
loaded_model = load(model_path, trusted=unknown_types)
au_model.load_weights(scaler_upper=loaded_model.scaler_upper,
                      pca_model_upper=loaded_model.pca_model_upper,
                      scaler_lower=loaded_model.scaler_lower,
                      pca_model_lower=loaded_model.pca_model_lower,
                      scaler_full=loaded_model.scaler_full,
                      pca_model_full=loaded_model.pca_model_full,
                      classifiers=loaded_model.classifiers)

# Test model
frame = "path/to/your/test_image.jpg"  # Replace with your loaded image
landmarks = np.array([...])  # Replace with your landmarks data
pred = au_model.detect_au(frame, landmarks)
print(pred)
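The returned array has one row per detected face and one column per action unit, in the order of au_keys. A small sketch for labeling the output of a single detected face:

for au_name, prob in zip(au_model.au_keys, pred[0]):
    print(f"{au_name}: {prob:.3f}")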