rsinema committed (verified)
Commit bc4d46c · 1 parent: 94dcae6

Upload folder using huggingface_hub
README.md ADDED
@@ -0,0 +1,42 @@
+
+ # Aesthetic Scorer
+
+ This model predicts 7 different aesthetic metrics for images:
+ - Overall aesthetic score
+ - Technical quality score
+ - Composition score
+ - Lighting score
+ - Color harmony score
+ - Depth of field score
+ - Content score
+
+ ## Model Details
+ - Based on CLIP ViT-B/32 visual encoder
+ - Fine-tuned on the PARA dataset
+ - Returns scores between 0-5 for each aesthetic dimension
+
+ ## Usage
+
+ ```python
+ from transformers import CLIPProcessor
+ from aesthetic_scorer import AestheticScorer
+ import torch
+ from PIL import Image
+
+ # Load the processor from the Hub and the fine-tuned model from a local copy of model.pt (see this repo's files)
+ processor = CLIPProcessor.from_pretrained("YOUR_USERNAME/aesthetic-scorer")
+ model = torch.load("model.pt")
+
+ # Process an image
+ image = Image.open("your_image.jpg")
+ inputs = processor(images=image, return_tensors="pt")["pixel_values"]
+
+ # Get scores
+ with torch.no_grad():
+     scores = model(inputs)
+
+ # Print results
+ aesthetic_categories = ["Overall", "Quality", "Composition", "Lighting", "Color", "Depth of Field", "Content"]
+ for category, score in zip(aesthetic_categories, scores):
+     print(f"{category}: {score.item():.2f}/5")
+ ```
aesthetic_scorer.py ADDED
@@ -0,0 +1,44 @@
+
+ import torch.nn as nn
+
+ class AestheticScorer(nn.Module):
+     '''
+     Fine-tuned CLIP model to predict aesthetic scores (e.g., light, depth, composition) based on the PARA dataset.
+     '''
+     def __init__(self, backbone):
+         super().__init__()
+         self.backbone = backbone
+
+         # Define the scoring heads
+         hidden_dim = backbone.config.hidden_size
+         self.aesthetic_head = nn.Sequential(
+             nn.Linear(hidden_dim, 1),
+         )
+
+         self.quality_head = nn.Sequential(
+             nn.Linear(hidden_dim, 1),
+         )
+
+         self.composition_head = nn.Sequential(
+             nn.Linear(hidden_dim, 1),
+         )
+
+         self.light_head = nn.Sequential(
+             nn.Linear(hidden_dim, 1),
+         )
+
+         self.color_head = nn.Sequential(
+             nn.Linear(hidden_dim, 1),
+         )
+
+         self.dof_head = nn.Sequential(
+             nn.Linear(hidden_dim, 1),
+         )
+
+         self.content_head = nn.Sequential(
+             nn.Linear(hidden_dim, 1),
+         )
+
+     def forward(self, pixel_values):
+         features = self.backbone(pixel_values).pooler_output
+         return self.aesthetic_head(features), self.quality_head(features), self.composition_head(features), self.light_head(features), self.color_head(features), self.dof_head(features), self.content_head(features)
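Note: a minimal sketch (not part of the commit) of how `AestheticScorer` above might be wired up, assuming the backbone is a `transformers` `CLIPVisionModel` (ViT-B/32) whose `pooler_output` feeds the seven linear heads. Instantiating the class this way gives randomly initialized heads, so the fine-tuned weights still come from `model.pt` as shown in the README.

```python
import torch
from PIL import Image
from transformers import CLIPProcessor, CLIPVisionModel

from aesthetic_scorer import AestheticScorer

# Assumption: the checkpoint in this repo was fine-tuned from the CLIP ViT-B/32 vision encoder
backbone = CLIPVisionModel.from_pretrained("openai/clip-vit-base-patch32")
model = AestheticScorer(backbone)  # heads are untrained here; load model.pt for the fine-tuned scorer
model.eval()

processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
pixel_values = processor(images=Image.open("your_image.jpg"), return_tensors="pt")["pixel_values"]

with torch.no_grad():
    scores = model(pixel_values)  # tuple of 7 tensors of shape (1, 1), one per head

categories = ["Overall", "Quality", "Composition", "Lighting", "Color", "Depth of Field", "Content"]
for category, score in zip(categories, scores):
    print(f"{category}: {score.item():.2f}/5")
```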
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
model.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:59853d88e95c287d101bd692c876232f5cd4a860299060d370258ad68b36042d
+ size 349912662
preprocessor_config.json ADDED
@@ -0,0 +1,28 @@
+ {
+   "crop_size": {
+     "height": 224,
+     "width": 224
+   },
+   "do_center_crop": true,
+   "do_convert_rgb": true,
+   "do_normalize": true,
+   "do_rescale": true,
+   "do_resize": true,
+   "image_mean": [
+     0.48145466,
+     0.4578275,
+     0.40821073
+   ],
+   "image_processor_type": "CLIPImageProcessor",
+   "image_std": [
+     0.26862954,
+     0.26130258,
+     0.27577711
+   ],
+   "processor_class": "CLIPProcessor",
+   "resample": 3,
+   "rescale_factor": 0.00392156862745098,
+   "size": {
+     "shortest_edge": 224
+   }
+ }
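As a quick sanity check of the preprocessing config above (a sketch, not part of the commit; the repo id is the same placeholder used in the README), the image processor can be loaded directly from this repo and applied to an image to confirm it produces a normalized 224×224 tensor:

```python
from PIL import Image
from transformers import CLIPImageProcessor

# Placeholder repo id; point this at the actual Hub repository
image_processor = CLIPImageProcessor.from_pretrained("YOUR_USERNAME/aesthetic-scorer")
pixel_values = image_processor(images=Image.open("your_image.jpg"), return_tensors="pt")["pixel_values"]
print(pixel_values.shape)  # expected: torch.Size([1, 3, 224, 224])
```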
requirements.txt ADDED
@@ -0,0 +1,3 @@
+ torch>=1.8.0
+ transformers>=4.11.0
+ pillow>=8.0.0
special_tokens_map.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "bos_token": {
+     "content": "<|startoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,32 @@
+ {
+   "add_prefix_space": false,
+   "added_tokens_decoder": {
+     "49406": {
+       "content": "<|startoftext|>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "49407": {
+       "content": "<|endoftext|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "bos_token": "<|startoftext|>",
+   "clean_up_tokenization_spaces": false,
+   "do_lower_case": true,
+   "eos_token": "<|endoftext|>",
+   "errors": "replace",
+   "extra_special_tokens": {},
+   "model_max_length": 77,
+   "pad_token": "<|endoftext|>",
+   "processor_class": "CLIPProcessor",
+   "tokenizer_class": "CLIPTokenizer",
+   "unk_token": "<|endoftext|>"
+ }
vocab.json ADDED
The diff for this file is too large to render. See raw diff