hungchiayu committed on
Commit dd905c9 · 1 Parent(s): 2d8fe48

Upload 2 files

Files changed (2)
  1. VT5.py +68 -0
  2. weights.bin +3 -0
VT5.py ADDED
@@ -0,0 +1,68 @@
+from typing import Tuple
+
+import torch
+from torch import nn
+
+
+class MLP(nn.Module):
+    """Feed-forward network used as the image-to-text mapping network."""
+
+    def __init__(self, sizes: Tuple[int, ...], bias: bool = True, act=nn.Tanh):
+        super().__init__()
+        layers = []
+        for i in range(len(sizes) - 1):
+            layers.append(nn.Linear(sizes[i], sizes[i + 1], bias=bias))
+            if i < len(sizes) - 2:  # no activation after the final layer
+                layers.append(act())
+        self.model = nn.Sequential(*layers)
+
+    def forward(self, x: torch.Tensor) -> torch.Tensor:
+        return self.model(x)
+
+
+class VT5(nn.Module):
+    """Projects an image embedding to a prefix of T5 input embeddings and
+    lets T5 generate the caption."""
+
+    def __init__(self, t5, tokenizer, vision_model, image_emb_size=512, prefix_length=10):
+        super().__init__()
+        self.t5 = t5
+        self.tokenizer = tokenizer
+        self.t5_embedding_size = t5.get_input_embeddings().embedding_dim
+        self.image_emb_size = image_emb_size
+        self.prefix_length = prefix_length
+        self.vision_model = vision_model
+        # Mapping network that projects the image embedding space into the
+        # language model's embedding space, one vector per prefix position.
+        self.prefix_projection = MLP((
+            self.image_emb_size,
+            (self.t5_embedding_size * prefix_length) // 2,
+            self.t5_embedding_size * prefix_length,
+        ))
+
+    def forward(self, pixel_values, output_ids):
+        image_embeds = self.vision_model(pixel_values).image_embeds
+        mapped_embedding = self.prefix_projection(image_embeds).view(
+            -1, self.prefix_length, self.t5_embedding_size
+        )
+        # (A text prefix could be concatenated here, e.g.
+        # torch.cat([text_embedding, mapped_embedding], dim=1).)
+        # Mask pad tokens so no loss is computed on them; masked_fill avoids
+        # mutating the caller's tensor in place.
+        labels = output_ids.masked_fill(output_ids == self.tokenizer.pad_token_id, -100)
+        outputs = self.t5(inputs_embeds=mapped_embedding, labels=labels)
+        return outputs
+
+    @torch.no_grad()
+    def generate_caption(self, pixel_values):
+        image_embeds = self.vision_model(pixel_values).image_embeds
+        mapped_embedding = self.prefix_projection(image_embeds).view(
+            -1, self.prefix_length, self.t5_embedding_size
+        )
+        output_tokens = self.t5.generate(inputs_embeds=mapped_embedding)
+        caption = self.tokenizer.decode(output_tokens[0], skip_special_tokens=True)
+        return caption
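For context, a minimal sketch of how this VT5 module could be wired up for caption generation. The checkpoint names, the CLIP vision tower, and the image path below are illustrative assumptions, not taken from this commit; any vision model whose output exposes an `image_embeds` field of dimension `image_emb_size` would fit.

    import torch
    from PIL import Image
    from transformers import (
        AutoTokenizer,
        CLIPImageProcessor,
        CLIPVisionModelWithProjection,
        T5ForConditionalGeneration,
    )

    from VT5 import VT5

    # Hypothetical checkpoints: CLIP ViT-B/32 has a 512-dim projection head,
    # matching the default image_emb_size; the T5 checkpoint is a stand-in.
    tokenizer = AutoTokenizer.from_pretrained("t5-base")
    t5 = T5ForConditionalGeneration.from_pretrained("t5-base")
    vision_model = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-base-patch32")
    processor = CLIPImageProcessor.from_pretrained("openai/clip-vit-base-patch32")

    model = VT5(t5, tokenizer, vision_model, image_emb_size=512, prefix_length=10)
    model.eval()

    image = Image.open("example.jpg")  # hypothetical input image
    pixel_values = processor(images=image, return_tensors="pt").pixel_values
    print(model.generate_caption(pixel_values))

The design follows the ClipCap-style prefix-mapping idea: the MLP translates a single image embedding into `prefix_length` pseudo-token embeddings that T5's encoder consumes via `inputs_embeds`.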
weights.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2a57d90a916410779349b6f0c767b7fa14d664e8cce7a59e76a028888dcb214d
+size 717125970
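weights.bin is stored as a Git LFS pointer (about 717 MB). The commit does not say how it was serialized; assuming it is a state_dict produced by torch.save(model.state_dict(), "weights.bin"), loading it into the VT5 instance from the sketch above would look like:

    import torch

    state_dict = torch.load("weights.bin", map_location="cpu")  # assumption: plain state_dict
    model.load_state_dict(state_dict)  # `model` is the VT5 instance built above
    model.eval()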