devmanpreet committed
Commit 54c4d86 · verified · 1 Parent(s): aca0ce4

Upload 4 files
Files changed (5):
  1. .gitattributes +1 -0
  2. app.py +150 -0
  3. biofinetuned_partialEpoch1.pth +3 -0
  4. requirements.txt +4 -0
  5. train.csv +3 -0
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+train.csv filter=lfs diff=lfs merge=lfs -text
app.py ADDED
@@ -0,0 +1,150 @@
+import torch
+import gradio as gr
+import tiktoken
+import pandas as pd
+import torch.nn as nn
+
+GPT_CONFIG_124M = {
+    "vocab_size": 50257,
+    "context_length": 1024,
+    "emb_dim": 768,
+    "n_heads": 12,
+    "n_layers": 12,
+    "drop_rate": 0.1,
+    "qkv_bias": True
+}
+
+class multiheadv2(nn.Module):
+    def __init__(self, d_in, d_out, context_length, dropout, attention_head, boolbias):
+        super().__init__()
+        self.head_dim = d_out // attention_head
+        self.d_out = d_out
+        self.attention_head = attention_head
+        self.W_query = nn.Linear(d_in, d_out, bias=boolbias)
+        self.W_key = nn.Linear(d_in, d_out, bias=boolbias)
+        self.W_value = nn.Linear(d_in, d_out, bias=boolbias)
+        self.out_proj = nn.Linear(d_out, d_out)
+        self.dropout = nn.Dropout(dropout)
+        self.register_buffer('mask', torch.triu(torch.ones(context_length, context_length), diagonal=1))
+
+    def forward(self, x):
+        b, num_token, d_out = x.shape
+        keys = self.W_key(x)
+        queries = self.W_query(x)
+        values = self.W_value(x)
+        keys = keys.view(b, num_token, self.attention_head, self.head_dim).transpose(1, 2)
+        queries = queries.view(b, num_token, self.attention_head, self.head_dim).transpose(1, 2)
+        values = values.view(b, num_token, self.attention_head, self.head_dim).transpose(1, 2)
+        attn_score = queries @ keys.transpose(2, 3)
+        mask_bool = self.mask.bool()[:num_token, :num_token]
+        attn_score.masked_fill_(mask_bool, -torch.inf)
+        attn_weights = torch.softmax(attn_score / keys.shape[-1]**0.5, dim=-1)
+        attn_weights = self.dropout(attn_weights)
+        context_vec = (attn_weights @ values).transpose(1, 2).contiguous().view(b, num_token, self.d_out)
+        context_vec = self.out_proj(context_vec)
+        return context_vec
+
+class LayerNorm(nn.Module):
+    def __init__(self, emb_dim):
+        super().__init__()
+        self.eps = 1e-5
+        self.scale_params = nn.Parameter(torch.ones(emb_dim))
+        self.shift_params = nn.Parameter(torch.zeros(emb_dim))
+
+    def forward(self, x):
+        mean = x.mean(dim=-1, keepdim=True)
+        var = x.var(dim=-1, keepdim=True, unbiased=False)
+        norm = (x - mean) / torch.sqrt(var + self.eps)
+        return norm * self.scale_params + self.shift_params
+
+class GELU(nn.Module):
+    def forward(self, x):
+        return 0.5 * x * (1 + torch.tanh(torch.sqrt(torch.tensor(2.0 / torch.pi)) * (x + 0.044715 * torch.pow(x, 3))))
+
+class feedforward(nn.Module):
+    def __init__(self, config):
+        super().__init__()
+        self.layers = nn.Sequential(
+            nn.Linear(config['emb_dim'], config['emb_dim'] * 4),
+            GELU(),
+            nn.Linear(config['emb_dim'] * 4, config['emb_dim']),
+        )
+
+    def forward(self, x):
+        return self.layers(x)
+
+class TransformerBlock(nn.Module):
+    def __init__(self, config):
+        super().__init__()
+        self.attn = multiheadv2(d_in=config['emb_dim'], d_out=config['emb_dim'], context_length=config['context_length'], dropout=config['drop_rate'], attention_head=config['n_heads'], boolbias=config['qkv_bias'])
+        self.Layernorm1 = LayerNorm(config['emb_dim'])
+        self.Layernorm2 = LayerNorm(config['emb_dim'])
+        self.feedforw = feedforward(config)
+        self.dropout = nn.Dropout(config['drop_rate'])
+
+    def forward(self, x):
+        skip = x
+        x = self.Layernorm1(x)
+        x = self.attn(x)
+        x = self.dropout(x)
+        x = x + skip
+        skip = x
+        x = self.Layernorm2(x)
+        x = self.feedforw(x)
+        x = self.dropout(x)
+        x = x + skip
+        return x
+
+class GPT_2(nn.Module):
+    def __init__(self, cfg, num_classes):
+        super().__init__()
+        self.token_emb = nn.Embedding(cfg['vocab_size'], cfg["emb_dim"])
+        self.pos_emb = nn.Embedding(cfg['context_length'], cfg["emb_dim"])
+        self.drop_emb = nn.Dropout(cfg["drop_rate"])
+        self.trf_blocks = nn.Sequential(*[TransformerBlock(cfg) for _ in range(cfg["n_layers"])])
+        self.final_norm = LayerNorm(cfg["emb_dim"])
+        self.out_head = nn.Linear(cfg["emb_dim"], num_classes)
+
+    def forward(self, inputidx):
+        batch_size, seq = inputidx.shape
+        tokens = self.token_emb(inputidx)
+        pos_embeds = self.pos_emb(torch.arange(seq, device=inputidx.device))
+        x = tokens + pos_embeds
+        x = self.drop_emb(x)
+        x = self.trf_blocks(x)
+        x = self.final_norm(x)
+        logits = self.out_head(x[:, -1])
+        return logits
+
+tokenizer = tiktoken.get_encoding("gpt2")
+pad_token_id = tokenizer.eot_token
+
+df_temp = pd.read_csv("train.csv")
+label_mapping = dict(enumerate(df_temp["target"].astype("category").cat.categories))
+num_classes = len(label_mapping)
+inv_label_mapping = {v: k for k, v in label_mapping.items()}
+
+device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+model = GPT_2(GPT_CONFIG_124M, num_classes)
+model.load_state_dict(torch.load("biofinetuned_partialEpoch1.pth", map_location=device))
+model.to(device)
+model.eval()
+
+def classify_review(text, max_length=128):
+    input_ids = tokenizer.encode(text)[:max_length]
+    input_ids += [pad_token_id] * (max_length - len(input_ids))
+    input_tensor = torch.tensor(input_ids, device=device).unsqueeze(0)
+    with torch.no_grad():
+        logits = model(input_tensor)
+    predicted_label = torch.argmax(logits, dim=-1).item()
+    return label_mapping[predicted_label]
+
+iface = gr.Interface(
+    fn=classify_review,
+    inputs=gr.Textbox(label="Enter Medical Abstract / Review"),
+    outputs=gr.Textbox(label="Predicted Category"),
+    title="MedGPT",
+    description="Fast biomedical text classifier trained on domain-specific corpus"
+)
+
+iface.launch()
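
Note: once app.py is running (locally with python app.py, or as a hosted Space), the single gr.Interface above exposes a /predict endpoint that can be queried programmatically. A minimal sketch, assuming the app is reachable at Gradio's default local address; gradio_client is an extra dependency not listed in requirements.txt, and the URL and sample abstract are illustrative only:

# Sketch: query the running MedGPT interface through its default Gradio endpoint.
from gradio_client import Client

client = Client("http://127.0.0.1:7860")  # replace with the Space URL if hosted remotely
result = client.predict(
    "We evaluated a novel kinase inhibitor in patients with non-small cell lung cancer.",
    api_name="/predict",  # default endpoint name for a single gr.Interface
)
print(result)  # the predicted category label returned by classify_review

Calling classify_review directly from another module would also work, but importing app.py as written immediately executes iface.launch().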
biofinetuned_partialEpoch1.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1976e67402bb6817609830f3e9188bac1baf2aa1f5e1126d7830244a426fe8c3
+size 548184304
requirements.txt ADDED
@@ -0,0 +1,4 @@
+torch
+gradio
+tiktoken
+pandas
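
Note: the dependencies above are unpinned; for a local run they would typically be installed with pip install -r requirements.txt, and the two Git LFS files (train.csv and biofinetuned_partialEpoch1.pth) would need to be fetched with git lfs pull before app.py can start.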
train.csv ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:099d9b6ebd52daa2b0c714ffdd40e02106b2a1ed87ea3fed4ea2886eda1ad870
+size 34433298
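
Note: app.py uses train.csv only to recover the label set, mapping the categories of its "target" column to class indices (label_mapping / num_classes). A minimal sketch for inspecting those labels, assuming the LFS object has been pulled; the column name comes from app.py, everything else is illustrative:

# Sketch: list the categories app.py derives from train.csv.
import pandas as pd

df = pd.read_csv("train.csv")
categories = df["target"].astype("category").cat.categories
print(len(categories))   # num_classes, i.e. the size of the model's classification head
print(list(categories))  # the labels classify_review can return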