Vaibhav-Singh committed on
Commit
a0e29cc
·
1 Parent(s): 92d35c2

transformerjs

Browse files
Files changed (9) hide show
  1. .gitattributes +0 -35
  2. .gitignore +2 -0
  3. Dockerfile +0 -21
  4. README.md +0 -10
  5. app.py +0 -61
  6. bun.lockb +0 -0
  7. index.ts +46 -0
  8. package.json +17 -0
  9. requirements.txt +0 -7
.gitattributes DELETED
@@ -1,35 +0,0 @@
1
- *.7z filter=lfs diff=lfs merge=lfs -text
2
- *.arrow filter=lfs diff=lfs merge=lfs -text
3
- *.bin filter=lfs diff=lfs merge=lfs -text
4
- *.bz2 filter=lfs diff=lfs merge=lfs -text
5
- *.ckpt filter=lfs diff=lfs merge=lfs -text
6
- *.ftz filter=lfs diff=lfs merge=lfs -text
7
- *.gz filter=lfs diff=lfs merge=lfs -text
8
- *.h5 filter=lfs diff=lfs merge=lfs -text
9
- *.joblib filter=lfs diff=lfs merge=lfs -text
10
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
- *.model filter=lfs diff=lfs merge=lfs -text
13
- *.msgpack filter=lfs diff=lfs merge=lfs -text
14
- *.npy filter=lfs diff=lfs merge=lfs -text
15
- *.npz filter=lfs diff=lfs merge=lfs -text
16
- *.onnx filter=lfs diff=lfs merge=lfs -text
17
- *.ot filter=lfs diff=lfs merge=lfs -text
18
- *.parquet filter=lfs diff=lfs merge=lfs -text
19
- *.pb filter=lfs diff=lfs merge=lfs -text
20
- *.pickle filter=lfs diff=lfs merge=lfs -text
21
- *.pkl filter=lfs diff=lfs merge=lfs -text
22
- *.pt filter=lfs diff=lfs merge=lfs -text
23
- *.pth filter=lfs diff=lfs merge=lfs -text
24
- *.rar filter=lfs diff=lfs merge=lfs -text
25
- *.safetensors filter=lfs diff=lfs merge=lfs -text
26
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
- *.tar.* filter=lfs diff=lfs merge=lfs -text
28
- *.tar filter=lfs diff=lfs merge=lfs -text
29
- *.tflite filter=lfs diff=lfs merge=lfs -text
30
- *.tgz filter=lfs diff=lfs merge=lfs -text
31
- *.wasm filter=lfs diff=lfs merge=lfs -text
32
- *.xz filter=lfs diff=lfs merge=lfs -text
33
- *.zip filter=lfs diff=lfs merge=lfs -text
34
- *.zst filter=lfs diff=lfs merge=lfs -text
35
- *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
.gitignore ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ node_modules
2
+ cache
Dockerfile DELETED
@@ -1,21 +0,0 @@
1
- # Use the official Python slim image
2
- FROM python:3.8-slim
3
-
4
- # Set environment variable for Hugging Face cache
5
- ENV HF_HOME=/tmp/.huggingface
6
-
7
- # Working directory
8
- WORKDIR /app
9
-
10
- # Copy dependencies
11
- COPY requirements.txt .
12
- RUN pip install --no-cache-dir -r requirements.txt
13
-
14
- # Copy application code
15
- COPY . .
16
-
17
- # Expose the port FastAPI will run on
18
- EXPOSE 7860
19
-
20
- # Run the API
21
- CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
README.md DELETED
@@ -1,10 +0,0 @@
1
- ---
2
- title: SmolLM2 135M
3
- emoji: 🌖
4
- colorFrom: blue
5
- colorTo: purple
6
- sdk: docker
7
- pinned: false
8
- ---
9
-
10
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
 
app.py DELETED
@@ -1,61 +0,0 @@
1
- from fastapi import FastAPI, HTTPException
2
- from pydantic import BaseModel
3
- from transformers import AutoModelForCausalLM, AutoTokenizer
4
- from typing import List
5
- import torch
6
-
7
- app = FastAPI(title="Language Model API")
8
-
9
- # Model configuration
10
- CHECKPOINT = "HuggingFaceTB/SmolLM2-135M-Instruct"
11
- DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
12
-
13
- # Initialize model and tokenizer
14
- try:
15
- tokenizer = AutoTokenizer.from_pretrained(CHECKPOINT)
16
- model = AutoModelForCausalLM.from_pretrained(CHECKPOINT).to(DEVICE)
17
- except Exception as e:
18
- raise RuntimeError(f"Failed to load model: {str(e)}")
19
-
20
- class ChatMessage(BaseModel):
21
- role: str
22
- content: str
23
-
24
- class ChatRequest(BaseModel):
25
- messages: List[ChatMessage]
26
- max_new_tokens: int = 50
27
- temperature: float = 0.2
28
- top_p: float = 0.9
29
-
30
- @app.post("/generate")
31
- async def generate_response(request: ChatRequest):
32
- try:
33
- # Convert messages to the format expected by the model
34
- messages = [{"role": msg.role, "content": msg.content} for msg in request.messages]
35
-
36
- # Prepare input
37
- input_text = tokenizer.apply_chat_template(messages, tokenize=False)
38
- inputs = tokenizer.encode(input_text, return_tensors="pt").to(DEVICE)
39
-
40
- # Generate response
41
- outputs = model.generate(
42
- inputs,
43
- max_new_tokens=request.max_new_tokens,
44
- temperature=request.temperature,
45
- top_p=request.top_p,
46
- do_sample=True
47
- )
48
-
49
- # Decode and return response
50
- response_text = tokenizer.decode(outputs[0])
51
-
52
- return {
53
- "generated_text": response_text
54
- }
55
-
56
- except Exception as e:
57
- raise HTTPException(status_code=500, detail=str(e))
58
-
59
- if __name__ == "__main__":
60
- import uvicorn
61
- uvicorn.run(app, host="0.0.0.0", port=7860)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
bun.lockb ADDED
Binary file (57.5 kB). View file
 
index.ts ADDED
@@ -0,0 +1,46 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import { pipeline } from "@huggingface/transformers";
2
+
3
+ function loadingBar(total: number, current: number, barLength = 20) {
4
+ const filled = Math.round((current / total) * barLength);
5
+ const empty = barLength - filled;
6
+
7
+ const bar = `[${"#".repeat(filled)}${" ".repeat(empty)}] ${Math.round(
8
+ (current / total) * 100
9
+ )}%`;
10
+ process.stdout.write("\r" + bar);
11
+ }
12
+
13
+ process.stdout.write("\n"); // Move
14
+
15
+ async function run() {
16
+ // Load a pre-trained model and tokenizer
17
+ const pipe = await pipeline(
18
+ "text-generation",
19
+ "HuggingFaceTB/SmolLM2-135M-Instruct",
20
+ // "HuggingFaceTB/SmolLM2-360M-Instruct",
21
+ // "HuggingFaceTB/SmolLM2-1.7B-Instruct",
22
+ {
23
+ cache_dir: "./cache",
24
+ progress_callback: (progress: any) => {
25
+ loadingBar(progress.total, progress.loaded);
26
+ },
27
+ }
28
+ );
29
+
30
+ // Perform sentiment analysis
31
+ const result = await pipe(
32
+ [
33
+ {
34
+ role: "user",
35
+ content: `Recomend a song for me based on these songs: 'Dheere Dheere Aap Mere', 'Zara Zara - Deep House Mix', 'Hey Minnale (From "Amaran") (Tamil)', 'Apna Bana Le (From "Bhediya")`,
36
+ },
37
+ ],
38
+ {
39
+ max_new_tokens: 100,
40
+ }
41
+ );
42
+
43
+ console.log(result);
44
+ }
45
+
46
+ run().catch(console.error);
package.json ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "name": "test",
3
+ "version": "1.0.0",
4
+ "main": "index.js",
5
+ "scripts": {
6
+ "test": "echo \"Error: no test specified\" && exit 1"
7
+ },
8
+ "keywords": [],
9
+ "author": "",
10
+ "license": "ISC",
11
+ "type": "commonjs",
12
+ "description": "",
13
+ "dependencies": {
14
+ "@huggingface/transformers": "^3.3.1",
15
+ "@xenova/transformers": "^2.17.2"
16
+ }
17
+ }
requirements.txt DELETED
@@ -1,7 +0,0 @@
1
- fastapi
2
- uvicorn
3
- transformers
4
- torch
5
- peft
6
- huggingface_hub
7
- safetensors