
README.md
CHANGED
@@ -1,14 +1,83 @@
-
-
-
-
-
-
-
+- [data.csv](./data.csv)
+
+# Use a pipeline as a high-level helper
+from transformers import pipeline
+
+pipe = pipeline("text-generation", model="deepseek-ai/DeepSeek-V3.1-Base", trust_remote_code=True)
+messages = [
+    {"role": "user", "content": "Who are you?"},
+]
+pipe(messages)
+
+# Load model directly
+from transformers import AutoTokenizer, AutoModelForCausalLM
+
+tokenizer = AutoTokenizer.from_pretrained("deepseek-ai/DeepSeek-V3.1-Base", trust_remote_code=True)
+model = AutoModelForCausalLM.from_pretrained("deepseek-ai/DeepSeek-V3.1-Base", trust_remote_code=True)
+messages = [
+    {"role": "user", "content": "Who are you?"},
+]
+inputs = tokenizer.apply_chat_template(
+    messages,
+    add_generation_prompt=True,
+    tokenize=True,
+    return_dict=True,
+    return_tensors="pt",
+).to(model.device)
+
+outputs = model.generate(**inputs, max_new_tokens=40)
+print(tokenizer.decode(outputs[0][inputs["input_ids"].shape[-1]:]))
+
+import polars as pl
+
+df = pl.read_csv('hf://datasets/fka/awesome-chatgpt-prompts/prompts.csv')
+
+SELECT * FROM parquet_metadata('hf://datasets/jamescalam/world-cities-geo@~parquet/default/train/0000.parquet');
+
+┌─────────────────────────────────────────────────────────────────────────────────┬──────────────┬────────────────────┬─────────────┐
+│                                    file_name                                    │ row_group_id │ row_group_num_rows │ compression │
+│                                     varchar                                     │    int64     │       int64        │   varchar   │
+├─────────────────────────────────────────────────────────────────────────────────┼──────────────┼────────────────────┼─────────────┤
+│ hf://datasets/jamescalam/world-cities-geo@~parquet/default/train/0000.parquet   │      0       │        1000        │   SNAPPY    │
+│ hf://datasets/jamescalam/world-cities-geo@~parquet/default/train/0000.parquet   │      0       │        1000        │   SNAPPY    │
+│ hf://datasets/jamescalam/world-cities-geo@~parquet/default/train/0000.parquet   │      0       │        1000        │   SNAPPY    │
+└─────────────────────────────────────────────────────────────────────────────────┴──────────────┴────────────────────┴─────────────┘
+
+GIT_LFS_SKIP_SMUDGE=1 git clone git@hf.co:datasets/jupyter-agent/jupyter-agent-dataset
+hf download jupyter-agent/jupyter-agent-dataset --repo-type=dataset
+git lfs install
+git clone git@hf.co:datasets/jupyter-agent/jupyter-agent-dataset
```csv
kind,sound
dog,woof
@@ -23,5 +92,5 @@ The YAML section of the README does not contain anything related to loading the
---
size_categories:
- n<1K
----
+---
```
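
The same `hf://` pattern used in the snippets above also works for this dataset's own file. A minimal sketch, assuming the single `data.csv` at the repository root and the `kind`/`sound` columns described in the card:

```python
import polars as pl

# Polars can read hf:// URLs directly, so the CSV streams straight from the Hub.
df = pl.read_csv("hf://datasets/datasets-examples/doc-formats-csv-1/data.csv")

print(df)  # expected columns: kind, sound (rows like dog/woof, cat/meow)
```

Equivalent reads should work from pandas or DuckDB over the same URL: pandas resolves `hf://` through `huggingface_hub`'s filesystem, and DuckDB supports the scheme via its `httpfs` extension.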