
---
license: apache-2.0
datasets:
- CoIR-Retrieval/CodeSearchNet-python-queries-corpus
language:
- en
- ar
- zh
- ru
- fr
metrics:
- loss
- accuracy
base_model: TinyLlama/TinyLlama-1.1B-Chat-v1.0
pipeline_tag: text-generation
library_name: transformers
tags:
- chat
- conversational
- programming
- algorithms
- ethical-hacking
- open-source
- tinyllama
new_version: v1.0
training_data: 156GB of diverse text data (including programming and algorithmic content)
model-index:
- name: TINYLAMA
  results:
  - task:
      type: text-generation
    dataset:
      type: custom
      name: Local Dataset (156GB)
    metrics:
    - type: loss
      value: TBD (To Be Determined after retraining)
    - type: accuracy
      value: TBD (To Be Determined after retraining)
eval_results:
- task: Text Generation
  dataset: Local Dataset (156GB)
  metrics:
  - name: Loss
    value: TBD (To Be Determined after retraining)
  - name: Accuracy
    value: TBD (To Be Determined after retraining)
---
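The metadata above registers a `transformers` text-generation model. A minimal usage sketch follows; the repo id is a placeholder (the card does not state one), so substitute the actual path before running:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

repo_id = "your-username/TINYLAMA"  # placeholder: substitute the actual repo id
tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForCausalLM.from_pretrained(repo_id)

generator = pipeline("text-generation", model=model, tokenizer=tokenizer)
prompt = "Write a Python function that checks whether a string is a palindrome."
print(generator(prompt, max_new_tokens=50)[0]["generated_text"])
```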

config.json ADDED
@@ -0,0 +1,45 @@
+ {
+   "_num_labels": 1,
+   "activation_function": "gelu_new",
+   "architectures": [
+     "GPT2LMHeadModel"
+   ],
+   "attn_pdrop": 0.1,
+   "bos_token_id": 50256,
+   "embd_pdrop": 0.1,
+   "eos_token_id": 50256,
+   "id2label": {
+     "0": "LABEL_0"
+   },
+   "initializer_range": 0.02,
+   "label2id": {
+     "LABEL_0": 0
+   },
+   "layer_norm_epsilon": 1e-05,
+   "model_type": "gpt2",
+   "n_ctx": 1024,
+   "n_embd": 768,
+   "n_head": 12,
+   "n_inner": null,
+   "n_layer": 6,
+   "n_positions": 1024,
+   "reorder_and_upcast_attn": false,
+   "resid_pdrop": 0.1,
+   "scale_attn_by_inverse_layer_idx": false,
+   "scale_attn_weights": true,
+   "summary_activation": null,
+   "summary_first_dropout": 0.1,
+   "summary_proj_to_labels": true,
+   "summary_type": "cls_index",
+   "summary_use_proj": true,
+   "task_specific_params": {
+     "text-generation": {
+       "do_sample": true,
+       "max_length": 50
+     }
+   },
+   "torch_dtype": "float16",
+   "transformers_version": "4.51.1",
+   "use_cache": true,
+   "vocab_size": 50257
+ }
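The config above describes a 6-layer GPT-2 language model: 768-dimensional embeddings, 12 attention heads, a 1024-token context window, and a 50257-token vocabulary, saved in float16. A small sketch of rebuilding that architecture with `transformers`, assuming config.json has been downloaded to the working directory:

```python
from transformers import GPT2Config, GPT2LMHeadModel

# Load the architecture definition shipped in config.json above
# (6 layers, 768-dim embeddings, 12 heads, 1024-token context).
config = GPT2Config.from_json_file("config.json")
model = GPT2LMHeadModel(config)

n_params = sum(p.numel() for p in model.parameters())
print(f"{n_params / 1e6:.1f}M parameters")  # ~82M, consistent with the ~164 MB fp16 safetensors below
```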
generation_config.json ADDED
@@ -0,0 +1,6 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 50256,
+   "eos_token_id": 50256,
+   "transformers_version": "4.51.1"
+ }
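These defaults set GPT-2's `<|endoftext|>` token (id 50256) as both BOS and EOS. A sketch of the equivalent `GenerationConfig` in `transformers`; the commented `generate` call is an assumption that combines these defaults with the sampling settings from config.json's `task_specific_params`:

```python
from transformers import GenerationConfig

# Mirrors generation_config.json above: <|endoftext|> (50256) as both BOS and EOS.
gen_config = GenerationConfig(bos_token_id=50256, eos_token_id=50256)

# Assumed call, using the sampling defaults suggested by config.json:
# outputs = model.generate(**inputs, generation_config=gen_config, do_sample=True, max_length=50)
```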
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:74fc67449109c83aafcc06c26bd90d5ede698510ae13d0c9787f3133b9fa0dad
+ size 163832712
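The three lines above are a Git LFS pointer, not the weights themselves; the roughly 164 MB safetensors file lives in LFS storage. One way to fetch it programmatically, sketched with a placeholder repo id:

```python
from huggingface_hub import hf_hub_download

# "your-username/TINYLAMA" is a placeholder; the card above does not give the repo id.
weights_path = hf_hub_download(repo_id="your-username/TINYLAMA", filename="model.safetensors")
print(weights_path)
```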
optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2188eb8623f7e71b920ab341ef3ba9e600b2037942429d360f9f8c2290e47df5
+ size 327712058
rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:aad440af714c205e76efff6aa1e7af2d3eb478003db06214ecc6569fe02a4a1f
+ size 13990
scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fc7c32259d51730d9f585a8cfbc002399970b539c9a026c11ce95a37aa25dd07
+ size 1064
trainer_state.json ADDED
@@ -0,0 +1,55 @@
+ {
+   "best_global_step": null,
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 1.0,
+   "eval_steps": 500,
+   "global_step": 3,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.3333333333333333,
+       "grad_norm": NaN,
+       "learning_rate": 5e-05,
+       "loss": 0.0,
+       "step": 1
+     },
+     {
+       "epoch": 0.6666666666666666,
+       "grad_norm": NaN,
+       "learning_rate": 3.3333333333333335e-05,
+       "loss": 0.0,
+       "step": 2
+     },
+     {
+       "epoch": 1.0,
+       "grad_norm": NaN,
+       "learning_rate": 1.6666666666666667e-05,
+       "loss": 0.0,
+       "step": 3
+     }
+   ],
+   "logging_steps": 1,
+   "max_steps": 3,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 1,
+   "save_steps": 1,
+   "stateful_callbacks": {
+     "TrainerControl": {
+       "args": {
+         "should_epoch_stop": false,
+         "should_evaluate": false,
+         "should_log": false,
+         "should_save": true,
+         "should_training_stop": true
+       },
+       "attributes": {}
+     }
+   },
+   "total_flos": 489931407360.0,
+   "train_batch_size": 1,
+   "trial_name": null,
+   "trial_params": null
+ }
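trainer_state.json records a 3-step run whose logged loss is 0.0 with NaN gradient norms, which may be why the card lists its metrics as TBD pending retraining. A small sketch for inspecting that log locally, assuming the file has been downloaded:

```python
import json

# Python's json module accepts the bare NaN values that appear in grad_norm.
with open("trainer_state.json") as f:
    state = json.load(f)

for entry in state["log_history"]:
    print(f"step {entry['step']}: loss={entry['loss']}, "
          f"grad_norm={entry['grad_norm']}, lr={entry['learning_rate']}")
```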
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1abb072f49907d09fe0fe06ac83d238961aacd9cc5b832c75eb128aa72cdbd66
+ size 5304