bigband committed
Commit fbe7af9 · verified · 1 parent: 6b8c441

Add files using upload-large-folder tool

.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,79 @@
+ ---
+ license: gemma
+ library_name: transformers
+ pipeline_tag: text-generation
+ extra_gated_heading: Access Gemma on Hugging Face
+ extra_gated_prompt: >-
+   To access Gemma on Hugging Face, you’re required to review and agree to
+   Google’s usage license. To do this, please ensure you’re logged in to Hugging
+   Face and click below. Requests are processed immediately.
+ extra_gated_button_content: Acknowledge license
+ base_model: google/gemma-3-27b-it
+ tags:
+ - transformers
+ - gemma3
+ - gemma
+ - google
+ - Bifröst
+ - Bifrost
+ - code
+ ---
+
+ ## Bifröst-27B
+
+ ![image/png](https://cdn-uploads.huggingface.co/production/uploads/64a834a8895fd6416e29576f/sAXfe0cQdULI_GEVxBstw.png)
+
+ Bifröst-27B is an advanced AI model built on the Gemma 3 architecture and fine-tuned specifically for secure, efficient, enterprise-grade code generation with reasoning. Designed to meet rigorous standards of safety, accuracy, and reliability, Bifröst empowers organizations to streamline software development workflows while prioritizing security and compliance.
+
+ ### Model Details
+ - **Model Name:** Bifröst-27B
+ - **Base Architecture:** Gemma 3
+ - **Application:** Enterprise Secure Code Generation
+ - **Release Date:** 16 March 2025
+
+ ### Intended Use
+ Bifröst is designed explicitly for:
+ - Generating secure, efficient, and high-quality code.
+ - Supporting development tasks within regulated enterprise environments.
+ - Enhancing productivity by automating routine coding tasks without compromising security.
+
+ ### Features
+ - **Security-Focused Training:** A specialized training regimen emphasizing secure coding practices, vulnerability reduction, and adherence to security standards.
+ - **Enterprise-Optimized Performance:** Tailored to support various programming languages and enterprise frameworks with robust, context-aware suggestions.
+ - **Compliance-Driven Design:** Incorporates features that help maintain compliance with industry-specific standards (e.g., GDPR, HIPAA, SOC 2).
+
+ ### Limitations
+ - Bifröst should be used under human supervision to ensure code correctness and security compliance.
+ - Model-generated code should undergo appropriate security and quality-assurance checks before deployment.
+
+ ### Ethical Considerations
+ - Users are encouraged to perform regular audits and compliance checks on generated outputs.
+ - Enterprises should implement responsible AI practices to mitigate biases or unintended consequences.
+
+ ### Usage
+ Below are quick-start instructions for using the model with the `transformers` library.
+
+ #### Installation
+ ```sh
+ $ pip install git+https://github.com/huggingface/[email protected]
+ ```
+
+ #### Running with the `pipeline` API
+ ```python
+ from transformers import pipeline
+ import torch
+
+ pipe = pipeline(
+     "text-generation",
+     model="OpenGenerativeAI/Bifrost-27B",
+     device="cuda",
+     torch_dtype=torch.bfloat16
+ )
+
+ messages = [{"role": "user", "content": "Generate a secure API key management system."}]
+ output = pipe(messages, max_new_tokens=200)
+
+ # generated_text holds the whole chat; the last message is the model's reply
+ print(output[0]["generated_text"][-1]["content"])
+ ```
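+
+ #### Loading the checkpoint directly
+ For multi-GPU or memory-constrained setups, the sharded weights can also be loaded without the `pipeline` helper. The snippet below is a minimal, untested sketch: it assumes the `Gemma3ForConditionalGeneration` and `AutoProcessor` classes from the `transformers` Gemma 3 release and reuses the repo id from the example above.
+ ```python
+ from transformers import AutoProcessor, Gemma3ForConditionalGeneration
+ import torch
+
+ model_id = "OpenGenerativeAI/Bifrost-27B"  # repo id reused from the pipeline example
+
+ # device_map="auto" spreads the twelve bfloat16 shards across available devices
+ model = Gemma3ForConditionalGeneration.from_pretrained(
+     model_id, device_map="auto", torch_dtype=torch.bfloat16
+ ).eval()
+ processor = AutoProcessor.from_pretrained(model_id)
+
+ messages = [{"role": "user", "content": "Generate a secure API key management system."}]
+ inputs = processor.apply_chat_template(
+     messages, add_generation_prompt=True, tokenize=True,
+     return_dict=True, return_tensors="pt"
+ ).to(model.device)
+
+ with torch.inference_mode():
+     out = model.generate(**inputs, max_new_tokens=200)
+
+ # Decode only the newly generated tokens, skipping the prompt
+ print(processor.decode(out[0][inputs["input_ids"].shape[-1]:], skip_special_tokens=True))
+ ```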
+
+ ## Terms of Use
+ This model is released under the **Gemma license**. Users must comply with [Google's Gemma Terms of Use](https://ai.google.dev/gemma/terms), including restrictions on redistribution, modification, and commercial use.
added_tokens.json ADDED
@@ -0,0 +1 @@
+ {"<image_soft_token>": 262144}
chat_template.json ADDED
@@ -0,0 +1 @@
+ {"chat_template": "{{ bos_token }}\n{%- if messages[0]['role'] == 'system' -%}\n {%- if messages[0]['content'] is string -%}\n {%- set first_user_prefix = messages[0]['content'] + '\n\n' -%}\n {%- else -%}\n {%- set first_user_prefix = messages[0]['content'][0]['text'] + '\n\n' -%}\n {%- endif -%}\n {%- set loop_messages = messages[1:] -%}\n{%- else -%}\n {%- set first_user_prefix = \"\" -%}\n {%- set loop_messages = messages -%}\n{%- endif -%}\n{%- for message in loop_messages -%}\n {%- if (message['role'] == 'user') != (loop.index0 % 2 == 0) -%}\n {{ raise_exception(\"Conversation roles must alternate user/assistant/user/assistant/...\") }}\n {%- endif -%}\n {%- if (message['role'] == 'assistant') -%}\n {%- set role = \"model\" -%}\n {%- else -%}\n {%- set role = message['role'] -%}\n {%- endif -%}\n {{ '<start_of_turn>' + role + '\n' + (first_user_prefix if loop.first else \"\") }}\n {%- if message['content'] is string -%}\n {{ message['content'] | trim }}\n {%- elif message['content'] is iterable -%}\n {%- for item in message['content'] -%}\n {%- if item['type'] == 'image' -%}\n {{ '<start_of_image>' }}\n {%- elif item['type'] == 'text' -%}\n {{ item['text'] | trim }}\n {%- endif -%}\n {%- endfor -%}\n {%- else -%}\n {{ raise_exception(\"Invalid content type\") }}\n {%- endif -%}\n {{ '<end_of_turn>\n' }}\n{%- endfor -%}\n{%- if add_generation_prompt -%}\n {{'<start_of_turn>model\n'}}\n{%- endif -%}\n"}
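
The template above defines the Gemma-style turn format: an optional system message is folded into the first user turn, the `assistant` role is renamed `model`, and every turn is wrapped in `<start_of_turn>` / `<end_of_turn>`. A minimal sketch of rendering it with `apply_chat_template` (the repo id is reused from the README example):

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("OpenGenerativeAI/Bifrost-27B")

messages = [
    {"role": "system", "content": "You write security-reviewed code."},
    {"role": "user", "content": "Hash a password with bcrypt."},
]

# tokenize=False returns the rendered prompt string; add_generation_prompt=True
# appends the trailing "<start_of_turn>model" so generation continues as the model.
prompt = tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)
```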
config.json ADDED
@@ -0,0 +1,43 @@
+ {
+   "architectures": [
+     "Gemma3ForConditionalGeneration"
+   ],
+   "boi_token_index": 255999,
+   "eoi_token_index": 256000,
+   "eos_token_id": [
+     1,
+     106
+   ],
+   "image_token_index": 262144,
+   "initializer_range": 0.02,
+   "mm_tokens_per_image": 256,
+   "model_type": "gemma3",
+   "text_config": {
+     "head_dim": 128,
+     "hidden_size": 5376,
+     "intermediate_size": 21504,
+     "model_type": "gemma3_text",
+     "num_attention_heads": 32,
+     "num_hidden_layers": 62,
+     "num_key_value_heads": 16,
+     "query_pre_attn_scalar": 168,
+     "rope_scaling": {
+       "factor": 8.0,
+       "rope_type": "linear"
+     },
+     "sliding_window": 1024
+   },
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.50.0.dev0",
+   "vision_config": {
+     "hidden_size": 1152,
+     "image_size": 896,
+     "intermediate_size": 4304,
+     "model_type": "siglip_vision_model",
+     "num_attention_heads": 16,
+     "num_hidden_layers": 27,
+     "patch_size": 14,
+     "vision_use_head": false
+   },
+   "_name_or_path": "bigband/AllseeingMictlantecuhtli"
+ }
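
The nested `text_config` / `vision_config` above can be inspected programmatically. A minimal sketch using `AutoConfig` (repo id reused from the README example; a local path to these files works as well):

```python
from transformers import AutoConfig

cfg = AutoConfig.from_pretrained("OpenGenerativeAI/Bifrost-27B")

print(cfg.model_type)                     # "gemma3"
print(cfg.text_config.num_hidden_layers)  # 62 decoder layers, hidden size 5376
print(cfg.text_config.rope_scaling)       # {"factor": 8.0, "rope_type": "linear"}
print(cfg.vision_config.image_size)       # 896, the SigLIP vision tower input size
```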
generation_config.json ADDED
@@ -0,0 +1 @@
+ {"bos_token_id": 2, "cache_implementation": "hybrid", "do_sample": true, "eos_token_id": [1, 106], "pad_token_id": 0, "top_k": 64, "top_p": 0.95, "transformers_version": "4.50.0.dev0"}
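
These are the default decoding settings baked into the checkpoint (sampling enabled, `top_k=64`, `top_p=0.95`); `generate` and the `pipeline` pick them up automatically, and they can be overridden per call. A minimal sketch with `GenerationConfig` (repo id reused from the README example; `model` / `inputs` in the comment refer to the loading sketch earlier and are hypothetical here):

```python
from transformers import GenerationConfig

# Load the defaults shipped with the checkpoint
gen_cfg = GenerationConfig.from_pretrained("OpenGenerativeAI/Bifrost-27B")
print(gen_cfg.do_sample, gen_cfg.top_k, gen_cfg.top_p)  # True 64 0.95

# Per-call arguments take precedence, e.g. greedy decoding for reproducible output:
#   model.generate(**inputs, do_sample=False)
```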
model-00001-of-00012.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d16d1ed8baa11e7e22ba70970aedd26313de5f946e818664ef0cf9848599e49b
+ size 4854573704
model-00002-of-00012.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bda690590d7fe45e3130d4d4b49b174696e768ab26c25b3a3675c422d536ee5d
+ size 4954792952
model-00003-of-00012.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cf9aa3b6d6d6fb084632830f910dbaa0ce79c70e9a7810d565a95d90fa58ffc0
+ size 4954792984
model-00004-of-00012.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f173cb8144d08c87d5fe9c5b6d2b93c2d5959fb76bc0191683b0cc6042a29906
+ size 4954793024
model-00005-of-00012.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f516cc0a95bfc2a667b278b31d8e86dc3f5cd45ade5ef81065f0f23635ffc595
+ size 4954793024
model-00006-of-00012.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3624e95f4dd62a8e73836a29aec3906fae6c412de235736c81908423f43c3125
+ size 4954793024
model-00007-of-00012.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c6dbfe2faca89574d08be0183b354bcf28dbf26b8495976bb8f15ae245408e47
+ size 4954793024
model-00008-of-00012.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c7694e429cc4a53e0ce03c526084ff4e2c29b009cfebad97ec89f07117c8af40
+ size 4954793024
model-00009-of-00012.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:356a4df4fd246ef1ca42a7bebdded148dc32be79f72d146b4981aa3f1e4f282f
+ size 4954793024
model-00010-of-00012.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:494c34038c240e8d005b1a7a1e9c42a8dae39af65abdf1dcd070c29b9619706b
+ size 4954793024
model-00011-of-00012.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cfa5b24b75b50ae1cf4c52f0cd0d6a39f821665a6f9c0def8d801f6db2a52fd2
+ size 4954793024
model-00012-of-00012.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d64fd0b96ed24709fcb1eb6cbfa841f3a6f3839dc6fc053ebca324bf161d1f49
+ size 462476704
model.safetensors.index.json ADDED
The diff for this file is too large to render. See raw diff
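
This is the standard sharded-safetensors manifest: `from_pretrained` reads it to locate the shard that holds each tensor, so no manual handling is needed. A minimal sketch of inspecting a downloaded copy (the filename below assumes a local checkout of this repo):

```python
import json

# "metadata" holds the total checkpoint size in bytes; "weight_map" maps each
# parameter name to the shard file that stores it.
with open("model.safetensors.index.json") as f:
    index = json.load(f)

print(index["metadata"]["total_size"])
shards = sorted(set(index["weight_map"].values()))
print(len(shards))  # expected: 12, matching the model-000xx-of-00012.safetensors files above
```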
 
preprocessor_config.json ADDED
@@ -0,0 +1 @@
+ {"do_convert_rgb": null, "do_normalize": true, "do_pan_and_scan": null, "do_rescale": true, "do_resize": true, "image_mean": [0.5, 0.5, 0.5], "image_processor_type": "Gemma3ImageProcessor", "image_seq_length": 256, "image_std": [0.5, 0.5, 0.5], "pan_and_scan_max_num_crops": null, "pan_and_scan_min_crop_size": null, "pan_and_scan_min_ratio_to_activate": null, "processor_class": "Gemma3Processor", "resample": 2, "rescale_factor": 0.00392156862745098, "size": {"height": 896, "width": 896}}
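
Per this config, each image is resized to 896×896, rescaled by 1/255, normalized with mean/std 0.5, and represented by 256 image soft tokens (pan-and-scan settings are left unset). A minimal, untested sketch of preparing an image+text prompt with the processor (repo id reused from the README example; the image URL is a public Hugging Face documentation image used only as a placeholder):

```python
from transformers import AutoProcessor

processor = AutoProcessor.from_pretrained("OpenGenerativeAI/Bifrost-27B")

messages = [{"role": "user", "content": [
    {"type": "image", "image": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/bee.jpg"},
    {"type": "text", "text": "Describe this image."},
]}]

# apply_chat_template runs the Gemma3Processor: the image is preprocessed per the
# settings above and its 256 soft tokens are inserted into the prompt.
inputs = processor.apply_chat_template(
    messages, add_generation_prompt=True, tokenize=True,
    return_dict=True, return_tensors="pt"
)
print(inputs["pixel_values"].shape, inputs["input_ids"].shape)
```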
processor_config.json ADDED
@@ -0,0 +1 @@
+ {"image_seq_length": 256, "processor_class": "Gemma3Processor"}
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"boi_token": "<start_of_image>", "bos_token": {"content": "<bos>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false}, "eoi_token": "<end_of_image>", "eos_token": {"content": "<eos>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false}, "image_token": "<image_soft_token>", "pad_token": {"content": "<pad>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false}, "unk_token": {"content": "<unk>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false}}
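
The special-token map lines up with the ids used elsewhere in this commit (`<image_soft_token>` maps to 262144 in added_tokens.json and to `image_token_index` in config.json). A quick sanity check, assuming the repo id from the README example:

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("OpenGenerativeAI/Bifrost-27B")

print(tok.bos_token, tok.eos_token, tok.pad_token, tok.unk_token)  # <bos> <eos> <pad> <unk>
print(tok.convert_tokens_to_ids("<image_soft_token>"))             # 262144
```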
tokenizer.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a37445c55028d6406e9490d2be970bf316b87ecc5606544f721201ea43c4c6eb
+ size 20323114
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1299c11d7cf632ef3b4e11937501358ada021bbdf7c47638d13c0ee982f2e79c
+ size 4689074
tokenizer_config.json ADDED
The diff for this file is too large to render. See raw diff