sharpenb committed (verified)
Commit b239597 · 1 Parent(s): c605793

Upload folder using huggingface_hub (#7)


- d3e7b9e2bc10513245b5b7e876de23cc7a698d0ed7c514183d6bd12f9615b38f (02ff00e0b738a64850547eab1953763d07d196ef)

Files changed (3)
  1. config.json +1 -1
  2. model.safetensors +1 -1
  3. smash_config.json +1 -1
config.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "/covalent/.cache/models/tmplu1koe239lgp18yx",
+  "_name_or_path": "/covalent/.cache/models/tmp80ljzccjusn6hpno",
   "architectures": [
     "Qwen2ForCausalLM"
   ],
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:1687db9f5976497cfe245f3882c6630c2b4b2ace71526b64b3e9266692f699c4
+oid sha256:6ed9b3fcff8fe107b7bc68bebcd00566577f312876def836acedc71fa40d2092
 size 631522528
smash_config.json CHANGED
@@ -28,7 +28,7 @@
   "quant_llm-int8_weight_bits": 8,
   "max_batch_size": 1,
   "device": "cuda",
-  "cache_dir": "/covalent/.cache/models/tmplu1koe23",
+  "cache_dir": "/covalent/.cache/models/tmp80ljzccj",
   "task": "",
   "save_load_fn": "bitsandbytes",
   "save_load_fn_args": {}