PKSGIN commited on
Commit
3b5c8d0
·
verified ·
1 Parent(s): fbdc4f8

Upload folder using huggingface_hub

Browse files
0000200_adapters.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7dbaa673ec1a3a696f2803cec31174481ab3d1b523ccfc0f243988a66bcfc8a7
3
+ size 3206027
0000400_adapters.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:efb53af4d2b7c10f55982b1d7922a4d95633601e29c1e5d3f20be1669014c3e9
3
+ size 3206027
0000600_adapters.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:165e22ae3145f6eb1920691ba8b5b5cc6d4e036fd813c8935dcb4593a115c9d4
3
+ size 3206027
0000800_adapters.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:debda81f5dee2927221584911f1dac33876adbe1debb5d4cc04d7556fe142045
3
+ size 3206027
0001000_adapters.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:802542d041917db7f85d855cf831f26f5f1fdcb763e9f3c7e479df7f613b2aa2
3
+ size 3206027
README.md ADDED
@@ -0,0 +1,113 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ license: apache-2.0
3
+ base_model: mlx-community/gemma-2-2b-it-4bit
4
+ tags:
5
+ - mlx
6
+ - lora
7
+ - shell-commands
8
+ - gemma
9
+ - instruction-tuning
10
+ language:
11
+ - en
12
+ library_name: mlx-lm
13
+ ---
14
+
15
+ # Shell Command Assistant - Gemma 2B LoRA Adapters
16
+
17
+ This model provides LoRA (Low-Rank Adaptation) adapters for the Gemma 2B model, fine-tuned to help users with shell commands. It can answer questions about common Unix/Linux shell commands with high accuracy.
18
+
19
+ ## Model Performance
20
+ - **Accuracy**: 100% on core shell commands
21
+ - **Base Model**: mlx-community/gemma-2-2b-it-4bit
22
+ - **Adapter Size**: ~18MB (much smaller than full model)
23
+ - **Training**: Fine-tuned on 26 diverse shell command examples
24
+
25
+ ## Quick Start
26
+
27
+ ```python
28
+ from mlx_lm import load, generate
29
+
30
+ # Load the model with adapters
31
+ model, tokenizer = load(
32
+ "mlx-community/gemma-2-2b-it-4bit",
33
+ adapter_path="PKSGIN/MyGemma270_Shellcommands"
34
+ )
35
+
36
+ # Ask a question
37
+ question = "How do I check disk space?"
38
+ prompt = f"### Human: {question}\n### Assistant:"
39
+ response = generate(model, tokenizer, prompt=prompt, max_tokens=50, verbose=False)
40
+ print(response.strip())
41
+ ```
42
+
43
+ ## Example Outputs
44
+
45
+ ```python
46
+ # Question: How do I list all files including hidden ones?
47
+ # Answer: Use ls -la to show all files including hidden ones.
48
+
49
+ # Question: How do I find all Python files?
50
+ # Answer: Use find . -name "*.py" to find all Python files.
51
+
52
+ # Question: How do I check memory usage?
53
+ # Answer: Use free -h to see memory usage in human-readable format.
54
+ ```
55
+
56
+ ## Supported Commands
57
+
58
+ The model can help with these types of shell commands:
59
+
60
+ - **File Operations**: `ls`, `cp`, `mv`, `rm`, `mkdir`, `find`
61
+ - **System Monitoring**: `ps`, `top`, `free`, `df`
62
+ - **Network**: `netstat`, `lsof`, `ping`
63
+ - **File Content**: `cat`, `grep`, `wc`, `tail`, `head`
64
+ - **Archives**: `tar`, `zip`, `unzip`
65
+ - **Permissions**: `chmod`, `chown`
66
+ - **Process Management**: `kill`, background processes
67
+
68
+ ## Training Details
69
+
70
+ - **Method**: LoRA (Low-Rank Adaptation)
71
+ - **Rank**: 4, **Alpha**: 8, **Dropout**: 0.0
72
+ - **Training Data**: 26 unique shell commands × 60 repetitions = ~1560 examples
73
+ - **Format**: Human/Assistant conversation format
74
+ - **Iterations**: ~1000 until convergence
75
+
76
+ ## Installation
77
+
78
+ ```bash
79
+ pip install mlx-lm>=0.19.0
80
+ ```
81
+
82
+ ## Usage Examples
83
+
84
+ ```python
85
+ from mlx_lm import load, generate
86
+
87
+ model, tokenizer = load(
88
+ "mlx-community/gemma-2-2b-it-4bit",
89
+ adapter_path="PKSGIN/MyGemma270_Shellcommands"
90
+ )
91
+
92
+ questions = [
93
+ "How do I compress files?",
94
+ "How do I see running processes?",
95
+ "How do I find large files?",
96
+ "How do I check network connections?"
97
+ ]
98
+
99
+ for question in questions:
100
+ prompt = f"### Human: {question}\n### Assistant:"
101
+ response = generate(model, tokenizer, prompt=prompt, max_tokens=50)
102
+ print(f"Q: {question}")
103
+ print(f"A: {response.strip()}")
104
+ print()
105
+ ```
106
+
107
+ ## License
108
+
109
+ Apache 2.0 (following base Gemma model)
110
+
111
+ ## Acknowledgments
112
+
113
+ Built using MLX-LM and based on Google's Gemma 2B model.
adapter_config.json ADDED
@@ -0,0 +1,37 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "adapter_file": "adapters.safetensors",
3
+ "adapter_path": "gemma270m_adapters",
4
+ "batch_size": 1,
5
+ "data": "expanded_shell_data",
6
+ "fine_tune_type": "lora",
7
+ "grad_checkpoint": false,
8
+ "iters": 1000,
9
+ "learning_rate": 0.0001,
10
+ "lora_layers": 8,
11
+ "lora_parameters": {
12
+ "rank": 4,
13
+ "alpha": 8,
14
+ "dropout": 0.0,
15
+ "scale": 10.0
16
+ },
17
+ "lr_schedule": null,
18
+ "max_seq_length": 256,
19
+ "model": "mlx-community/gemma-2-2b-it-4bit",
20
+ "num_layers": 8,
21
+ "optimizer": "adamw",
22
+ "optimizer_config": {},
23
+ "q_lora_quantization_config": null,
24
+ "resume_adapter_file": null,
25
+ "save_every": 200,
26
+ "seed": 42,
27
+ "steps_per_eval": 200,
28
+ "steps_per_report": 50,
29
+ "steps_per_save": 200,
30
+ "test": false,
31
+ "test_batches": 20,
32
+ "train": true,
33
+ "use_q_lora": false,
34
+ "val_batches": 20,
35
+ "wandb": null,
36
+ "warmup": 100
37
+ }
adapter_info.json ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "base_model": "mlx-community/gemma-2-2b-it-4bit",
3
+ "library": "mlx-lm",
4
+ "model_type": "gemma2",
5
+ "lora_config": {
6
+ "rank": 4,
7
+ "alpha": 8,
8
+ "dropout": 0.0,
9
+ "target_modules": [
10
+ "q_proj",
11
+ "v_proj"
12
+ ]
13
+ },
14
+ "training_info": {
15
+ "dataset_size": 1560,
16
+ "unique_commands": 26,
17
+ "accuracy": "100%",
18
+ "format": "Human/Assistant"
19
+ }
20
+ }
adapters.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:802542d041917db7f85d855cf831f26f5f1fdcb763e9f3c7e479df7f613b2aa2
3
+ size 3206027