ncoop57 commited on
Commit
2fa6d49
·
1 Parent(s): 48d8206

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +4 -4
README.md CHANGED
@@ -31,9 +31,9 @@ Training is done for 5 epochs using AdamW optimizer and linear decay learning ra
31
 
32
  ```
33
  python run_clm_apps.py \
34
- --output_dir ./gpt-neo-1.3B-apps \
35
  --model_name_or_path EleutherAI/gpt-neo-1.3B \
36
- --dataset_name ./apps.py \
37
  --dataset_config_name formatted \
38
  --do_train --do_eval \
39
  --block_size="1024" \
@@ -67,9 +67,9 @@ You can use this model directly with a pipeline for text generation. This exampl
67
 
68
  from transformers import AutoModelForCausalLM, AutoTokenizer, FlaxAutoModelForCausalLM
69
 
70
- model = AutoModelForCausalLM.from_pretrained("flax-community/gpt-code-clippy-125M-apps-alldata")
71
 
72
- tokenizer = AutoTokenizer.from_pretrained("flax-community/gpt-code-clippy-125M-apps-alldata")
73
 
74
  prompt = """
75
 
 
31
 
32
  ```
33
  python run_clm_apps.py \
34
+ --output_dir $HOME/gpt-neo-1.3B-apps \
35
  --model_name_or_path EleutherAI/gpt-neo-1.3B \
36
+ --dataset_name $HOME/gpt-code-clippy/data_processing/apps.py \
37
  --dataset_config_name formatted \
38
  --do_train --do_eval \
39
  --block_size="1024" \
 
67
 
68
  from transformers import AutoModelForCausalLM, AutoTokenizer, FlaxAutoModelForCausalLM
69
 
70
+ model = AutoModelForCausalLM.from_pretrained("flax-community/gpt-code-clippy-1.3B-apps")
71
 
72
+ tokenizer = AutoTokenizer.from_pretrained("flax-community/gpt-code-clippy-1.3B-apps")
73
 
74
  prompt = """
75