ariG23498 (HF Staff) committed
Commit e59560b · verified · 1 Parent(s): 58b5e52

Create ep_gpt_oss.py

Files changed (1):
  1. ep_gpt_oss.py +59 -0
ep_gpt_oss.py ADDED
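"""Expert-parallel (EP) text generation with openai/gpt-oss-20b; run under torchrun."""
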
import os
import torch
from torch import distributed as dist
from transformers import GptOssForCausalLM, PreTrainedTokenizerFast
from transformers.distributed import DistributedConfig

def initialize_process():
    # torchrun exports: RANK, LOCAL_RANK, WORLD_SIZE, MASTER_ADDR, MASTER_PORT
    local_rank = int(os.environ["LOCAL_RANK"])
    torch.cuda.set_device(local_rank)
    # `device_id` takes a torch.device; passing it here lets NCCL set up the
    # communicator for this rank's GPU eagerly instead of on the first collective.
    dist.init_process_group(
        backend="nccl", device_id=torch.device(f"cuda:{local_rank}")
    )

def run_inference():
    model_id = "openai/gpt-oss-20b"
    tok = PreTrainedTokenizerFast.from_pretrained(model_id)

    # enable_expert_parallel shards the MoE expert weights across the ranks of
    # the process group, so each rank loads only its slice of the experts.
    model = GptOssForCausalLM.from_pretrained(
        model_id,
        distributed_config=DistributedConfig(enable_expert_parallel=True),
        dtype="auto",
    ).eval()

    messages = [
        {"role": "system", "content": "Be concise."},
        {"role": "user", "content": "Explain KV caching briefly."},
    ]
    # Extra kwargs such as `reasoning_effort` are forwarded to the chat
    # template; gpt-oss uses it to pick the low/medium/high reasoning mode.
    inputs = tok.apply_chat_template(
        messages,
        add_generation_prompt=True,
        return_tensors="pt",
        return_dict=True,
        reasoning_effort="low",
    )

    # Place inputs on *this process's* GPU
    local_rank = int(os.environ["LOCAL_RANK"])
    device = torch.device(f"cuda:{local_rank}")
    inputs = {k: v.to(device, non_blocking=True) for k, v in inputs.items()}

    with torch.inference_mode():
        out = model.generate(**inputs, max_new_tokens=128)
        torch.cuda.synchronize(device)

    # Sync all ranks, then print the completion from rank 0 only
    dist.barrier(device_ids=[local_rank])
    if dist.get_rank() == 0:
        prompt_len = inputs["input_ids"].shape[-1]
        print(tok.decode(out[0][prompt_len:]))

def main():
    initialize_process()
    try:
        run_inference()
    finally:
        # Tear down the process group even if inference raised
        dist.destroy_process_group()


if __name__ == "__main__":
    main()
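
The script expects a torchrun launch, which is what exports the RANK / LOCAL_RANK / WORLD_SIZE variables read in initialize_process(); a plausible single-node invocation (the GPU count of 4 is illustrative, not part of the commit) would be:

    torchrun --nproc_per_node=4 ep_gpt_oss.py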