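"""Expert-parallel inference with openai/gpt-oss-20b via Hugging Face Transformers.

One process per GPU; launch with torchrun, for example (the script name and
GPU count below are placeholders for your setup):

    torchrun --nproc_per_node=4 ep_inference.py

Assumes a recent PyTorch build with NCCL and a transformers release that ships
GptOssForCausalLM and transformers.distributed.DistributedConfig.
"""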
import os
import torch
from torch import distributed as dist
from transformers import GptOssForCausalLM, PreTrainedTokenizerFast
from transformers.distributed import DistributedConfig


def initialize_process():
    # torchrun exports: RANK, LOCAL_RANK, WORLD_SIZE, MASTER_ADDR, MASTER_PORT
    local_rank = int(os.environ["LOCAL_RANK"])
    torch.cuda.set_device(local_rank)
    # Bind the process group to this rank's GPU; passing a torch.device here
    # works across PyTorch versions that support the device_id argument.
    dist.init_process_group(backend="nccl", device_id=torch.device(f"cuda:{local_rank}"))


def run_inference():
    model_id = "openai/gpt-oss-20b"
    tok = PreTrainedTokenizerFast.from_pretrained(model_id)
    # enable_expert_parallel shards the MoE experts across the processes in the group
    model = GptOssForCausalLM.from_pretrained(
        model_id,
        distributed_config=DistributedConfig(enable_expert_parallel=True),
        dtype="auto",
    ).eval()

    messages = [
        {"role": "system", "content": "Be concise."},
        {"role": "user", "content": "Explain KV caching briefly."},
    ]
    inputs = tok.apply_chat_template(
        messages,
        add_generation_prompt=True,
        return_tensors="pt",
        return_dict=True,
        reasoning_effort="low",
    )

    # Place inputs on *this process's* GPU
    local_rank = int(os.environ["LOCAL_RANK"])
    device = torch.device(f"cuda:{local_rank}")
    inputs = {k: v.to(device, non_blocking=True) for k, v in inputs.items()}

    with torch.inference_mode():
        out = model.generate(**inputs, max_new_tokens=128)
        torch.cuda.synchronize(device)

    # Sync all ranks, then print from rank 0 only
    dist.barrier(device_ids=[local_rank])
    if dist.get_rank() == 0:
        # Decode only the newly generated tokens, skipping the prompt
        print(tok.decode(out[0][inputs["input_ids"].shape[-1]:]))


def main():
    initialize_process()
    try:
        run_inference()
    finally:
        dist.destroy_process_group()


if __name__ == "__main__":
    main()