import os

import torch
from torch import distributed as dist
from transformers import GptOssForCausalLM, PreTrainedTokenizerFast
from transformers.distributed import DistributedConfig


def initialize_process():
    # torchrun exports: RANK, LOCAL_RANK, WORLD_SIZE, MASTER_ADDR, MASTER_PORT
    local_rank = int(os.environ["LOCAL_RANK"])
    torch.cuda.set_device(local_rank)
    # Bind the process group to this rank's GPU up front (pass a torch.device,
    # which is accepted across PyTorch versions that support device_id).
    dist.init_process_group(backend="nccl", device_id=torch.device(f"cuda:{local_rank}"))


def run_inference():
    model_id = "openai/gpt-oss-20b"
    tok = PreTrainedTokenizerFast.from_pretrained(model_id)
    # Expert parallelism: the MoE expert weights are sharded across the ranks in the job
    model = GptOssForCausalLM.from_pretrained(
        model_id,
        distributed_config=DistributedConfig(enable_expert_parallel=True),
        dtype="auto",
    ).eval()

    messages = [
        {"role": "system", "content": "Be concise."},
        {"role": "user", "content": "Explain KV caching briefly."},
    ]
    inputs = tok.apply_chat_template(
        messages,
        add_generation_prompt=True,
        return_tensors="pt",
        return_dict=True,
        reasoning_effort="low",
    )

    # Place inputs on *this process's* GPU
    local_rank = int(os.environ["LOCAL_RANK"])
    device = torch.device(f"cuda:{local_rank}")
    inputs = {k: v.to(device, non_blocking=True) for k, v in inputs.items()}

    with torch.inference_mode():
        out = model.generate(**inputs, max_new_tokens=128)
        torch.cuda.synchronize(device)

    # Every rank runs generate; keep output from rank 0 only
    dist.barrier(device_ids=[int(os.environ["LOCAL_RANK"])])
    if dist.get_rank() == 0:
        # Decode only the newly generated tokens, skipping the prompt
        print(tok.decode(out[0][inputs["input_ids"].shape[-1]:]))


def main():
    initialize_process()
    try:
        run_inference()
    finally:
        dist.destroy_process_group()


if __name__ == "__main__":
    main()
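
# Launch sketch (an illustration, not part of the script above): initialize_process()
# reads the environment variables that torchrun exports (RANK, LOCAL_RANK, WORLD_SIZE,
# MASTER_ADDR, MASTER_PORT), so the file is meant to be started through torchrun.
# The filename and GPU count below are assumptions for the example:
#
#   torchrun --nproc_per_node=2 ep_inference.py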