# /// script
# requires-python = ">=3.12"
# dependencies = [
#     "transformers",
#     "torch",
#     "accelerate",        # needed for device_map="auto" below
#     "huggingface_hub",   # imported directly in the finally block
# ]
# ///
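# Smoke test for LGAI-EXAONE/EXAONE-4.0-32B: run a one-turn chat generation,
# write a success marker or the failure traceback to a result file, and upload
# that file either way. Run with a PEP 723-aware runner, e.g. `uv run <this_file>.py`
# (assumed tooling; any runner that understands inline script metadata works).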
try:
    # Load model directly
    from transformers import AutoTokenizer, AutoModelForCausalLM

    tokenizer = AutoTokenizer.from_pretrained("LGAI-EXAONE/EXAONE-4.0-32B")
    # NOTE: torch_dtype="auto" loads the checkpoint in its native precision and
    # device_map="auto" spreads the 32B weights across available devices
    # (this is what requires accelerate); drop both to load fp32 on CPU as before.
    model = AutoModelForCausalLM.from_pretrained(
        "LGAI-EXAONE/EXAONE-4.0-32B",
        torch_dtype="auto",
        device_map="auto",
    )

    messages = [
        {"role": "user", "content": "Who are you?"},
    ]
    inputs = tokenizer.apply_chat_template(
        messages,
        add_generation_prompt=True,
        tokenize=True,
        return_dict=True,
        return_tensors="pt",
    ).to(model.device)

    outputs = model.generate(**inputs, max_new_tokens=40)
    # Decode only the newly generated tokens, skipping the echoed prompt.
    print(tokenizer.decode(outputs[0][inputs["input_ids"].shape[-1]:], skip_special_tokens=True))

    # Mark the run as successful.
    with open('LGAI-EXAONE_EXAONE-4.0-32B_1.txt', 'w') as f:
        f.write('Everything was good in LGAI-EXAONE_EXAONE-4.0-32B_1.txt')
except Exception:
    # On any failure, write the full traceback to the result file instead.
    import traceback
    with open('LGAI-EXAONE_EXAONE-4.0-32B_1.txt', 'w') as f:
        traceback.print_exc(file=f)
finally:
    # Upload the result file (success marker or traceback) in either case.
    from huggingface_hub import upload_file
    upload_file(
        path_or_fileobj='LGAI-EXAONE_EXAONE-4.0-32B_1.txt',
        repo_id='model-metadata/custom_code_execution_files',
        path_in_repo='LGAI-EXAONE_EXAONE-4.0-32B_1.txt',
        repo_type='dataset',
    )