---
license: apache-2.0
language:
- en
base_model:
- PygmalionAI/Eleusis-12B
pipeline_tag: text-generation
---

This is an ONNX-optimized version of [Eleusis-12B](https://huggingface.co/PygmalionAI/Eleusis-12B).

For more comprehensive information about the model's capabilities, please visit the original model's repository.
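
The inference example below expects the ONNX weights and tokenizer files on local disk. One way to fetch everything is with `huggingface_hub` (a minimal sketch; the repo id below is a placeholder for this repository's actual id):

```python
from huggingface_hub import snapshot_download

# Placeholder repo id for illustration; substitute this repository's actual id
local_dir = snapshot_download(repo_id="PygmalionAI/Eleusis-12B-ONNX")

# local_dir now points at the directory containing the downloaded files
print(local_dir)
```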

## Inference

### Requirements

If you're on a CPU-only machine:

```sh
pip install onnxruntime
```

If you have an NVIDIA GPU available:

```sh
pip uninstall onnxruntime -y
pip install onnxruntime-gpu
```

Make sure you have installed the [CUDA Toolkit](https://developer.nvidia.com/cuda-12-4-0-download-archive) and [cuDNN](https://developer.nvidia.com/cudnn).
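
To verify the setup, you can ask onnxruntime which execution providers it can actually use; `CUDAExecutionProvider` should appear if the GPU build, CUDA, and cuDNN are installed correctly:

```python
import onnxruntime as ort

# Lists the providers this onnxruntime build can use on this machine.
# 'CPUExecutionProvider' is always present as the fallback.
print(ort.get_available_providers())
```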

Save the following script as `onnx_inference.py`:

```python
import argparse

import numpy as np
import onnxruntime as ort
from transformers import AutoTokenizer


def generate_text(prompt, num_tokens, model_path, tokenizer_path):
    tokenizer = AutoTokenizer.from_pretrained(tokenizer_path)

    # Prefer the CUDA provider when available; onnxruntime falls back to CPU otherwise
    providers = ['CUDAExecutionProvider', 'CPUExecutionProvider']
    session = ort.InferenceSession(model_path, providers=providers)

    input_ids = tokenizer(prompt, return_tensors="np").input_ids.astype(np.int64)

    for _ in range(num_tokens):
        # Rebuild the attention mask and position ids for the full sequence each step
        attention_mask = np.ones_like(input_ids)
        position_ids = np.arange(input_ids.shape[1], dtype=np.int64)[None, :]

        outputs = session.run(
            output_names=['logits'],
            input_feed={
                'input_ids': input_ids,
                'attention_mask': attention_mask,
                'position_ids': position_ids
            }
        )

        # Greedy decoding: take the most likely token at the last position
        next_token = np.argmax(outputs[0][0, -1, :])

        # Stop early once the model emits its end-of-sequence token
        if next_token == tokenizer.eos_token_id:
            break

        input_ids = np.concatenate([input_ids, [[next_token]]], axis=1)

    return tokenizer.decode(input_ids[0], skip_special_tokens=True)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Generate text using an ONNX model')
    parser.add_argument('prompt', type=str, help='Input prompt for generation')
    parser.add_argument('num_tokens', type=int, help='Number of tokens to generate')
    parser.add_argument('--model_path', type=str, default='model.onnx',
                        help='Path to ONNX model file')
    parser.add_argument('--tokenizer_path', type=str, default='tokenizer',
                        help='Path to tokenizer directory')

    args = parser.parse_args()

    result = generate_text(args.prompt, args.num_tokens, args.model_path, args.tokenizer_path)
    print(result)
```

Then run it, pointing at the ONNX model file and the directory containing the tokenizer files:

```sh
python onnx_inference.py "Once upon a time" 512 --model_path /path/to/model.onnx --tokenizer_path /path/to/model/dir
```

This is an example script and is not optimized: each step re-runs the entire sequence through the model rather than reusing cached key/values.
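
For heavier use, one option is Hugging Face's Optimum library, which wraps ONNX decoder models behind the usual `generate()` API and reuses past key/values between steps. A minimal sketch, assuming `pip install optimum[onnxruntime]` and a local model directory laid out the way Optimum expects:

```python
from optimum.onnxruntime import ORTModelForCausalLM
from transformers import AutoTokenizer

# Hypothetical local path; point it at the downloaded model directory
model_dir = "/path/to/model/dir"

tokenizer = AutoTokenizer.from_pretrained(model_dir)
model = ORTModelForCausalLM.from_pretrained(model_dir)

inputs = tokenizer("Once upon a time", return_tensors="pt")
output_ids = model.generate(**inputs, max_new_tokens=64, do_sample=True, temperature=0.8)

print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
```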