Update README.md
Browse files
README.md
CHANGED
|
@@ -54,7 +54,7 @@ tokenizer = AutoTokenizer.from_pretrained(model_name)
|
|
| 54 |
input_text = "Your paragraph here..."
|
| 55 |
inputs = tokenizer(input_text, return_tensors="pt")
|
| 56 |
outputs = model.generate(**inputs, max_length=512)
|
| 57 |
- decoded_output = tokenizer.decode(outputs, skip_special_tokens=True)
|
| 58 |
|
| 59 |
# Process the output to get a list of keywords (split and remove duplicates)
|
| 60 |
keywords = list(set(decoded_output.split('||')))
|
|
|
|
| 54 |
input_text = "Your paragraph here..."
|
| 55 |
inputs = tokenizer(input_text, return_tensors="pt")
|
| 56 |
outputs = model.generate(**inputs, max_length=512)
|
| 57 |
+ decoded_output = tokenizer.decode(outputs[0], skip_special_tokens=True)
|
| 58 |
|
| 59 |
# Process the output to get a list of keywords (split and remove duplicates)
|
| 60 |
keywords = list(set(decoded_output.split('||')))
|