Tags: Image-Text-to-Text · Transformers · TensorBoard · Safetensors · feature-extraction · conversational · custom_code
Yin-Xie committed · verified · Commit e843cc6 · 1 Parent(s): 16dae6f

Upload inference.py with huggingface_hub

Files changed (1):
  1. inference.py +54 -0
inference.py ADDED
@@ -0,0 +1,54 @@
from transformers import AutoTokenizer, AutoProcessor, AutoModelForCausalLM
from qwen_vl_utils import process_vision_info
model_path = "/vlm/yinxie/code/megatron_ricevl/checkpoint/date_2025_09_12_85malign_26msft_qwen3_8b_resample_oldparameters_pp2-hf"

# default: Load the model on the available device(s)
model = AutoModelForCausalLM.from_pretrained(
    model_path, torch_dtype="auto", device_map="auto", trust_remote_code=True
)

# default processor
processor = AutoProcessor.from_pretrained(model_path, trust_remote_code=True)

# The default range for the number of visual tokens per image in the model is 4-16384.
# You can set min_pixels and max_pixels according to your needs, such as a token range of 256-1280, to balance performance and cost.
# min_pixels = 256*28*28
# max_pixels = 1280*28*28
# processor = AutoProcessor.from_pretrained("Qwen/Qwen2.5-VL-7B-Instruct", min_pixels=min_pixels, max_pixels=max_pixels)

messages = [
    {
        "role": "user",
        "content": [
            {
                "type": "image",
                "image": "/vlm/yinxie/code/sakura.png",
            },
            {"type": "text", "text": "Describe this image."},
        ],
    }
]

# Preparation for inference
text = processor.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)
image_inputs, video_inputs = process_vision_info(messages)
inputs = processor(
    text=[text],
    images=image_inputs,
    videos=video_inputs,
    padding=True,
    return_tensors="pt",
)
inputs = inputs.to("cuda")

# Inference: Generation of the output
generated_ids = model.generate(**inputs, max_new_tokens=1024)
generated_ids_trimmed = [
    out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
]
output_text = processor.batch_decode(
    generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
)
print(output_text)
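
Because the processor call above passes padding=True, the same pattern extends to batched inference over several conversations at once. The following is a minimal sketch under that assumption: it reuses the model, processor, and process_vision_info defined in the script, and the image paths and prompts are placeholders rather than files from this repository.

# Batched-inference sketch (assumes `model`, `processor`, and `process_vision_info`
# from inference.py above are in scope; image paths below are placeholders).
batch_messages = [
    [
        {
            "role": "user",
            "content": [
                {"type": "image", "image": "/path/to/first_image.png"},
                {"type": "text", "text": "Describe this image."},
            ],
        }
    ],
    [
        {
            "role": "user",
            "content": [
                {"type": "image", "image": "/path/to/second_image.png"},
                {"type": "text", "text": "What objects are visible in this image?"},
            ],
        }
    ],
]

# One chat-template string per conversation; padding aligns the batch.
texts = [
    processor.apply_chat_template(msgs, tokenize=False, add_generation_prompt=True)
    for msgs in batch_messages
]
image_inputs, video_inputs = process_vision_info(batch_messages)
inputs = processor(
    text=texts,
    images=image_inputs,
    videos=video_inputs,
    padding=True,
    return_tensors="pt",
).to("cuda")

# Generate for the whole batch, then strip the prompt tokens from each output.
generated_ids = model.generate(**inputs, max_new_tokens=1024)
generated_ids_trimmed = [
    out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
]
print(processor.batch_decode(generated_ids_trimmed, skip_special_tokens=True))

Note that batched generation with a decoder-only model generally requires left padding; whether this checkpoint's processor is configured that way should be verified before relying on the batched outputs.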