Yin-Xie committed (verified)
Commit 47c9186 · 1 Parent(s): e843cc6

Upload inference.py with huggingface_hub

Files changed (1): inference.py (+2 -8)
inference.py CHANGED
@@ -1,6 +1,6 @@
 from transformers import AutoTokenizer, AutoProcessor, AutoModelForCausalLM
 from qwen_vl_utils import process_vision_info
-model_path = "/vlm/yinxie/code/megatron_ricevl/checkpoint/date_2025_09_12_85malign_26msft_qwen3_8b_resample_oldparameters_pp2-hf"
+model_path = "lmms-lab/LLaVA-One-Vision-1.5-8B-Instruct"
 
 # default: Load the model on the available device(s)
 model = AutoModelForCausalLM.from_pretrained(
@@ -10,19 +10,13 @@ model = AutoModelForCausalLM.from_pretrained(
 # default processer
 processor = AutoProcessor.from_pretrained(model_path, trust_remote_code=True)
 
-# The default range for the number of visual tokens per image in the model is 4-16384.
-# You can set min_pixels and max_pixels according to your needs, such as a token range of 256-1280, to balance performance and cost.
-# min_pixels = 256*28*28
-# max_pixels = 1280*28*28
-# processor = AutoProcessor.from_pretrained("Qwen/Qwen2.5-VL-7B-Instruct", min_pixels=min_pixels, max_pixels=max_pixels)
-
 messages = [
     {
         "role": "user",
         "content": [
             {
                 "type": "image",
-                "image": "/vlm/yinxie/code/sakura.png",
+                "image": "https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen-VL/assets/demo.jpeg",
             },
             {"type": "text", "text": "Describe this image."},
         ],
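For context, the hunks above show only the head of inference.py: the from_pretrained arguments are truncated by the diff, and everything after the messages list is outside the changed region. Below is a minimal sketch of how such a script typically completes under the standard qwen_vl_utils flow; the dtype/device settings and generation parameters are assumptions for illustration, not part of this commit.

import torch
from transformers import AutoProcessor, AutoModelForCausalLM
from qwen_vl_utils import process_vision_info

model_path = "lmms-lab/LLaVA-One-Vision-1.5-8B-Instruct"

# Load the model on the available device(s).
# dtype/device_map are assumptions; the diff truncates the actual call.
model = AutoModelForCausalLM.from_pretrained(
    model_path,
    torch_dtype=torch.bfloat16,
    device_map="auto",
    trust_remote_code=True,
)
processor = AutoProcessor.from_pretrained(model_path, trust_remote_code=True)

messages = [
    {
        "role": "user",
        "content": [
            {
                "type": "image",
                "image": "https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen-VL/assets/demo.jpeg",
            },
            {"type": "text", "text": "Describe this image."},
        ],
    }
]

# Build the chat-formatted prompt and extract the vision inputs.
text = processor.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)
image_inputs, video_inputs = process_vision_info(messages)
inputs = processor(
    text=[text],
    images=image_inputs,
    videos=video_inputs,
    padding=True,
    return_tensors="pt",
).to(model.device)

# Generate, then decode only the newly produced tokens.
generated_ids = model.generate(**inputs, max_new_tokens=128)
trimmed = [
    out[len(inp):] for inp, out in zip(inputs.input_ids, generated_ids)
]
output_text = processor.batch_decode(
    trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
)
print(output_text[0])

Run as a plain script, this should print a short natural-language description of the demo image; swapping the "image" entry for a local path works the same way, since qwen_vl_utils accepts both URLs and file paths.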