jupyterjazz committed on
Commit 660fe4c · 1 Parent(s): 725b8ba

chore: remove prints


Signed-off-by: jupyterjazz <[email protected]>

Files changed (1)
  1. modeling_jina_embeddings_v4.py +0 -8
modeling_jina_embeddings_v4.py CHANGED
@@ -309,13 +309,7 @@ class JinaEmbeddingsV4Model(Qwen2_5_VLForConditionalGeneration):
         with torch.no_grad():
             batch = {k: v.to(self.device) for k, v in batch.items()}
             with torch.autocast(device_type=torch.device(self.device).type):
-                for key, value in batch.items():
-                    if hasattr(value, 'shape'):
-                        print(f"{key}: {value.shape}")
-                    else:
-                        print(f"{key}: {type(value)}")
                 embeddings = self(**batch)
-                print(embeddings.single_vec_emb.shape, embeddings.multi_vec_emb.shape)
                 if vector_type == "single_vector":
                     embeddings = embeddings.single_vec_emb
                     if truncate_dim is not None:
@@ -446,8 +440,6 @@ class JinaEmbeddingsV4Model(Qwen2_5_VLForConditionalGeneration):
         encode_kwargs = self._validate_encoding_params(vector_type, truncate_dim)
 
         is_single = len(images) == 1
-        print(is_single)
-        print(len(images))
         embeddings = self._process_batches(
             data=images,
             processor_fn=self.processor.process_images,
 
309
  with torch.no_grad():
310
  batch = {k: v.to(self.device) for k, v in batch.items()}
311
  with torch.autocast(device_type=torch.device(self.device).type):
 
 
 
 
 
312
  embeddings = self(**batch)
 
313
  if vector_type == "single_vector":
314
  embeddings = embeddings.single_vec_emb
315
  if truncate_dim is not None:
 
440
  encode_kwargs = self._validate_encoding_params(vector_type, truncate_dim)
441
 
442
  is_single = len(images) == 1
 
 
443
  embeddings = self._process_batches(
444
  data=images,
445
  processor_fn=self.processor.process_images,
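
For reference, a minimal sketch (not part of this commit) of how the removed shape dumps could instead be routed through Python's standard logging module, so they stay silent unless DEBUG logging is enabled; the log_batch_shapes helper and the logger name are assumptions for illustration only:

import logging

logger = logging.getLogger(__name__)  # hypothetical module-level logger, not present in the actual file

def log_batch_shapes(batch, embeddings):
    # Mirrors the removed prints: report tensor shapes at DEBUG level instead of stdout.
    for key, value in batch.items():
        if hasattr(value, "shape"):
            logger.debug("%s: %s", key, value.shape)
        else:
            logger.debug("%s: %s", key, type(value))
    logger.debug(
        "single_vec_emb: %s, multi_vec_emb: %s",
        embeddings.single_vec_emb.shape,
        embeddings.multi_vec_emb.shape,
    )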