---
library_name: diffusers
---

This tiny test pipeline was created with the following script, based on
https://github.com/huggingface/diffusers/blob/main/tests/pipelines/hidream/test_pipeline_hidream.py

```python
import torch
from transformers import (
    AutoTokenizer,
    CLIPTextConfig,
    CLIPTextModelWithProjection,
    CLIPTokenizer,
    LlamaForCausalLM,
    T5EncoderModel,
)

from diffusers import (
    AutoencoderKL,
    FlowMatchEulerDiscreteScheduler,
    HiDreamImagePipeline,
    HiDreamImageTransformer2DModel,
)


def get_dummy_components():
    # Tiny HiDream transformer with a minimal MoE configuration.
    torch.manual_seed(0)
    transformer = HiDreamImageTransformer2DModel(
        patch_size=2,
        in_channels=4,
        out_channels=4,
        num_layers=1,
        num_single_layers=1,
        attention_head_dim=8,
        num_attention_heads=4,
        caption_channels=[32, 16],
        text_emb_dim=64,
        num_routed_experts=4,
        num_activated_experts=2,
        axes_dims_rope=(4, 2, 2),
        max_resolution=(32, 32),
        llama_layers=(0, 1),
    ).eval()

    torch.manual_seed(0)
    vae = AutoencoderKL(scaling_factor=0.3611, shift_factor=0.1159)

    # The same tiny CLIP config is reused for both CLIP text encoders.
    clip_text_encoder_config = CLIPTextConfig(
        bos_token_id=0,
        eos_token_id=2,
        hidden_size=32,
        intermediate_size=37,
        layer_norm_eps=1e-05,
        num_attention_heads=4,
        num_hidden_layers=5,
        pad_token_id=1,
        vocab_size=1000,
        hidden_act="gelu",
        projection_dim=32,
        max_position_embeddings=128,
    )

    torch.manual_seed(0)
    text_encoder = CLIPTextModelWithProjection(clip_text_encoder_config)

    torch.manual_seed(0)
    text_encoder_2 = CLIPTextModelWithProjection(clip_text_encoder_config)

    torch.manual_seed(0)
    text_encoder_3 = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")

    torch.manual_seed(0)
    text_encoder_4 = LlamaForCausalLM.from_pretrained("hf-internal-testing/tiny-random-LlamaForCausalLM")
    text_encoder_4.generation_config.pad_token_id = 1

    tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
    tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
    tokenizer_3 = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")
    tokenizer_4 = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-LlamaForCausalLM")

    scheduler = FlowMatchEulerDiscreteScheduler()

    components = {
        "scheduler": scheduler,
        "vae": vae,
        "text_encoder": text_encoder,
        "tokenizer": tokenizer,
        "text_encoder_2": text_encoder_2,
        "tokenizer_2": tokenizer_2,
        "text_encoder_3": text_encoder_3,
        "tokenizer_3": tokenizer_3,
        "text_encoder_4": text_encoder_4,
        "tokenizer_4": tokenizer_4,
        "transformer": transformer,
    }
    return components


if __name__ == "__main__":
    components = get_dummy_components()
    pipeline = HiDreamImagePipeline(**components)
    pipeline.push_to_hub("hf-internal-testing/tiny-hidream-i1-pipe")
```
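
For reference, here is a minimal usage sketch that loads the resulting tiny checkpoint with the standard `HiDreamImagePipeline.from_pretrained` API. The prompt, resolution, step count, and guidance scale below are illustrative values chosen for a quick smoke test; they are assumptions, not taken from the creation script above.

```python
import torch
from diffusers import HiDreamImagePipeline

# Load the tiny pipeline pushed above (repo id taken from the creation script).
pipe = HiDreamImagePipeline.from_pretrained("hf-internal-testing/tiny-hidream-i1-pipe")

# Quick smoke test; prompt, resolution, steps, and guidance scale are
# illustrative values, not part of the original script.
generator = torch.Generator().manual_seed(0)
image = pipe(
    prompt="a photo of a cat",
    height=128,
    width=128,
    num_inference_steps=2,
    guidance_scale=1.0,
    generator=generator,
).images[0]
print(image.size)
```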