from transformers import PretrainedConfig
from typing import Sequence
class AutoEncoder1dConfig(PretrainedConfig):
    """Configuration for the ``archinetai/autoencoder1d-AT-v1`` model.

    Plain value-holder in the HuggingFace ``PretrainedConfig`` style: each
    constructor argument is stored as an instance attribute, then the
    remaining keyword arguments are forwarded to the base class.

    Args:
        in_channels: Number of input channels (default 2 — presumably
            stereo audio; confirm against the model implementation).
        patch_size: Patch size used by the encoder/decoder.
        channels: Base channel width.
        multipliers: Per-stage channel multipliers applied to ``channels``.
        factors: Per-stage resampling factors.
        num_blocks: Number of blocks per stage.
        bottleneck: Bottleneck type identifier (default ``"tanh"``).
        **kwargs: Forwarded to ``PretrainedConfig.__init__``.
    """

    model_type = "archinetai/autoencoder1d-AT-v1"

    def __init__(
        self,
        in_channels: int = 2,
        patch_size: int = 4,
        channels: int = 32,
        # Tuples instead of list literals: mutable default arguments are
        # shared across calls and a latent aliasing bug.
        multipliers: Sequence[int] = (1, 2, 4, 8, 8, 8, 1),
        factors: Sequence[int] = (2, 2, 2, 1, 1, 1),
        num_blocks: Sequence[int] = (2, 2, 8, 8, 8, 8),
        bottleneck: str = 'tanh',
        **kwargs,
    ):
        self.in_channels = in_channels
        self.patch_size = patch_size
        self.channels = channels
        # Copy to fresh lists: callers' sequences are not aliased, and the
        # stored/JSON-serialized representation stays a list as before.
        self.multipliers = list(multipliers)
        self.factors = list(factors)
        self.num_blocks = list(num_blocks)
        self.bottleneck = bottleneck
        super().__init__(**kwargs)