Changes model dims
.DS_Store
ADDED
Binary file (6.15 kB)
__pycache__/brlp_lite.cpython-310.pyc
DELETED
Binary file (18.2 kB)
brlp_lite.py
CHANGED

@@ -72,10 +72,11 @@ INPUT_SHAPE_1mm = (182, 218, 182)
 INPUT_SHAPE_1p5mm = (122, 146, 122)
 
 # Adjusting the dimensions to be divisible by 8 (2^3 where 3 are the downsampling layers of the AE)
-INPUT_SHAPE_AE = (120, 144, 120)
+#INPUT_SHAPE_AE = (120, 144, 120)
+INPUT_SHAPE_AE = (80, 96, 80)
 
 # Latent shape of the autoencoder
-LATENT_SHAPE_AE = (
+LATENT_SHAPE_AE = (1, 10, 12, 10)
 
 
 def load_if(checkpoints_path: Optional[str], network: nn.Module) -> nn.Module:

@@ -111,7 +112,7 @@ def init_autoencoder(checkpoints_path: Optional[str] = None) -> nn.Module:
     autoencoder = AutoencoderKL(spatial_dims=3,
                                 in_channels=1,
                                 out_channels=1,
-                                latent_channels=3,
+                                latent_channels=1, #3,
                                 num_channels=(64, 128, 128, 128),
                                 num_res_blocks=2,
                                 norm_num_groups=32,
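Sanity check on the new dimensions (a minimal standalone sketch, not part of the commit; it only assumes the 3-stage, factor-of-8 downsampling described in the comment above):

# Verify that the new INPUT_SHAPE_AE is divisible by 8 and reproduces LATENT_SHAPE_AE.
INPUT_SHAPE_AE = (80, 96, 80)
LATENT_CHANNELS = 1            # was 3 before this commit
DOWNSAMPLE = 2 ** 3            # 3 downsampling layers in the AE

assert all(d % DOWNSAMPLE == 0 for d in INPUT_SHAPE_AE)
latent_shape = (LATENT_CHANNELS, *(d // DOWNSAMPLE for d in INPUT_SHAPE_AE))
print(latent_shape)            # -> (1, 10, 12, 10), matching LATENT_SHAPE_AE in the diff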
    	
runs/Jan31_14-52-36_SOM-YT7DYVX-DT/events.out.tfevents.1738363956.SOM-YT7DYVX-DT.46314.0
ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f7fa2b1c9d0b68193dc5deed1e0f6dc775a2b6a7ae04cd17e0c4268ee4460451
+size 184408
