quant_stage:
  quant_modifiers:
    SmoothQuantModifier:
      smoothing_strength: 0.8
      mappings:
      - !!python/tuple
        - ['re:.*q_proj', 're:.*k_proj', 're:.*v_proj']
        - re:.*input_layernorm
      - !!python/tuple
        - ['re:.*gate_proj', 're:.*up_proj']
        - re:.*post_attention_layernorm
      ignore: []
    GPTQModifier:
      config_groups:
        group_0:
          targets: [Linear]
          weights:
            num_bits: 8
            type: int
            symmetric: true
            group_size: null
            strategy: channel
            block_structure: null
            dynamic: false
            actorder: null
            observer: minmax
            observer_kwargs: {}
          input_activations:
            num_bits: 8
            type: int
            symmetric: true
            group_size: null
            strategy: tensor
            block_structure: null
            dynamic: false
            actorder: null
            observer: minmax
            observer_kwargs: {}
          output_activations: null
          format: null
      targets: [Linear]
      ignore: [lm_head]
      block_size: 128
      dampening_frac: 0.01
      offload_hessians: false
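
This recipe first runs SmoothQuant (migrating activation outliers into the preceding q/k/v and gate/up projection weights with strength 0.8), then applies GPTQ to produce int8 channel-wise weights and int8 per-tensor input activations on all Linear layers except lm_head. Below is a minimal sketch of applying it with llm-compressor's oneshot entry point, assuming the recipe is saved as recipe.yaml; the model name, calibration dataset, and sample counts are placeholder assumptions, and on older llm-compressor releases the import path is llmcompressor.transformers instead.

# sketch: apply the recipe above via one-shot calibration
from llmcompressor import oneshot

oneshot(
    model="meta-llama/Meta-Llama-3-8B-Instruct",  # placeholder model id
    dataset="open_platypus",                      # placeholder calibration dataset
    recipe="recipe.yaml",                         # the recipe above, saved to disk
    max_seq_length=2048,                          # calibration sequence length (assumed)
    num_calibration_samples=512,                  # calibration sample count (assumed)
)

The compressed model is written out ready for int8 (W8A8) inference; because input activations use a static per-tensor scale, calibration data that resembles the deployment distribution matters more here than it would for a dynamic-activation scheme.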