## 2. analyze_model.py
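Inspects a local copy of the model: configuration, tokenizer behavior on multilingual and code prompts, on-disk weight files, and a rough memory-footprint estimate.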
```python
#!/usr/bin/env python3
"""
Comprehensive analysis of the DeepSeek-V3.1-4bit model
"""
import argparse
import logging
import os
import sys
from pathlib import Path

from transformers import AutoTokenizer, AutoConfig

logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

def analyze_model(model_path):
    """Comprehensive model analysis"""
    logger.info("=" * 60)
    logger.info("🔍 DeepSeek-V3.1-4bit Comprehensive Analysis")
    logger.info("=" * 60)

    # Load config
    try:
        config = AutoConfig.from_pretrained(model_path)
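        # Note: depending on your transformers version, loading DeepSeek
        # configs/tokenizers may additionally need trust_remote_code=True
        # (version-dependent; newer releases support the architecture natively).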
logger.info("📊 Model Configuration:")
logger.info(f" Architecture: {config.architectures[0] if config.architectures else 'N/A'}")
logger.info(f" Model type: {getattr(config, 'model_type', 'N/A')}")
logger.info(f" Vocab size: {getattr(config, 'vocab_size', 'N/A'):,}")
logger.info(f" Hidden size: {getattr(config, 'hidden_size', 'N/A')}")
logger.info(f" Num hidden layers: {getattr(config, 'num_hidden_layers', 'N/A')}")
logger.info(f" Num attention heads: {getattr(config, 'num_attention_heads', 'N/A')}")
logger.info(f" Max position embeddings: {getattr(config, 'max_position_embeddings', 'N/A')}")
logger.info(f" Context length: {getattr(config, 'max_position_embeddings', 'N/A')}")
# DeepSeek specific config
if hasattr(config, 'rope_theta'):
logger.info(f" RoPE theta: {config.rope_theta}")
if hasattr(config, 'rms_norm_eps'):
logger.info(f" RMS norm eps: {config.rms_norm_eps}")
except Exception as e:
logger.error(f"❌ Failed to load config: {e}")
return
    # Load tokenizer
    try:
        tokenizer = AutoTokenizer.from_pretrained(model_path)
        logger.info("\n🔤 Tokenizer Analysis:")
        logger.info(f" Vocabulary size: {tokenizer.vocab_size:,}")
        logger.info(f" Special tokens: {len(tokenizer.special_tokens_map)}")
        logger.info(f" Padding token: {tokenizer.pad_token}")
        logger.info(f" EOS token: {tokenizer.eos_token}")
        logger.info(f" BOS token: {tokenizer.bos_token}")

        # Check for special tokens
        special_tokens = getattr(tokenizer, 'special_tokens_map', {})
        for key, value in special_tokens.items():
            logger.info(f" {key}: {value}")
    except Exception as e:
        logger.error(f"❌ Failed to load tokenizer: {e}")
        return None
    # Test various prompts
    test_prompts = [
        "The capital of France is",
        "Artificial intelligence is",
        "The future of machine learning will",
        "Once upon a time",
        "import numpy as np",
        "量子コンピューティングとは",  # Japanese: "What is quantum computing"
        "El aprendizaje automático es",  # Spanish: "Machine learning is"
        "机器学习是",  # Chinese: "Machine learning is"
        "<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n<|im_start|>user\nHello!<|im_end|>\n<|im_start|>assistant",
"def fibonacci(n):",
"The quick brown fox jumps over the lazy dog"
]
logger.info("\n🧪 Tokenization Examples:")
for prompt in test_prompts:
try:
tokens = tokenizer.encode(prompt)
decoded = tokenizer.decode(tokens[:10]) + ("..." if len(tokens) > 10 else "")
logger.info(f" '{prompt[:30]}{'...' if len(prompt) > 30 else ''}'")
logger.info(f" → {len(tokens)} tokens: {tokens[:10]}{'...' if len(tokens) > 10 else ''}")
logger.info(f" → decoded: {decoded}")
except Exception as e:
logger.warning(f" Failed to tokenize: {prompt[:30]} - {e}")
    # Check model files
    model_dir = Path(model_path)
    model_files = (list(model_dir.glob("*.safetensors"))
                   + list(model_dir.glob("*.npz"))
                   + list(model_dir.glob("*.gguf")))
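    # Sharded Hugging Face checkpoints usually also ship a
    # model.safetensors.index.json manifest; the globs above count only
    # the weight shards themselves.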
logger.info(f"\n📦 Model Files: {len(model_files)} weight files")
for file in model_files:
size_mb = file.stat().st_size / (1024 * 1024)
logger.info(f" {file.name} ({size_mb:.1f} MB)")
    # Estimate memory requirements
    total_params = 671_000_000_000  # DeepSeek-V3.1 has ~671B total parameters (MoE, ~37B active)
    param_size = 0.5  # bytes per parameter at 4-bit quantization
    total_memory_gb = (total_params * param_size) / (1024 ** 3)
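    # Worked example with the figures above:
    #   671e9 params * 0.5 bytes/param = 335.5e9 bytes ≈ 312.5 GiB
    # Real 4-bit MLX checkpoints tend to run somewhat larger, since
    # group-wise quantization stores per-group scales alongside the weights.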
logger.info("\n💾 Memory Requirements (Estimated):")
logger.info(f" Model size (4-bit): ~{total_memory_gb:.1f} GB")
logger.info(f" Inference RAM: ~{total_memory_gb * 1.5:.1f} GB+ (for 128K context)")
logger.info(f" GPU VRAM: ~{total_memory_gb:.1f} GB+ (recommended)")
return config, tokenizer
def main():
    parser = argparse.ArgumentParser(description="Analyze DeepSeek-V3.1-4bit model")
    parser.add_argument("--model-path", type=str, default="./deepseek_v3_4bit",
                        help="Path to the downloaded model")
    args = parser.parse_args()

    if not os.path.exists(args.model_path):
        logger.error(f"Model path does not exist: {args.model_path}")
        return 1

    analyze_model(args.model_path)
    return 0


if __name__ == "__main__":
    sys.exit(main())
```
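
For reference, the script can be run directly or imported. A minimal usage sketch, assuming the model sits at the default `./deepseek_v3_4bit` path:

```python
# Command line (adjust --model-path to wherever the model was downloaded):
#   python analyze_model.py --model-path ./deepseek_v3_4bit

# Programmatic use, reusing the loaded objects:
from analyze_model import analyze_model

result = analyze_model("./deepseek_v3_4bit")
if result is not None:  # analyze_model returns None when loading fails
    config, tokenizer = result
    print(config.model_type, f"vocab={tokenizer.vocab_size:,}")
```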