#!/usr/bin/env python3
"""
DSL 重复字段分析脚本
分析所有DSL模型文件中的重复字段定义，识别可以模板化的通用字段
"""

import os
import yaml
from collections import defaultdict, Counter
import json

def load_yaml_file(file_path):
    """Load and parse a YAML file.

    Args:
        file_path: Path of the YAML file to read.

    Returns:
        The parsed YAML document, or None if reading/parsing failed
        (the error is printed rather than raised).
    """
    try:
        with open(file_path, 'r', encoding='utf-8') as handle:
            document = yaml.safe_load(handle)
    except Exception as e:
        # Best-effort loader: report the failure and let the caller skip the file.
        print(f"Error loading {file_path}: {e}")
        return None
    return document

def extract_field_patterns(data, entity_name=""):
    """Extract one pattern dict per field attribute from a parsed model file.

    Args:
        data: Parsed YAML document; expected to contain an ``entities``
            mapping of entity name -> entity definition.
        entity_name: Unused; kept for backward compatibility with existing
            callers (it was previously shadowed by the loop variable).

    Returns:
        list[dict]: One dict per field with keys ``entity``, ``field_name``,
        ``type``, ``description``, ``rules``, ``default`` and ``values``.
        Returns an empty list when *data* has no usable ``entities`` mapping.
    """
    patterns = []

    if not isinstance(data, dict) or 'entities' not in data:
        return patterns

    for ent_name, entity_def in data['entities'].items():
        # Skip malformed entries (e.g. an entity mapped to None or a scalar);
        # the old `'attributes' not in entity_def` check raised TypeError here.
        if not isinstance(entity_def, dict):
            continue
        attributes = entity_def.get('attributes')
        if not isinstance(attributes, dict):
            continue

        for field_name, field_def in attributes.items():
            # Only dict-shaped field definitions carry the metadata we compare.
            if isinstance(field_def, dict):
                patterns.append({
                    'entity': ent_name,
                    'field_name': field_name,
                    'type': field_def.get('type', ''),
                    'description': field_def.get('description', ''),
                    'rules': field_def.get('rules', []),
                    'default': field_def.get('default'),
                    'values': field_def.get('values', [])
                })

    return patterns

def normalize_field_signature(pattern):
    """Build a canonical string signature for a field definition.

    Two field definitions with the same type, rules, default and enum
    values produce the same signature regardless of rule ordering or
    rule form (plain string vs single-key dict).

    Args:
        pattern: A field pattern dict as produced by ``extract_field_patterns``.

    Returns:
        str: ``"type|rules|default|values"`` with rules and values in a
        deterministic order.
    """
    # Rules may mix strings and dicts; flatten everything to strings first.
    rules_normalized = []
    for rule in pattern['rules'] or []:
        if isinstance(rule, dict):
            # Dict rules become "key:value" entries.
            for key, value in rule.items():
                rules_normalized.append(f"{key}:{value}")
        else:
            rules_normalized.append(str(rule))

    rules_str = str(sorted(rules_normalized)) if rules_normalized else ''

    # key=str keeps sorting deterministic even when `values` mixes types
    # (e.g. ints and strings), which would otherwise raise TypeError on Py3.
    values = pattern['values']
    values_str = str(sorted(values, key=str)) if values else ''

    return f"{pattern['type']}|{rules_str}|{pattern['default']}|{values_str}"

def analyze_duplicate_fields(models_dir="/Users/lee/projects/ts-dsl-orm/models"):
    """Analyze duplicate field definitions across all DSL model files.

    Scans every non-backup ``.yml`` file in *models_dir*, extracts all
    field definitions, then groups them by field name and by normalized
    signature to find duplicates and templating candidates.

    Args:
        models_dir: Directory containing the DSL model ``.yml`` files.
            Defaults to the original hard-coded project path for
            backward compatibility.

    Returns:
        dict with keys ``total_fields``, ``unique_field_names``,
        ``duplicate_field_names``, ``common_field_patterns`` and
        ``template_candidates``.
    """
    all_patterns = _collect_patterns(models_dir)

    # Group once by field name and once by normalized signature.
    fields_by_name = defaultdict(list)
    fields_by_signature = defaultdict(list)
    for pattern in all_patterns:
        fields_by_name[pattern['field_name']].append(pattern)
        fields_by_signature[normalize_field_signature(pattern)].append(pattern)

    analysis_result = {
        'total_fields': len(all_patterns),
        'unique_field_names': len(fields_by_name),
        'duplicate_field_names': {},
        'common_field_patterns': {},
        'template_candidates': []
    }

    # Field names that appear in more than one place.
    for field_name, patterns in fields_by_name.items():
        if len(patterns) <= 1:
            continue
        signatures = {normalize_field_signature(p) for p in patterns}
        analysis_result['duplicate_field_names'][field_name] = {
            'count': len(patterns),
            'entities': [p['entity'] for p in patterns],
            'variations': [
                {
                    'entity': p['entity'],
                    'type': p['type'],
                    'description': p['description'],
                    'rules': p['rules'],
                    'default': p['default'],
                    'values': p['values']
                }
                for p in patterns
            ],
            # One signature means every occurrence is byte-identical and
            # thus directly templatable.
            'unique_signatures': len(signatures),
        }

    # Structurally identical definitions shared by multiple fields.
    for signature, patterns in fields_by_signature.items():
        if len(patterns) > 1:
            analysis_result['common_field_patterns'][signature] = {
                'count': len(patterns),
                'field_names': [p['field_name'] for p in patterns],
                'entities': [p['entity'] for p in patterns],
                'example': patterns[0]
            }

    analysis_result['template_candidates'] = _build_template_candidates(fields_by_name)
    return analysis_result


def _collect_patterns(models_dir):
    """Load every non-backup .yml file in *models_dir* and extract its field patterns."""
    # 'backup' not in f already excludes *.backup files, so a single check suffices.
    model_files = [f for f in os.listdir(models_dir)
                   if f.endswith('.yml') and 'backup' not in f]

    print(f"分析文件: {model_files}")

    all_patterns = []
    for filename in model_files:
        data = load_yaml_file(os.path.join(models_dir, filename))
        if data:
            all_patterns.extend(extract_field_patterns(data))
    return all_patterns


def _build_template_candidates(fields_by_name):
    """Derive well-known field-template candidates from fields grouped by name."""
    candidates = []

    # 1. Audit fields template (createdAt, updatedAt)
    audit_candidates = []
    for field_name in ('createdAt', 'updatedAt'):
        audit_candidates.extend(fields_by_name.get(field_name, []))
    if audit_candidates:
        candidates.append({
            'template_name': 'AuditFields',
            'description': '审计字段模板 (创建时间、更新时间)',
            'fields': audit_candidates,
            'usage_count': len(audit_candidates)
        })

    # 2. Identity field template (id)
    if 'id' in fields_by_name:
        candidates.append({
            'template_name': 'IdentityField',
            'description': '标识字段模板 (主键ID)',
            'fields': fields_by_name['id'],
            'usage_count': len(fields_by_name['id'])
        })

    # 3. Status field template
    status_candidates = list(fields_by_name.get('status', []))
    if status_candidates:
        candidates.append({
            'template_name': 'StatusField',
            'description': '状态字段模板 (枚举状态)',
            'fields': status_candidates,
            'usage_count': len(status_candidates)
        })

    return candidates

def generate_report(analysis_result):
    """Render the duplicate-field analysis as a Markdown report.

    Args:
        analysis_result: Result dict produced by ``analyze_duplicate_fields``.

    Returns:
        str: The full Markdown report.
    """
    report = []
    report.append("# DSL 重复字段分析报告\n")

    # --- Overall statistics ---
    report.append("## 📊 总体统计\n")
    report.append(f"- 总字段数: {analysis_result['total_fields']}")
    report.append(f"- 唯一字段名: {analysis_result['unique_field_names']}")
    report.append(f"- 重复字段名: {len(analysis_result['duplicate_field_names'])}")
    report.append(f"- 通用字段模式: {len(analysis_result['common_field_patterns'])}")
    report.append(f"- 模板候选: {len(analysis_result['template_candidates'])}\n")

    # --- Duplicate field names ---
    report.append("## 🔄 重复字段名分析\n")
    for field_name, info in analysis_result['duplicate_field_names'].items():
        report.append(f"### {field_name}")
        report.append(f"- 出现次数: {info['count']}")
        report.append(f"- 涉及实体: {', '.join(info['entities'])}")
        report.append(f"- 唯一签名数: {info['unique_signatures']}")

        if info['unique_signatures'] == 1:
            report.append("- ✅ **完全一致，适合模板化**")
        else:
            report.append("- ⚠️ **存在差异，需要标准化**")

        report.append("\n**变体详情:**")
        for i, var in enumerate(info['variations'], 1):
            report.append(f"{i}. {var['entity']}: {var['type']} - {var['description']}")
            if var['rules']:
                report.append(f"   规则: {var['rules']}")
            # Compare against None so falsy defaults (0, False, '') are still
            # reported; the previous truthiness check silently dropped them.
            if var['default'] is not None:
                report.append(f"   默认值: {var['default']}")
            if var['values']:
                report.append(f"   枚举值: {var['values']}")
        report.append("")

    # --- Template candidates ---
    report.append("## 🎯 模板候选分析\n")
    for template in analysis_result['template_candidates']:
        report.append(f"### {template['template_name']}")
        report.append(f"- 描述: {template['description']}")
        report.append(f"- 使用次数: {template['usage_count']}")
        report.append("- 涉及字段:")

        # Group the candidate's fields by name to list which entities use each.
        field_groups = defaultdict(list)
        for field in template['fields']:
            field_groups[field['field_name']].append(field['entity'])

        for field_name, entities in field_groups.items():
            report.append(f"  - {field_name}: {', '.join(entities)}")
        report.append("")

    # --- Recommendations ---
    report.append("## 💡 优化建议\n")
    report.append("### 1. 立即可模板化的字段")

    immediate_candidates = [
        name for name, info in analysis_result['duplicate_field_names'].items()
        if info['unique_signatures'] == 1 and info['count'] >= 2
    ]

    if immediate_candidates:
        report.append("以下字段完全一致，可以立即创建模板:")
        report.extend(f"- {name}" for name in immediate_candidates)
    else:
        report.append("暂无完全一致的重复字段")

    report.append("\n### 2. 需要标准化的字段")

    standardization_candidates = [
        name for name, info in analysis_result['duplicate_field_names'].items()
        if info['unique_signatures'] > 1
    ]

    if standardization_candidates:
        report.append("以下字段存在差异，建议先标准化:")
        report.extend(f"- {name}" for name in standardization_candidates)

    # Static example of the recommended template structure.
    report.append("\n### 3. 推荐的模板结构")
    report.extend([
        "```yaml",
        "# 建议的字段模板结构",
        "templates:",
        "  AuditFields:",
        "    createdAt:",
        "      type: datetime",
        "      description: 创建时间",
        "      rules:",
        "        - required",
        "    updatedAt:",
        "      type: datetime",
        "      description: 更新时间",
        "      rules: []",
        "",
        "  IdentityField:",
        "    id:",
        "      type: uuid",
        "      description: 唯一标识符",
        "      rules:",
        "        - required",
        "```",
    ])

    return "\n".join(report)

def main(output_dir="/Users/lee/projects/ts-dsl-orm/docs"):
    """Run the duplicate-field analysis and write the JSON + Markdown outputs.

    Args:
        output_dir: Directory that receives the two output files.
            Defaults to the original hard-coded project path for
            backward compatibility.
    """
    print("开始分析DSL模型文件中的重复字段...")

    analysis_result = analyze_duplicate_fields()

    # Create the output directory if missing instead of crashing on open().
    os.makedirs(output_dir, exist_ok=True)

    # Save the raw analysis result as JSON.
    json_output = os.path.join(output_dir, "duplicate-fields-analysis.json")
    with open(json_output, 'w', encoding='utf-8') as f:
        json.dump(analysis_result, f, ensure_ascii=False, indent=2)

    # Render and save the Markdown report.
    report = generate_report(analysis_result)
    report_output = os.path.join(output_dir, "duplicate-fields-analysis.md")
    with open(report_output, 'w', encoding='utf-8') as f:
        f.write(report)

    print("✅ 分析完成!")
    print(f"📄 JSON结果: {json_output}")
    print(f"📋 报告文件: {report_output}")

    # Echo the key statistics to the console.
    print("\n📊 关键统计:")
    print(f"- 总字段数: {analysis_result['total_fields']}")
    print(f"- 重复字段名: {len(analysis_result['duplicate_field_names'])}")
    print(f"- 模板候选: {len(analysis_result['template_candidates'])}")

# Entry-point guard: run the analysis only when this file is executed
# directly as a script, not when it is imported as a module.
if __name__ == "__main__":
    main()