import numpy as np
import json

def compute_semitones(frequencies, ref_freq=None):
    """Convert fundamental frequencies (Hz) to semitone offsets.

    Args:
        frequencies (list): fundamental frequencies in Hz; non-positive
            entries are treated as unvoiced and map to 0.
        ref_freq (float, optional): reference frequency. Defaults to a
            fixed 40 Hz reference.

    Returns:
        tuple: (list of semitone values, reference frequency used)
    """
    if ref_freq is None:
        # Fixed 40 Hz reference (rather than the list minimum) keeps all
        # tones displayed in a higher band and flattens the first tone.
        ref_freq = 40.0

        # Bail out early when no frame carries a usable (positive) pitch.
        if all(f <= 0 for f in frequencies):
            return [0] * len(frequencies), ref_freq

    # Semitone formula: 12 * log2(f / f_ref); unvoiced frames become 0.
    semitone_values = [
        12 * np.log2(f / ref_freq) if f > 0 else 0 for f in frequencies
    ]
    return semitone_values, ref_freq

def normalize_time(time_points):
    """Rescale time points linearly onto the unit interval [0, 1].

    Args:
        time_points (list): raw time stamps.

    Returns:
        list: rescaled values; an empty input yields [] and a constant
        series collapses to 0.5 for every point.
    """
    if not time_points:
        return []

    lo = min(time_points)
    hi = max(time_points)
    span = hi - lo

    # A constant series would divide by zero; park every point mid-range.
    if span == 0:
        return [0.5] * len(time_points)

    return [(t - lo) / span for t in time_points]

def fit_curve(normalized_time, semitones, fit_type):
    """Fit a polynomial contour to the (time, semitone) samples.

    Args:
        normalized_time (list): time values, typically in [0, 1].
        semitones (list): semitone values at those times.
        fit_type (int): 2 requests a quadratic fit; anything else linear.

    Returns:
        tuple: (x_fit, y_fit, coeffs, poly) — 100 dense abscissae, the
        fitted ordinates, the raw coefficients, and the poly1d object.
    """
    # Dense grid over the unit interval so the plotted curve is smooth.
    xs = np.linspace(0, 1, 100)

    # Quadratic only when explicitly requested; everything else is linear.
    degree = 2 if fit_type == 2 else 1

    coefficients = np.polyfit(normalized_time, semitones, degree)
    polynomial = np.poly1d(coefficients)

    return xs, polynomial(xs), coefficients, polynomial

def get_fit_type_from_tone(tone_type):
    """Map a tone descriptor (string label or number) to a fit order.

    Args:
        tone_type: tone descriptor — a Chinese contour label string or a
            numeric tone index.

    Returns:
        int: 2 when a quadratic fit is needed, 1 otherwise.
    """
    if isinstance(tone_type, str):
        # Fall-rise / undulating contours (and the third tone) bend twice,
        # so they need a second-order fit.
        needs_quadratic = any(
            marker in tone_type for marker in ("降升调", "起伏调", "三声")
        )
        return 2 if needs_quadratic else 1
    if isinstance(tone_type, (int, float)):
        # Numeric labels: 3 denotes the third (dipping) tone.
        return 2 if tone_type == 3 else 1
    # Anything else defaults to a linear fit.
    return 1

def extract_fit_coefficients(coeffs):
    """Unpack polynomial-fit coefficients into named parameters.

    Args:
        coeffs: coefficient array from numpy.polyfit, highest order first.

    Returns:
        dict: {'a', 'b'} for a linear fit (y = ax + b), or
        {'c', 'a', 'b'} for a quadratic fit (y = cx^2 + ax + b);
        an empty dict for any other length.
    """
    if len(coeffs) == 2:
        # Linear fit: slope then intercept.
        slope, intercept = coeffs
        return {'a': float(slope), 'b': float(intercept)}
    if len(coeffs) == 3:
        # Quadratic fit: the leading term is exposed as 'c' by convention
        # in this module.
        quad, lin, const = coeffs
        return {'c': float(quad), 'a': float(lin), 'b': float(const)}
    return {}

def analyze_audio_data(data_json, tone=None):
    """分析音频数据，返回归一化点和拟合参数
    
    Args:
        data_json: 包含频率和时间数据的JSON对象
        tone: 声调类型，整数1-4，用于决定拟合类型
        
    Returns:
        dict: 包含归一化点、拟合参数和曲线点的结果
    """
    try:
        # 解析JSON数据
        if isinstance(data_json, str):
            data = json.loads(data_json)
        else:
            data = data_json
        
        # 验证数据格式
        if not isinstance(data, dict) or "data" not in data:
            return {"status": "error", "message": "数据格式错误: 缺少'data'字段"}
            
        if not data["data"] or len(data["data"]) == 0:
            return {"status": "error", "message": "数据为空"}
            
        # 提取频率和时间数据
        try:
            frequencies = [float(point.get("frequency", 0)) for point in data["data"]]
            time_points = [float(point.get("time", 0)) for point in data["data"]]
        except (ValueError, TypeError, AttributeError) as e:
            return {"status": "error", "message": f"数据解析错误: {e}"}
        
        # 过滤出有效的频率值
        valid_indices = [i for i, f in enumerate(frequencies) if f > 0]
        
        if not valid_indices:
            return {"status": "error", "message": "没有检测到有效的频率数据"}
            
        valid_frequencies = [frequencies[i] for i in valid_indices]
        valid_time_points = [time_points[i] for i in valid_indices]
        
        # 检查数据点是否足够
        if len(valid_frequencies) < 2:
            return {"status": "error", "message": "有效数据点不足，至少需要2个有效的频率值"}
        
        # 归一化处理
        normalized_time = normalize_time(valid_time_points)
        
        # 计算半音值（使用固定参考）
        semitones, ref_freq = compute_semitones(valid_frequencies)
        
        # 保存原始半音值
        original_semitones = semitones.copy()
        
        # 归一化半音值到0-5范围
        # 假设典型的人声范围（相对于40Hz）
        min_semitone = 16.0   # 约对应100Hz
        max_semitone = 36.0  # 约对应300Hz
        semitone_range = max_semitone - min_semitone
        
        # 执行线性映射: 将[min_semitone, max_semitone]映射到[0, 5]
        normalized_semitones = []
        for s in semitones:
            if s <= min_semitone:
                normalized_value = 0.0
            elif s >= max_semitone:
                normalized_value = 5.0
            else:
                normalized_value = 5.0 * (s - min_semitone) / semitone_range
            normalized_semitones.append(normalized_value)
        
        # 判断声调类型 (基于原始半音值)
        tone_type = ""
        semitone_range = max(semitones) - min(semitones)
        
        if semitone_range < 3:
            tone_type = "高平调"
        elif semitone_range >= 3 and semitone_range < 6:
            if semitones[-1] > semitones[0]:
                tone_type = "升调"
            else:
                tone_type = "降调"
        else:
            # 根据数据长度调整分段分析
            if len(semitones) >= 9:  # 足够长才分析复杂声调
                segments_count = min(3, len(semitones) // 3)
                first_segment = semitones[:segments_count]
                mid_segment = semitones[segments_count:2*segments_count]
                last_segment = semitones[-segments_count:]
                
                if np.mean(first_segment) > np.mean(mid_segment) and np.mean(last_segment) > np.mean(mid_segment):
                    tone_type = "降升调"
                elif semitones[0] > semitones[-1]:
                    tone_type = "全降调"
                elif semitones[-1] > semitones[0]:
                    tone_type = "全升调"
                else:
                    tone_type = "起伏调"
            else:  # 数据点较少时采用简化分析
                if semitones[-1] > semitones[0]:
                    tone_type = "升调"
                elif semitones[0] > semitones[-1]:
                    tone_type = "降调"
                else:
                    tone_type = "平调"
        
        # 根据传入的tone参数决定拟合类型
        if tone is not None:
            # 如果是三声，使用二次函数拟合
            fit_type = 2 if tone == 3 else 1
        else:
            # 如果没有提供tone参数，则使用旧的判断方式
        fit_type = get_fit_type_from_tone(tone_type)
        
        # 构建返回结果
        normalized_points = [
            {"x": x, "y": y} for x, y in zip(normalized_time, normalized_semitones)
        ]
        
        # 保存原始半音值点
        original_points = [
            {"x": x, "y": y} for x, y in zip(normalized_time, original_semitones)
        ]
        
        # 拟合曲线（使用归一化后的半音值）
        x_fit, y_fit, coeffs, poly = fit_curve(normalized_time, normalized_semitones, fit_type)
        
        # 提取拟合参数
        fit_params = extract_fit_coefficients(coeffs)
        fit_params['fit_type'] = fit_type
        
        fit_curve_points = [
            {"x": float(x), "y": float(y)} for x, y in zip(x_fit, y_fit)
        ]
        
        return {
            "status": "success",
            "normalized_points": normalized_points,
            "original_points": original_points,
            "fit_params": fit_params,
            "fit_curve_points": fit_curve_points,
            "tone_type": tone_type,
            "stats": {
                "mean": float(np.mean(semitones)),
                "min": float(np.min(semitones)),
                "max": float(np.max(semitones)),
                "range": float(semitone_range)
            },
            "normalized_stats": {
                "mean": float(np.mean(normalized_semitones)),
                "min": float(np.min(normalized_semitones)),
                "max": float(np.max(normalized_semitones)),
                "range": float(max(normalized_semitones) - min(normalized_semitones))
            }
        }
    except Exception as e:
        return {"status": "error", "message": str(e)}

# Lightweight processing entry point with no matplotlib dependency.
def process_frequency_data(frequency_data, time_data=None):
    """Run the pitch analysis on a plain list of fundamental frequencies.

    Args:
        frequency_data (list): fundamental frequencies in Hz.
        time_data (list, optional): matching time stamps; when missing or
            of mismatched length, evenly spaced stamps are generated.

    Returns:
        dict: the result of analyze_audio_data, or an error dict.
    """
    try:
        # Reject anything that is not a non-empty list.
        if not frequency_data or not isinstance(frequency_data, list):
            return {"status": "error", "message": "基频数据无效"}

        # Synthesize evenly spaced time stamps when none (or the wrong
        # number) were supplied.
        if time_data is None or len(time_data) != len(frequency_data):
            count = len(frequency_data)
            time_data = [i / count for i in range(count)]

        # Repackage into the JSON-like structure the analyzer expects.
        payload = {
            "data": [
                {"frequency": float(freq), "time": float(stamp)}
                for freq, stamp in zip(frequency_data, time_data)
            ],
            "status": "success"
        }

        return analyze_audio_data(payload)

    except Exception as e:
        return {"status": "error", "message": str(e)}

# Sample data: a short pitch track — a voiced stretch (~186-212 Hz) that
# rises then falls, an unvoiced gap (frequency 0.0 frames), and two
# trailing low-frequency voiced frames.
sample_data = {
    "data": [
        {"frequency": 186.5, "time": 0.00},
        {"frequency": 191.1, "time": 0.01},
        {"frequency": 196.6, "time": 0.02},
        {"frequency": 202.3, "time": 0.03},
        {"frequency": 207.9, "time": 0.04},
        {"frequency": 211.2, "time": 0.05},
        {"frequency": 212.1, "time": 0.06},
        {"frequency": 211.2, "time": 0.07},
        {"frequency": 209.6, "time": 0.08},
        {"frequency": 206.5, "time": 0.09},
        {"frequency": 202.3, "time": 0.10},
        {"frequency": 200.5, "time": 0.11},
        {"frequency": 199.2, "time": 0.12},
        {"frequency": 196.4, "time": 0.13},
        {"frequency": 192.0, "time": 0.14},
        {"frequency": 188.5, "time": 0.15},
        {"frequency": 184.2, "time": 0.16},
        {"frequency": 0.0, "time": 0.17},
        {"frequency": 0.0, "time": 0.18},
        {"frequency": 0.0, "time": 0.19},
        {"frequency": 0.0, "time": 0.20},
        {"frequency": 0.0, "time": 0.21},
        {"frequency": 0.0, "time": 0.22},
        {"frequency": 0.0, "time": 0.23},
        {"frequency": 0.0, "time": 0.24},
        {"frequency": 0.0, "time": 0.25},
        {"frequency": 0.0, "time": 0.26},
        {"frequency": 0.0, "time": 0.27},
        {"frequency": 0.0, "time": 0.28},
        {"frequency": 0.0, "time": 0.29},
        {"frequency": 0.0, "time": 0.30},
        {"frequency": 0.0, "time": 0.31},
        {"frequency": 0.0, "time": 0.32},
        {"frequency": 0.0, "time": 0.33},
        {"frequency": 80.9, "time": 0.34},
        {"frequency": 78.0, "time": 0.35}
    ],
    "status": "success"
}

if __name__ == "__main__":
    # Demo run: analyze the bundled sample and pretty-print the result
    # (ensure_ascii=False keeps the Chinese tone labels readable).
    analysis_result = analyze_audio_data(sample_data)
    print(json.dumps(analysis_result, indent=2, ensure_ascii=False))