import librosa
import numpy as np
import pretty_midi
import music21
from music21 import stream, note, meter, tempo
from fastapi import APIRouter, File, UploadFile, HTTPException, status
from typing import Optional, List, Tuple
import asyncio
from pathlib import Path
import soundfile as sf

# FastAPI router object exposing the audio-processing endpoints below
router = APIRouter()

# Domain exception kept API-compatible: carries an HTTP status code so the
# FastAPI layer can map processing failures to responses.
class AudioProcessingError(Exception):
    def __init__(self, message: str, error_code: int = status.HTTP_500_INTERNAL_SERVER_ERROR):
        super().__init__(message)
        self.message = message
        self.error_code = error_code

def extract_melody(audio_path: str, tempo_bpm: int = None) -> List[Tuple[int, float, float]]:
    """
    Extract a melody from an audio file by detecting note onsets and the
    pitch at each onset, producing note durations close to the actual playing.

    Args:
        audio_path: Path to the input audio file.
        tempo_bpm: Kept for API compatibility; currently unused.

    Returns:
        A list of (midi_pitch, start_seconds, end_seconds) tuples.
    """
    # Load at a fixed high sample rate for stable pitch tracking.
    y, sr = librosa.load(audio_path, sr=48000)

    hop_length = 512  # shared hop so onset frames and pitch frames line up

    # Detect note onset frames (backtracked toward the preceding energy minimum).
    onset_frames = librosa.onset.onset_detect(
        y=y,
        sr=sr,
        hop_length=hop_length,
        backtrack=True,
        pre_avg=0.5,
        post_avg=0.5,
        pre_max=0.5,
        post_max=0.5
    )

    # Convert onset frames to times (seconds).
    onset_times = librosa.frames_to_time(onset_frames, sr=sr, hop_length=hop_length)

    # Frame-wise fundamental-frequency estimation over the full piano range.
    pitches, voiced_flags, _ = librosa.pyin(
        y,
        fmin=librosa.note_to_hz('A0'),
        fmax=librosa.note_to_hz('C8'),
        sr=sr,
        frame_length=2048,
        hop_length=hop_length,
        win_length=1024,
        n_thresholds=100,
        beta_parameters=(2, 18)
    )

    # Timestamp of each pitch frame.
    times = librosa.times_like(pitches, sr=sr, hop_length=hop_length)

    notes: List[Tuple[int, float, float]] = []
    for i, start_time in enumerate(onset_times):
        start_idx = int(np.argmin(np.abs(times - start_time)))
        # Note end: first unvoiced frame, or the next onset, whichever comes first.
        if i < len(onset_times) - 1:
            search_end = int(np.argmin(np.abs(times - onset_times[i + 1])))
        else:
            search_end = len(times) - 1
        end_idx = start_idx
        for j in range(start_idx + 1, search_end):
            if not voiced_flags[j] or np.isnan(pitches[j]):
                end_idx = j
                break
            end_idx = j
        end_time = times[end_idx]
        # Use the pitch at the onset frame; skip unvoiced / undefined frames.
        if voiced_flags[start_idx] and not np.isnan(pitches[start_idx]):
            # Fix: when two onsets fall within a single analysis frame,
            # end_idx stays at start_idx and a zero-duration note would be
            # emitted; give such notes at least one hop of duration.
            if end_time <= start_time:
                end_time = start_time + hop_length / sr
            midi_pitch = int(round(librosa.hz_to_midi(pitches[start_idx])))
            notes.append((midi_pitch, start_time, end_time))
    return notes

def merge_notes(pitches: np.ndarray, voiced_flags: np.ndarray, times: np.ndarray, tempo_bpm: int) -> List[Tuple[int, float, float]]:
    """
    Simplified note-building pass: each voiced pitch frame closes the
    currently open note (at a fixed one-beat duration) and opens a new one.
    Suitable for plain, monophonic melodies.

    Args:
        pitches: Frame-wise fundamental frequencies in Hz (NaN when unvoiced).
        voiced_flags: Frame-wise voiced/unvoiced flags.
        times: Frame timestamps in seconds.
        tempo_bpm: Tempo used to derive the fixed one-beat duration.

    Returns:
        A list of (midi_pitch, start_seconds, end_seconds) tuples.
    """
    beat_len = 60.0 / tempo_bpm
    result: List[Tuple[int, float, float]] = []
    pending = None  # (pitch, start_time) of the note currently open

    def _close(entry):
        # Emit a pending note quantized to exactly one beat.
        p, s = entry
        result.append((p, s, s + beat_len))

    for frame_time, freq, is_voiced in zip(times, pitches, voiced_flags):
        if is_voiced and not np.isnan(freq):
            midi_val = int(round(librosa.hz_to_midi(freq)))
            if pending is not None:
                _close(pending)
            pending = (midi_val, frame_time)
        elif pending is not None:
            _close(pending)
            pending = None

    # Flush the last open note, if any.
    if pending is not None:
        _close(pending)
    return result

def notes_to_midi(notes: List[Tuple[int, float, float]], output_path: str, tempo_bpm: int = None) -> None:
    """
    Write a list of notes to a MIDI file (piano program, fixed velocity).

    Pitches are transposed by whole octaves into the C4-C5 register
    (MIDI 60-72) before writing.

    Args:
        notes: (midi_pitch, start_seconds, end_seconds) tuples.
        output_path: Destination .mid file path.
        tempo_bpm: Kept for API compatibility; currently unused.

    Raises:
        AudioProcessingError: If MIDI generation fails.
    """
    try:
        midi_data = pretty_midi.PrettyMIDI()
        piano_program = pretty_midi.Instrument(program=0)  # program 0 = acoustic grand piano
        for pitch, start, end in notes:
            # Fold into [60, 72] by repeated octave shifts. A single +/-12
            # shift left pitches below 48 or above 84 outside the range.
            while pitch < 60:
                pitch += 12
            while pitch > 72:
                pitch -= 12
            # Local renamed from `note` to avoid shadowing the music21 `note`
            # module imported at file level.
            midi_note = pretty_midi.Note(
                velocity=100,
                pitch=pitch,
                start=start,
                end=end
            )
            piano_program.notes.append(midi_note)
        midi_data.instruments.append(piano_program)
        midi_data.write(output_path)
    except Exception as e:
        raise AudioProcessingError(f"MIDI文件生成失败: {str(e)}")

def notes_to_musicxml(notes: List[Tuple[int, float, float]], output_path: str, tempo_bpm: int) -> None:
    """
    Write a list of notes to a MusicXML score (C major, 4/4, given tempo).

    Pitches are transposed by whole octaves into MIDI 60-72; durations are
    quantized to a half note when close to two beats, otherwise a quarter note.

    Args:
        notes: (midi_pitch, start_seconds, end_seconds) tuples.
        output_path: Destination MusicXML file path.
        tempo_bpm: Tempo mark written to the score and used for quantization.

    Raises:
        AudioProcessingError: If score generation fails.
    """
    try:
        score = music21.stream.Score()
        part = music21.stream.Part()
        part.append(music21.key.Key('C'))
        part.append(music21.meter.TimeSignature('4/4'))
        # Local renamed from `tempo` to avoid shadowing the music21 `tempo`
        # module imported at file level.
        metronome = music21.tempo.MetronomeMark(number=tempo_bpm)
        part.append(metronome)
        beat_duration = 60.0 / tempo_bpm
        for pitch, start, end in notes:
            # Fold into [60, 72] by repeated octave shifts. A single +/-12
            # shift left pitches below 48 or above 84 outside the range.
            while pitch < 60:
                pitch += 12
            while pitch > 72:
                pitch -= 12
            duration = end - start
            n = music21.note.Note()
            n.pitch.midi = pitch
            if abs(duration - beat_duration * 2.0) < 0.1:
                n.quarterLength = 2.0  # half note
            else:
                n.quarterLength = 1.0  # quarter note
            part.append(n)
        score.append(part)
        score.write('musicxml', output_path)
    except Exception as e:
        raise AudioProcessingError(f"乐谱生成失败: {str(e)}")

def play_audio(audio_data: np.ndarray, sr: int, output_path: str):
    """
    Save an audio buffer to a WAV file.

    The buffer is converted to float32 and peak-normalized only when it
    exceeds the [-1, 1] range.

    Args:
        audio_data: 1-D (or soundfile-compatible) sample array.
        sr: Sample rate in Hz.
        output_path: Destination WAV file path.

    Returns:
        The output path on success.

    Raises:
        AudioProcessingError: If writing the file fails.
    """
    try:
        # Ensure float32 samples for the WAV writer.
        if audio_data.dtype != np.float32:
            audio_data = audio_data.astype(np.float32)
        # Compute the peak once (previously computed twice); np.max raises on
        # an empty buffer, so guard for size.
        if audio_data.size:
            peak = float(np.max(np.abs(audio_data)))
            if peak > 1.0:
                audio_data = audio_data / peak
        sf.write(output_path, audio_data, sr)
        return output_path
    except Exception as e:
        raise AudioProcessingError(f"音频保存失败: {str(e)}")

def process_audio(
    file_path: str,
    tempo_bpm: int = None,
    min_note_duration: float = 0.1
) -> dict:
    """
    End-to-end pipeline: load audio, detect tempo (when not supplied),
    extract the melody, and write MIDI / MusicXML / synthesized-WAV outputs
    under the "output" directory.

    Args:
        file_path: Path to the input audio file.
        tempo_bpm: Tempo override; auto-detected via beat tracking when None.
        min_note_duration: Kept for API compatibility; currently unused.

    Returns:
        dict with "metadata" (duration, sample rate, channels, tempo, notes)
        and "output_files" (paths of the generated midi/musicxml/wav files).

    Raises:
        AudioProcessingError: On any failure, with a stage-specific message.
    """
    try:
        y, sr = librosa.load(file_path, sr=44100, mono=True)
        # Validate before analysis: beat tracking on empty audio would
        # previously fail first with an opaque error.
        if len(y) == 0:
            raise AudioProcessingError("音频文件为空")
        if tempo_bpm is None:
            tempo_bpm_detected, _ = librosa.beat.beat_track(y=y, sr=sr)
            tempo_bpm = int(tempo_bpm_detected)
        notes = extract_melody(file_path, tempo_bpm=tempo_bpm)
        # extract_melody only emits finite values, so emptiness is the only
        # failure mode to check (the former NaN scan over tuples was dead code).
        if not notes:
            raise AudioProcessingError("未能提取到有效的旋律")
        # Create the output directory if it does not exist.
        output_dir = Path("output")
        output_dir.mkdir(parents=True, exist_ok=True)
        base_name = Path(file_path).stem
        midi_path = output_dir / f"{base_name}.mid"
        musicxml_path = output_dir / f"{base_name}.musicxml"
        generated_audio_path = output_dir / f"{base_name}_generated.wav"
        # Write the MIDI file.
        try:
            notes_to_midi(notes, str(midi_path), tempo_bpm)
        except Exception as e:
            raise AudioProcessingError(f"MIDI文件写入失败: {str(e)}")
        # Write the MusicXML file.
        try:
            notes_to_musicxml(notes, str(musicxml_path), tempo_bpm)
        except Exception as e:
            raise AudioProcessingError(f"MusicXML文件写入失败: {str(e)}")
        # Synthesize audio back from the generated MIDI.
        try:
            pm = pretty_midi.PrettyMIDI(str(midi_path))
            generated_audio = pm.synthesize(fs=sr)
            play_audio(generated_audio, sr, str(generated_audio_path))
        except Exception as e:
            raise AudioProcessingError(f"生成音频文件失败: {str(e)}")
        # Table hoisted out of the loop; MIDI convention: C4 == 60, so
        # octave = pitch // 12 - 1.
        note_names = ['C', 'C#', 'D', 'D#', 'E', 'F', 'F#', 'G', 'G#', 'A', 'A#', 'B']
        note_list = []
        for pitch, start, end in notes:
            note_name = f"{note_names[pitch % 12]}{(pitch // 12) - 1}"
            note_list.append({
                "pitch": pitch,
                "start_time": start,
                "duration": end - start,
                "velocity": 90,
                "note_name": note_name
            })
        return {
            "metadata": {
                "duration": librosa.get_duration(y=y, sr=sr),
                "sample_rate": sr,
                "channels": 1 if len(y.shape) == 1 else 2,
                "tempo": tempo_bpm,
                "notes": note_list
            },
            "output_files": {
                "midi": str(midi_path),
                "musicxml": str(musicxml_path),
                "generated_audio": str(generated_audio_path)
            }
        }
    except AudioProcessingError:
        # Re-raise as-is: previously these were re-wrapped by the generic
        # handler, double-prefixing the message and resetting error_code.
        raise
    except Exception as e:
        raise AudioProcessingError(f"音频处理失败: {str(e)}")

# Async wrapper kept for API compatibility
async def process_audio_async(
    file_path: str,
    tempo_bpm: int = 120,
    min_note_duration: float = 0.1
) -> dict:
    """
    Run the blocking process_audio in the default thread-pool executor so
    the event loop is not blocked. Same arguments and return value as
    process_audio.
    """
    # get_running_loop() is the supported call inside a coroutine;
    # get_event_loop() here is deprecated since Python 3.10.
    loop = asyncio.get_running_loop()
    return await loop.run_in_executor(
        None, process_audio, file_path, tempo_bpm, min_note_duration
    )

# FastAPI route, compatible with the original API
@router.post("/process")
async def process_audio_endpoint(
    file: UploadFile = File(...),
    tempo_bpm: Optional[int] = None,
    min_note_duration: Optional[float] = 0.1
):
    """
    Accept an uploaded audio file, run the full processing pipeline, and
    return its metadata/output-file report. Processing failures map to the
    error's own HTTP status code; anything else becomes a 500.
    """
    temp_path = Path(f"temp_{file.filename}")
    try:
        # Spool the upload to a local temp file for librosa to read.
        with open(temp_path, "wb") as f_out:
            f_out.write(await file.read())
        return await process_audio_async(str(temp_path), tempo_bpm, min_note_duration)
    except AudioProcessingError as e:
        raise HTTPException(status_code=e.error_code, detail=e.message)
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"服务器内部错误: {str(e)}")
    finally:
        # Fix: delete the temp upload even when processing raises
        # (previously it was only removed on the success path).
        try:
            temp_path.unlink(missing_ok=True)
        except Exception:
            pass