import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, recall_score, f1_score, precision_score
from tensorflow.keras.layers import Conv1D, MaxPooling1D, Flatten, Dense, LSTM, Input
from tensorflow.keras.models import Sequential
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.regularizers import l2
from hmmlearn import hmm
from scipy.stats import mode
import matplotlib.pyplot as plt

class TrafficAnalyzer:
    """End-to-end analyzer for road traffic / accident data.

    Pipeline: load and merge the two CSV sources, derive a numeric accident
    severity per record, fit an HMM (unsupervised hidden-state -> severity
    mapping) and a 1-D CNN (binary accident detector over fixed-length time
    windows), then plot the resulting metrics.
    """

    def __init__(self):
        # Configure matplotlib so Chinese labels and minus signs render.
        plt.rcParams['font.sans-serif'] = ['SimHei']
        plt.rcParams['axes.unicode_minus'] = False
        self.models = {}   # model name -> fitted model object
        self.results = {}  # model name -> dict of evaluation metrics

    def load_and_preprocess_data(self, accidents_file, traffic_file):
        """Load both CSVs, merge on (road code, date) and derive features.

        Args:
            accidents_file: path to the GBK-encoded accidents CSV.
            traffic_file: path to the GBK-encoded traffic CSV.

        Returns:
            A DataFrame with a numeric 'Severity' column and 'date'
            converted to an ordinal integer.
        """
        # Source files are GBK-encoded exports.
        accidents = pd.read_csv(accidents_file, encoding='gbk')
        traffic = pd.read_csv(traffic_file, encoding='gbk')

        # Normalize the join key to string on both sides.
        accidents['FSTR_BMCODE'] = accidents['FSTR_BMCODE'].astype(str)
        traffic['FSTR_BMCODE'] = traffic['FSTR_BMCODE'].astype(str)

        # Extract calendar dates; unparseable timestamps become NaT.
        accidents['FSTR_FINDTIME'] = pd.to_datetime(accidents['FSTR_FINDTIME'], errors='coerce')
        accidents['date'] = accidents['FSTR_FINDTIME'].dt.date

        traffic['FDT_TIME'] = pd.to_datetime(traffic['FDT_TIME'], errors='coerce')
        traffic['date'] = traffic['FDT_TIME'].dt.date

        # Left join keeps every traffic record; unmatched rows get NaN
        # accident fields.
        merged_data = pd.merge(traffic, accidents, on=['FSTR_BMCODE', 'date'], how='left')

        # Traffic records with no matching accident get an explicit label.
        merged_data['FSTR_ROADASSET_DESC'] = merged_data['FSTR_ROADASSET_DESC'].fillna('无交通事故')
        # Fix: also drop rows whose date failed to parse (NaT), otherwise
        # toordinal() below raises.
        merged_data.dropna(subset=['SPEED', 'FINT_LANEVOLUME', 'FINT_SECTVOLUME', 'date'], inplace=True)

        # Derive the numeric severity target from the free-text description.
        merged_data['Severity'] = merged_data['FSTR_ROADASSET_DESC'].apply(self._determine_severity)

        # Ordinal day number gives a numeric, monotonically increasing date.
        merged_data['date'] = merged_data['date'].apply(lambda x: x.toordinal())

        return merged_data

    def _determine_severity(self, text):
        """Score accident severity by summing the weights of matched keywords.

        '无交通事故' maps to exactly 0 (no accident). A description that
        matches no keyword at all falls back to 0.1 (assumed minor incident).
        """
        severity_keywords = {
            '车辆抛锚': 0.5,
            '车辆交通事故': 1,
            '轻微碰撞': 0.3,
            '严重碰撞': 1.5,
            '人员受伤': 2,
            '交通堵塞': 0.8,
            '道路封闭': 2,
            '无人员伤亡': 0.2,
            '轻微刮擦': 0.1,
            '无交通事故': 0
        }
        severity = 0
        matched = False
        for keyword, value in severity_keywords.items():
            if keyword in text:
                severity += value
                matched = True
        # Fix: apply the 0.1 fallback only when NO keyword matched. The old
        # check (severity == 0) wrongly bumped the explicit no-accident
        # label '无交通事故' (weight 0) up to 0.1.
        if not matched:
            severity = 0.1  # default: unrecognized description, assume minor
        return severity

    def train_hmm_model(self, data):
        """Fit a 3-state Gaussian HMM and score it as a classifier.

        Each hidden state is mapped to the most common severity among the
        samples assigned to it; metrics compare that mapping against the
        true severities. Results are cached under self.results['hmm'].
        """
        X = data[['SPEED', 'FINT_LANEVOLUME', 'FINT_SECTVOLUME']].values
        y = data['Severity'].values

        hmm_model = hmm.GaussianHMM(n_components=3, covariance_type="full", n_iter=200)
        hmm_model.fit(X)

        # Most likely hidden state for every sample.
        hidden_states = hmm_model.predict(X)

        # Map each hidden state to the modal severity of its samples.
        state_to_label = {}
        for state in np.unique(hidden_states):
            state_labels = y[hidden_states == state]
            state_to_label[state] = mode(state_labels).mode.item()

        y_pred = np.array([state_to_label[state] for state in hidden_states])

        # Fix: sklearn classification metrics raise ValueError on continuous
        # (float) targets, so compare severities as discrete string labels
        # (exact-match classification on the rounded score).
        y_true_cls = np.round(y.astype(float), 2).astype(str)
        y_pred_cls = np.round(y_pred.astype(float), 2).astype(str)

        self.models['hmm'] = hmm_model
        self.results['hmm'] = {
            'accuracy': accuracy_score(y_true_cls, y_pred_cls),
            'recall': recall_score(y_true_cls, y_pred_cls, average='weighted'),
            'precision': precision_score(y_true_cls, y_pred_cls, average='weighted'),
            'f1': f1_score(y_true_cls, y_pred_cls, average='weighted')
        }

        return self.results['hmm']

    def train_cnn_model(self, data, n_timesteps=30):
        """Train a 1-D CNN that flags time windows containing an accident.

        Records are grouped into windows of n_timesteps consecutive rows;
        a window is labeled 1 if any record in it has Severity > 0, matching
        the sigmoid / binary-crossentropy head. Results (incl. training
        history) are cached under self.results['cnn'].
        """
        X = data[['SPEED', 'FINT_LANEVOLUME', 'FINT_SECTVOLUME']].values
        # Fix: binarize the continuous severity score per record — the model
        # below is a binary classifier, not a regressor.
        y = (data['Severity'].values > 0).astype('float32')

        # Zero-pad so the sample count divides evenly into windows.
        if X.shape[0] % n_timesteps != 0:
            padding_size = n_timesteps - (X.shape[0] % n_timesteps)
            X = np.pad(X, ((0, padding_size), (0, 0)), mode='constant')
            y = np.pad(y, (0, padding_size), mode='constant')

        X = X.reshape(-1, n_timesteps, X.shape[1])
        # Fix: the original kept the FIRST n_windows per-record labels
        # (y[:X.shape[0]]), which do not correspond to the windows. Label
        # each window by whether any of its records has an accident.
        y = y.reshape(-1, n_timesteps).max(axis=1)

        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

        # Two Conv1D/pooling stages followed by a small dense head; L2
        # regularization throughout to limit overfitting.
        model = Sequential([
            Input(shape=(n_timesteps, X.shape[2])),
            Conv1D(filters=64, kernel_size=3, activation='relu', kernel_regularizer=l2(0.01)),
            MaxPooling1D(pool_size=2),
            Conv1D(filters=128, kernel_size=3, activation='relu', kernel_regularizer=l2(0.01)),
            MaxPooling1D(pool_size=2),
            Flatten(),
            Dense(128, activation='relu', kernel_regularizer=l2(0.01)),
            Dense(64, activation='relu', kernel_regularizer=l2(0.01)),
            Dense(1, activation='sigmoid')
        ])

        model.compile(optimizer=Adam(learning_rate=0.0001),
                      loss='binary_crossentropy',
                      metrics=['accuracy'])

        history = model.fit(X_train, y_train, epochs=10, batch_size=32, validation_split=0.2)

        # Threshold the sigmoid output at 0.5 to get hard 0/1 predictions.
        y_pred = (model.predict(X_test) > 0.5).astype('int32').flatten()
        y_true = y_test.astype('int32')

        self.models['cnn'] = model
        self.results['cnn'] = {
            'accuracy': accuracy_score(y_true, y_pred),
            'recall': recall_score(y_true, y_pred, average='weighted'),
            'precision': precision_score(y_true, y_pred, average='weighted'),
            'f1': f1_score(y_true, y_pred, average='weighted'),
            'history': history.history
        }

        return self.results['cnn']

    def visualize_results(self):
        """Plot the cached HMM / CNN evaluation results in a 2x2 grid."""
        fig, axs = plt.subplots(2, 2, figsize=(15, 12))
        fig.suptitle('交通数据分析结果', fontsize=16)

        # HMM metric bars.
        if 'hmm' in self.results:
            metrics = ['accuracy', 'recall', 'precision', 'f1']
            values = [self.results['hmm'][m] for m in metrics]
            axs[0, 0].bar(metrics, values)
            axs[0, 0].set_title('HMM模型评估指标')
            axs[0, 0].set_ylim(0, 1)

        # CNN training curves and metric bars.
        if 'cnn' in self.results:
            history = self.results['cnn']['history']
            axs[0, 1].plot(history['loss'], label='训练损失')
            axs[0, 1].plot(history['val_loss'], label='验证损失')
            axs[0, 1].set_title('CNN模型训练历史')
            axs[0, 1].legend()

            metrics = ['accuracy', 'recall', 'precision', 'f1']
            values = [self.results['cnn'][m] for m in metrics]
            axs[1, 0].bar(metrics, values)
            axs[1, 0].set_title('CNN模型评估指标')
            axs[1, 0].set_ylim(0, 1)

        # The fourth panel is unused; hide its empty frame.
        axs[1, 1].axis('off')

        plt.tight_layout()
        plt.show()

    def analyze_traffic_data(self, accidents_file, traffic_file):
        """Run the full pipeline: preprocess, train both models, visualize."""
        data = self.load_and_preprocess_data(accidents_file, traffic_file)

        hmm_results = self.train_hmm_model(data)
        print("HMM模型训练完成:")
        print(f"准确率: {hmm_results['accuracy']:.4f}")
        print(f"召回率: {hmm_results['recall']:.4f}")
        print(f"精确率: {hmm_results['precision']:.4f}")
        print(f"F1分数: {hmm_results['f1']:.4f}")

        cnn_results = self.train_cnn_model(data)
        print("\nCNN模型训练完成:")
        print(f"准确率: {cnn_results['accuracy']:.4f}")
        print(f"召回率: {cnn_results['recall']:.4f}")
        print(f"精确率: {cnn_results['precision']:.4f}")
        print(f"F1分数: {cnn_results['f1']:.4f}")

        self.visualize_results()