import pandas as pd
import numpy as np
from pathlib import Path
import logging
import chardet
from datetime import datetime
import re

# Configure logging once at import time: INFO level, timestamped lines,
# written to data_cleaning.log in the current working directory.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    filename='data_cleaning.log',
    encoding='utf-8'  # keep the log file UTF-8 (this parameter requires Python 3.9+)
)


def setup_paths():
    """Build the project path map and ensure all directories exist.

    Returns:
        dict with keys 'base_dir', 'data_dir', 'clean_dir', 'profile_dir'
        mapping to pathlib.Path objects.

    Note:
        'base_dir' was previously missing from the returned dict, which made
        main()'s final log-path print raise KeyError after an otherwise
        successful run.
    """
    base_dir = Path(r"D:\code\python\commercial")
    paths = {
        'base_dir': base_dir,
        'data_dir': base_dir / "data",
        'clean_dir': base_dir / "clean",
        'profile_dir': base_dir / "profiles"
    }

    # Create every directory up front; idempotent thanks to exist_ok=True.
    for path in paths.values():
        path.mkdir(parents=True, exist_ok=True)

    return paths


def safe_read_csv(filepath):
    """Read a CSV file, trying a detected encoding then common fallbacks.

    Args:
        filepath: pathlib.Path of the CSV file.

    Returns:
        DataFrame with every column as object dtype, or None if the file
        cannot be read at all (errors are logged, never raised).
    """
    try:
        # Sniff the encoding from the first 100KB of raw bytes.
        with open(filepath, 'rb') as f:
            rawdata = f.read(100000)
        result = chardet.detect(rawdata)

        # Prefer GB-family encodings, which dominate this data source.
        encodings = ['gb18030', 'gbk', 'utf-8', 'utf-8-sig', 'latin1']
        detected_enc = result['encoding'] if result['confidence'] > 0.8 else None

        # Try the detected encoding first, then the fallbacks, without
        # attempting the same encoding twice.
        candidates = ([detected_enc] if detected_enc else []) + encodings
        tried = set()
        for enc in candidates:
            if enc in tried:
                continue
            tried.add(enc)
            try:
                return pd.read_csv(filepath, encoding=enc, dtype='object')
            except (UnicodeDecodeError, LookupError):
                continue

        # Last resort: python engine, skipping malformed lines.
        # on_bad_lines replaces error_bad_lines, which was removed in pandas 2.0.
        try:
            return pd.read_csv(filepath, encoding='utf-8', engine='python',
                               on_bad_lines='skip')
        except Exception:
            return None
    except Exception as e:
        logging.error(f"无法读取文件 {filepath.name}: {str(e)}")
        return None


def clean_teacher(df):
    """Clean the teacher dataset.

    Splits the term field into a start year and a semester label, normalises
    teacher names and subject names, and returns a de-duplicated frame with
    the selected columns. Returns an empty DataFrame on invalid input or on
    any processing error (which is logged).
    """
    if df is None or df.empty:
        logging.warning("教师数据为空或无效")
        return pd.DataFrame()

    try:
        # Fail fast when any column this function reads is absent.
        needed = ['term', 'cla_Name', 'bas_Name', 'sub_Name']
        absent = [c for c in needed if c not in df.columns]
        if absent:
            raise ValueError(f"缺少必要列: {absent}")

        # A term like "2023-2024-1" yields start year 2023 and semester "1".
        parts = df['term'].str.split('-', expand=True)
        df['start_year'] = pd.to_numeric(parts[0], errors='coerce')
        semester_labels = {'1': '上', '2': '下'}
        df['semester'] = parts[2].map(semester_labels).fillna('未知学期')

        # Trim teacher names; keep only CJK characters in subject names.
        df['teacher_name'] = df['bas_Name'].str.strip().fillna('未知教师')
        df['subject'] = (df['sub_Name']
                         .str.replace(r'[^\u4e00-\u9fa5]', '', regex=True)
                         .fillna('未知科目'))

        keep = ['term', 'cla_Name', 'teacher_name', 'subject',
                'start_year', 'semester']
        return df[keep].drop_duplicates()
    except Exception as e:
        logging.error(f"教师数据清洗失败: {str(e)}")
        return pd.DataFrame()


def clean_student_info(df):
    """Clean the student master data.

    Validates required columns, coerces the student ID to int, derives age,
    a political-status code, a dorm-status label, a local-origin flag and a
    normalised class name.

    Returns:
        One row per student ID with the derived columns, or an empty
        DataFrame on invalid input or any processing error (logged).
    """
    if df is None or df.empty:
        logging.warning("学生数据为空或无效")
        return pd.DataFrame()

    try:
        # Check every column this function reads, not just the key ones,
        # so a missing column fails with a clear message instead of a
        # KeyError deep in the body.
        required_cols = ['bf_StudentID', 'bf_Name', 'bf_BornDate', 'cla_Name',
                         'bf_sex', 'bf_policy', 'bf_zhusu', 'bf_qinshihao',
                         'bf_NativePlace']
        missing_cols = [col for col in required_cols if col not in df.columns]
        if missing_cols:
            raise ValueError(f"缺少必要列: {missing_cols}")

        # Work on copies: assigning after a row filter on the original frame
        # triggered SettingWithCopyWarning and mutated the caller's data.
        df = df.copy()
        df['bf_StudentID'] = pd.to_numeric(df['bf_StudentID'], errors='coerce')
        df = df[df['bf_StudentID'].notna()].copy()
        df['bf_StudentID'] = df['bf_StudentID'].astype(int)

        # bf_BornDate appears to hold a birth year (it is subtracted from the
        # current year); the derived age is clamped to a plausible range.
        df['bf_BornDate'] = pd.to_numeric(df['bf_BornDate'], errors='coerce')
        df['age'] = (datetime.now().year - df['bf_BornDate']).clip(10, 30)

        # Political status -> small integer code (-1 = unknown).
        policy_map = {'一般': 0, '少先队员': 1, '共青团员': 2}
        df['policy_code'] = df['bf_policy'].map(policy_map).fillna(-1).astype(int)

        # Boarders get a dorm label carrying the room number (integer part).
        df['dorm_status'] = np.where(
            pd.to_numeric(df['bf_zhusu'], errors='coerce') == 1,
            '住校_' + df['bf_qinshihao'].astype(str).str.split('.').str[0],
            '走读'
        )

        # 1 when the native place mentions Ningbo/Zhejiang, else 0.
        df['is_local'] = np.where(
            df['bf_NativePlace'].astype(str).str.contains('宁波|浙江', regex=True, na=False),
            1, 0
        ).astype(int)

        # Strip the '白-' prefix some class names carry.
        df['cla_Name'] = df['cla_Name'].str.replace('白-', '').fillna('未知班级')

        return df[[
            'bf_StudentID', 'bf_Name', 'bf_sex', 'age',
            'cla_Name', 'policy_code', 'dorm_status', 'is_local'
        ]].drop_duplicates('bf_StudentID')
    except Exception as e:
        logging.error(f"学生信息清洗失败: {str(e)}")
        return pd.DataFrame()


def clean_kaoqin_type(df):
    """Clean the attendance-type lookup table.

    Handles a degenerate single-column export (tab-separated fields),
    repairs latin1-as-GBK mojibake in the task name, and maps each task
    name onto a standard type plus a severity score.

    Returns:
        One row per control_task_order_id with std_type and severity, or
        an empty DataFrame on invalid input or any processing error.
    """
    if df is None or df.empty:
        logging.warning("考勤类型数据为空或无效")
        return pd.DataFrame()

    try:
        # Some exports collapse all four fields into one tab-separated column.
        if df.shape[1] == 1:
            expanded = df.iloc[:, 0].str.split('\t', expand=True)
            if expanded.shape[1] >= 4:
                df = expanded.iloc[:, :4].copy()
                df.columns = ['controler_id', 'controler_name',
                              'control_task_order_id', 'control_task_name']

        required_cols = ['control_task_order_id', 'control_task_name']
        missing_cols = [col for col in required_cols if col not in df.columns]
        if missing_cols:
            raise ValueError(f"缺少必要列: {missing_cols}")

        # Copy before adding columns so the caller's frame is untouched and
        # no SettingWithCopyWarning fires on a sliced frame.
        df = df.copy()

        def safe_decode(text):
            """Undo latin1-as-GBK mojibake; pass clean text through unchanged."""
            if not isinstance(text, str):
                return str(text)
            try:
                return text.encode('latin1').decode('gbk')
            except UnicodeError:
                # Already proper text (latin1 encode fails on CJK) or not
                # recoverable — keep the original string.
                return text

        df['control_task_name'] = df['control_task_name'].apply(safe_decode)

        # (pattern, standard type, severity), applied in order; when several
        # patterns match, the later rule overwrites the earlier one.
        type_rules = [
            (r'迟到|晚到', '迟到', 2),
            (r'早退', '早退', 3),
            (r'离校|请假', '离校', 4),
            (r'进校', '进校', 0),
            (r'校徽|校服', '校服', 1),
            (r'锻炼|课间操', '课外活动', 0)
        ]

        # Defaults for names matching no rule.
        df['std_type'] = '其他'
        df['severity'] = 0

        for pattern, type_name, severity in type_rules:
            mask = df['control_task_name'].str.contains(pattern, regex=True, na=False)
            df.loc[mask, 'std_type'] = type_name
            df.loc[mask, 'severity'] = severity

        return df[[
            'control_task_order_id',
            'std_type',
            'severity'
        ]].drop_duplicates('control_task_order_id')
    except Exception as e:
        logging.error(f"考勤类型清洗失败: {str(e)}")
        return pd.DataFrame()


def clean_kaoqin(kaoqin_df, kaoqin_type_df):
    """Clean raw attendance records and join them to their standard types.

    Accepts several possible column spellings, parses the record timestamp,
    and derives calendar features (date, hour, weekday flag, time slot).

    Args:
        kaoqin_df: raw attendance records.
        kaoqin_type_df: cleaned type lookup from clean_kaoqin_type().

    Returns:
        A DataFrame of per-record features, or an empty DataFrame when
        either input is unusable or on any processing error (logged).
    """
    if kaoqin_df is None or kaoqin_df.empty:
        logging.warning("考勤记录数据为空或无效")
        return pd.DataFrame()

    if kaoqin_type_df is None or kaoqin_type_df.empty:
        logging.warning("考勤类型数据为空，无法处理考勤记录")
        return pd.DataFrame()

    try:
        # Standard column name -> spellings seen in the various exports.
        col_mapping = {
            'control_task_order_id': ['control_task_order_id', 'ControllerID', '考勤类型ID'],
            'bf_studentID': ['bf_studentID', '学生ID', 'student_id'],
            'DataDateTime': ['DataDateTime', '考勤时间', 'record_time']
        }

        # Resolve which spelling each standard column actually uses.
        actual_col_map = {}
        for standard_name, possible_names in col_mapping.items():
            for name in possible_names:
                if name in kaoqin_df.columns:
                    actual_col_map[standard_name] = name
                    break
            else:
                # No candidate spelling matched this standard column.
                raise ValueError(f"找不到匹配列: {standard_name} (尝试: {possible_names})")

        # Rename to the standard names (rename returns a new frame).
        kaoqin_df = kaoqin_df.rename(columns={v: k for k, v in actual_col_map.items()})

        # Attach std_type/severity; unmatched type ids become NaN.
        merged = pd.merge(
            kaoqin_df,
            kaoqin_type_df,
            on='control_task_order_id',
            how='left'
        )

        # Drop records whose timestamp cannot be parsed.
        merged['datetime'] = pd.to_datetime(merged['DataDateTime'], errors='coerce')
        merged = merged[merged['datetime'].notna()].copy()

        # Calendar features.
        merged['date'] = merged['datetime'].dt.date
        merged['hour'] = merged['datetime'].dt.hour
        merged['day_of_week'] = merged['datetime'].dt.dayofweek
        merged['is_weekday'] = (merged['day_of_week'] < 5).astype(int)

        # Time-of-day buckets. include_lowest=True keeps hour 0 (midnight)
        # in the first bin; the default right-closed intervals would leave
        # it unlabelled (NaN).
        bins = [0, 6, 8, 12, 14, 18, 24]
        labels = ['深夜', '早晨', '上午', '中午', '下午', '晚上']
        merged['time_slot'] = pd.cut(merged['hour'], bins=bins, labels=labels,
                                     include_lowest=True)

        return merged[[
            'bf_studentID', 'datetime', 'date', 'hour',
            'day_of_week', 'is_weekday', 'time_slot',
            'std_type', 'severity'
        ]]
    except Exception as e:
        logging.error(f"考勤记录清洗失败: {str(e)}")
        return pd.DataFrame()


def clean_chengji(df, exam_type=None):
    """Clean exam-score records.

    Flags special scores (cheating / absent / exempt), keeps numeric scores
    in [0, 100], optionally joins the exam-type lookup, parses the exam
    date, normalises the subject name and drops unknown subjects.

    Args:
        df: raw score records.
        exam_type: optional exam-type lookup with EXAM_KIND_ID/EXAM_KIND_NAME.

    Returns:
        Cleaned scores keyed by bf_studentID, or an empty DataFrame on
        invalid input or any processing error (logged).
    """
    if df is None or df.empty:
        logging.warning("成绩数据为空或无效")
        return pd.DataFrame()

    try:
        required_cols = ['mes_StudentID', 'mes_Score', 'exam_sdate', 'mes_sub_name']
        missing_cols = [col for col in required_cols if col not in df.columns]
        if missing_cols:
            raise ValueError(f"缺少必要列: {missing_cols}")

        # Special score markers (sentinel values or Chinese labels).
        conditions = [
            (df['mes_Score'].astype(str).str.contains('-1|作弊', regex=True, na=False)),
            (df['mes_Score'].astype(str).str.contains('-2|缺考', regex=True, na=False)),
            (df['mes_Score'].astype(str).str.contains('-3|免考', regex=True, na=False))
        ]
        choices = ['作弊', '缺考', '免考']
        df = df.copy()  # the assignments below must not touch the caller's frame
        df['score_status'] = np.select(conditions, choices, default='正常')

        # Keep only numerically valid scores; copy so later assignments do
        # not hit SettingWithCopyWarning on the filtered slice.
        df['score'] = pd.to_numeric(df['mes_Score'], errors='coerce')
        valid_scores = df[df['score'].between(0, 100)].copy()

        # Join exam types when the lookup is usable.
        if exam_type is not None and not exam_type.empty:
            if 'EXAM_KIND_ID' in exam_type.columns and 'exam_type' in valid_scores.columns:
                valid_scores = pd.merge(
                    valid_scores,
                    exam_type,
                    left_on='exam_type',
                    right_on='EXAM_KIND_ID',
                    how='left'
                )

        # The lookup may be missing or unjoinable; guarantee the column so
        # the final selection cannot raise KeyError and silently drop the
        # whole dataset (the previous behaviour).
        if 'EXAM_KIND_NAME' not in valid_scores.columns:
            valid_scores['EXAM_KIND_NAME'] = '未知'

        # Keep only rows with a parseable exam date (date part of exam_sdate).
        valid_scores['exam_date'] = pd.to_datetime(
            valid_scores['exam_sdate'].str.split().str[0],
            errors='coerce'
        )
        valid_scores = valid_scores[valid_scores['exam_date'].notna()].copy()

        # Subject = first run of CJK characters in the raw subject name.
        valid_scores['subject'] = valid_scores['mes_sub_name'].str.extract(r'([\u4e00-\u9fa5]+)', expand=False)
        valid_scores['subject'] = valid_scores['subject'].fillna('未知科目')

        # Drop records whose subject could not be determined.
        valid_scores = valid_scores[valid_scores['subject'] != '未知科目']

        # Log how many records the pipeline filtered out overall.
        original_count = len(df)
        filtered_count = len(valid_scores)
        if original_count > filtered_count:
            logging.info(f"过滤掉{original_count - filtered_count}条未知科目成绩记录")

        return valid_scores[[
            'mes_StudentID', 'exam_date', 'score',
            'score_status', 'subject', 'EXAM_KIND_NAME'
        ]].rename(columns={'mes_StudentID': 'bf_studentID'})
    except Exception as e:
        logging.error(f"成绩清洗失败: {str(e)}")
        return pd.DataFrame()


def clean_consumption(df):
    """Clean card-consumption records.

    Keeps only spending rows (negative MonDeal), converts amounts to
    positive numbers, parses the transaction time and tags a meal period.

    Returns:
        DataFrame with bf_StudentID, datetime, amount and period, or an
        empty DataFrame on invalid input or any processing error (logged).
    """
    if df is None or df.empty:
        logging.warning("消费数据为空或无效")
        return pd.DataFrame()

    try:
        required_cols = ['bf_StudentID', 'MonDeal', 'DealTime']
        missing_cols = [col for col in required_cols if col not in df.columns]
        if missing_cols:
            raise ValueError(f"缺少必要列: {missing_cols}")

        # Work on copies: the original mutated the caller's frame and
        # triggered SettingWithCopyWarning after each row filter.
        df = df.copy()
        df['amount'] = pd.to_numeric(df['MonDeal'], errors='coerce')
        df = df[df['amount'] < 0].copy()  # negative deals are spending
        df['amount'] = df['amount'].abs()

        # Drop records with an unparseable timestamp.
        df['datetime'] = pd.to_datetime(df['DealTime'], errors='coerce')
        df = df[df['datetime'].notna()].copy()

        # Meal period from the hour of day (bounds inclusive).
        hour = df['datetime'].dt.hour
        df['period'] = np.where(
            hour.between(6, 9), '早餐',
            np.where(hour.between(11, 13), '午餐',
                     np.where(hour.between(17, 19), '晚餐', '其他'))
        )

        return df[[
            'bf_StudentID', 'datetime', 'amount', 'period'
        ]]
    except Exception as e:
        logging.error(f"消费数据清洗失败: {str(e)}")
        return pd.DataFrame()


def build_student_profiles(students, grades, attendance, consumption):
    """Assemble one profile row per student.

    Left-joins academic, attendance and consumption aggregates onto the
    student master table; any missing data source simply contributes no
    columns. Returns an empty DataFrame when there are no students or on
    any processing error (logged).
    """
    if students is None or students.empty:
        logging.warning("无有效学生数据，无法构建画像")
        return pd.DataFrame()

    try:
        # Student master table keyed by student id.
        profiles = students.set_index('bf_StudentID')

        def top_mode(series):
            """Most frequent value in a series, or None when it is empty."""
            modes = series.mode()
            return modes[0] if len(modes) > 0 else None

        # Academic aggregates.
        if grades is not None and not grades.empty:
            academics = grades.groupby('bf_studentID').agg({
                'score': ['mean', 'max', 'min', 'count'],
                'subject': top_mode
            })
            academics.columns = ['avg_score', 'best_score', 'worst_score',
                                 'exam_count', 'best_subject']
            profiles = profiles.join(academics, how='left')

        # Attendance aggregates.
        if attendance is not None and not attendance.empty:
            behaviour = attendance.groupby('bf_studentID').agg({
                'severity': ['mean', 'sum'],
                'std_type': lambda s: (s == '迟到').sum()
            })
            behaviour.columns = ['severity_mean', 'total_violations', 'late_count']
            profiles = profiles.join(behaviour, how='left')

        # Consumption aggregates.
        if consumption is not None and not consumption.empty:
            spending = consumption.groupby('bf_StudentID').agg({
                'amount': ['sum', 'mean', 'count'],
                'period': top_mode
            })
            spending.columns = ['total_spent', 'avg_spent', 'txn_count', 'main_period']
            profiles = profiles.join(spending, how='left')

        return profiles.reset_index()
    except Exception as e:
        logging.error(f"画像构建失败: {str(e)}")
        return pd.DataFrame()


def save_results(data_dict, paths):
    """Persist cleaned datasets and the student profiles as UTF-8-BOM CSVs.

    All datasets except 'profiles' go to paths['clean_dir'] as
    cleaned_<name>.csv; profiles go to paths['profile_dir']. Per-file
    failures are logged and skipped; only directory-level failures re-raise.
    """
    try:
        # Make sure the output directories exist.
        paths['clean_dir'].mkdir(exist_ok=True)
        paths['profile_dir'].mkdir(exist_ok=True)

        # Everything except the profiles goes to the clean directory.
        for name, frame in data_dict.items():
            if name == 'profiles' or frame.empty:
                continue
            target = paths['clean_dir'] / f'cleaned_{name}.csv'
            try:
                frame.to_csv(target, index=False, encoding='utf-8-sig')
                logging.info(f"成功保存: {target.name}")
            except Exception as e:
                logging.error(f"保存{name}失败: {str(e)}")

        # Profiles get their own directory and fixed file name.
        profiles = data_dict.get('profiles')
        if profiles is not None and not profiles.empty:
            target = paths['profile_dir'] / 'student_profiles.csv'
            try:
                profiles.to_csv(target, index=False, encoding='utf-8-sig')
                logging.info(f"成功保存学生画像: {target.name}")
            except Exception as e:
                logging.error(f"保存画像失败: {str(e)}")
    except Exception as e:
        logging.error(f"结果保存失败: {str(e)}")
        raise


def main():
    """Run the full cleaning pipeline: load, clean, profile, save.

    Returns:
        0 on success, 1 on any fatal error (logged as critical).
    """
    try:
        logging.info("=== 数据清洗开始 ===")
        paths = setup_paths()

        # Load every raw dataset; safe_read_csv returns None for
        # unreadable files and the cleaners tolerate that.
        datasets = {
            'teachers': safe_read_csv(paths['data_dir'] / '1_teacher.csv'),
            'students': safe_read_csv(paths['data_dir'] / '2_student_info.csv'),
            'kaoqin': safe_read_csv(paths['data_dir'] / '3_kaoqin.csv'),
            'kaoqin_type': safe_read_csv(paths['data_dir'] / '4_kaoqintype.csv'),
            'grades': safe_read_csv(paths['data_dir'] / '5_chengji.csv'),
            'exam_type': safe_read_csv(paths['data_dir'] / '6_exam_type.csv'),
            'consumption': safe_read_csv(paths['data_dir'] / '7_consumption.csv')
        }

        # Clean the independent datasets.
        cleaned_data = {
            'teachers': clean_teacher(datasets['teachers']),
            'students': clean_student_info(datasets['students']),
            'kaoqin_type': clean_kaoqin_type(datasets['kaoqin_type']),
            'grades': clean_chengji(datasets['grades'], datasets['exam_type']),
            'consumption': clean_consumption(datasets['consumption'])
        }

        # Attendance depends on the cleaned type lookup.
        cleaned_data['attendance'] = clean_kaoqin(
            datasets['kaoqin'],
            cleaned_data['kaoqin_type']
        )

        # Build the per-student profiles from everything above.
        cleaned_data['profiles'] = build_student_profiles(
            cleaned_data['students'],
            cleaned_data['grades'],
            cleaned_data['attendance'],
            cleaned_data['consumption']
        )

        save_results(cleaned_data, paths)

        # Console summary.
        print("\n===== 清洗结果摘要 =====")
        print(f"教师记录: {len(cleaned_data['teachers'])}")
        print(f"学生记录: {len(cleaned_data['students'])}")
        print(f"考勤记录: {len(cleaned_data['attendance'])}")
        print(f"成绩记录: {len(cleaned_data['grades'])}")
        print(f"消费记录: {len(cleaned_data['consumption'])}")
        print(f"学生画像: {len(cleaned_data['profiles'])}")
        # BUG FIX: paths carries no 'base_dir' key, so the original
        # paths['base_dir'] lookup raised KeyError here and a fully
        # successful run still exited 1. The log is configured relative to
        # the working directory (see logging.basicConfig), so report that.
        print(f"\n详细日志见: {Path('data_cleaning.log').resolve()}")

        return 0
    except Exception as e:
        logging.critical(f"主流程失败: {str(e)}", exc_info=True)
        print(f"错误: {str(e)} (详见日志)")
        return 1


if __name__ == "__main__":
    # raise SystemExit instead of exit(): the exit() builtin is injected by
    # the site module and is absent when Python runs with -S.
    raise SystemExit(main())