from flask import Blueprint, render_template, jsonify, request, send_file
from bank_flow_analysis import BankFlowAnalyzer
import pandas as pd
import numpy as np
from io import BytesIO
from pathlib import Path
# Create the Blueprint that hosts all bank-flow pages and JSON APIs.
bank_flow_bp = Blueprint('bank_flow', __name__)
# Single shared analyzer instance used by every route in this module.
analyzer = BankFlowAnalyzer()
# Initialise the analyzer's data directory (runs as a side effect at import time).
analyzer.get_data_dir()
def get_resource_path(relative_path):
    """Resolve *relative_path* against the analyzer's base path and return it."""
    resolved = analyzer.get_base_path(relative_path)
    return resolved
@bank_flow_bp.route('/bank-flow')
def bank_flow():
    """Render the bank-flow filtering page."""
    print('访问银行流水筛选页面')
    # NOTE(review): active_page is 'flight_expense' rather than something like
    # 'bank_flow' — confirm this is the intended navigation highlight.
    template = 'pages/bank_flow.html'
    return render_template(template, active_page='flight_expense')

@bank_flow_bp.route('/check-data', methods=['GET'])
def check_data():
    """Check whether the asset-relationship workbook exists.

    Delegates to ``analyzer.check_data`` and returns its result as JSON;
    on any error, answers with ``exists=False`` plus the error message.
    """
    try:
        result = analyzer.check_data('银行流水数据库.xlsx', 'source')
        return jsonify(result)
    except Exception as e:
        print(f"[DEBUG] 发生错误: {str(e)}")
        import traceback
        traceback.print_exc()
        # Fail soft: the frontend treats this shape as "no data available".
        return jsonify({
            'exists': False,
            'message': f'检查数据时出错: {str(e)}',
            'folders': []
        })

@bank_flow_bp.route('/get-analysis', methods=['GET'])
def get_bank_flow_analysis():
    """Filter bank-flow records per the request's query string.

    Query parameters (all optional unless stated):
        folders              -- subjects (subdirectories) to scan; at most 5
        relatives/relatives1 -- keep rows matching 名称 / 交易对方名称
        money_type           -- allowed 借贷标志 values
        rule_selecting       -- enabled rule ids ("1" amount bands,
                                "2" rolling sum, "3" same-day / same-IP)
        fengxian_selecting + low/top_fengxianN -- rule-1 amount bands
        select_date, before_day -- date window (+/- before_day days, default 10)
        low_price / top_price -- amount range
        day                  -- rule-2 window length in days
        same_date            -- rule-3 variant ("1" same day, "2" same IP prefix)
        check_file_type=2 + filepath -- read a custom "<dir>\\<file>" path
        is_excel=1           -- return an .xlsx attachment instead of JSON
        fields               -- subset of columns to include in the result

    Returns a JSON payload ({'success', 'data', 'columns', 'total_records'})
    or, for is_excel=1, an Excel file download.
    """
    # Selected folders, people and flow types from the query string.
    subdirectories = request.args.getlist('folders')
    selected_relatives = request.args.getlist('relatives')
    selected_relatives1 = request.args.getlist('relatives1')
    money_type = request.args.getlist('money_type')
    rule_selecting = request.args.getlist('rule_selecting')
    fengxian_selecting = request.args.getlist('fengxian_selecting')
    low_fengxian1 = request.args.get('low_fengxian1')
    top_fengxian1 = request.args.get('top_fengxian1')
    low_fengxian2 = request.args.get('low_fengxian2')
    top_fengxian2 = request.args.get('top_fengxian2')
    low_fengxian3 = request.args.get('low_fengxian3')
    top_fengxian3 = request.args.get('top_fengxian3')

    select_date = request.args.get('select_date')
    low_price = request.args.get('low_price')
    high_price = request.args.get('top_price')
    select_day = request.args.get('day')
    same_ip = request.args.get('same_ip')  # NOTE(review): read but never used below
    same_date = request.args.get('same_date')
    before_day = request.args.get('before_day')
    check_file_type = request.args.get('check_file_type')
    filepath = request.args.get('filepath')
    # Half-width of the date window in days; defaults to 10 when absent.
    before_day = int(before_day) if before_day else 10
    is_excel = request.args.get('is_excel')
    # Validate select_date parses as a date before doing any work.
    if select_date:
        try:
            pd.to_datetime(select_date)
        except ValueError:
            return jsonify({
                'success': False,
                'message': '日期格式错误'
            })

    data = []
    try:
        # Resolve the resource directory; when the client selected no
        # folders, fall back to every subdirectory under 'source'.
        analyzer.get_source_dir('source')
        if not subdirectories:
            subdirectories = analyzer.get_subdirectories('source')
        filename = '银行流水数据库.xlsx'
        if len(subdirectories) > 5:
            return jsonify({
                'success': False,
                'message': '选中的被审查人数不能超过5个'
            })
        yhFile = None
        dir_path = None
        if check_file_type == "2":
            # Custom-file mode: filepath must look like "<dir>\\<file>".
            if not filepath:
                return jsonify({
                    'success': False,
                    'message': '自定义文件路径为空'
                })
            filepath_parts = filepath.split('\\')
            if len(filepath_parts) < 2:
                return jsonify({
                    'success': False,
                    'message': '自定义文件路径格式错误'
                })
            yhFile = filepath_parts[1]
            dir_path = Path(filepath_parts[0]).absolute()
            dir_path.mkdir(parents=True, exist_ok=True)
        # Read and filter one workbook per selected subject.
        for item in subdirectories:
            file_path = analyzer.source_dir / item
            # Excel export keeps all columns; the JSON view uses a fixed set.
            if is_excel and int(is_excel) == 1:
                header_title = []
            else:
                header_title = ['名称', '交易金额', '借贷标志', 'IP地址', '交易时间', '交易对方名称']
            if check_file_type == "2":
                bank_file_path = dir_path / yhFile
                print(f"尝试访问的文件路径: {bank_file_path}")
                if not bank_file_path.exists():
                    return jsonify({
                        'success': False,
                        'message': '自定义文件不存在'
                    })
                df = analyzer.read_excel_files(dir_path, yhFile, header_title)
            else:
                bank_file_path = file_path / filename
                if not bank_file_path.exists():
                    # BUGFIX: the old message named "银行数据流水.xlsx", which is
                    # not the file actually looked up; report the real filename.
                    return jsonify({
                        'success': False,
                        'message': f'{item}的{filename}文件不存在'
                    })
                df = analyzer.read_excel_files(file_path, filename, header_title)

            df['交易时间'] = df['交易时间'].apply(lambda x: analyzer.parse_date(x))

            # Keep rows whose 名称/交易对方名称 match the selected lists
            # (both lists => AND; one list => single-column filter).
            if selected_relatives and selected_relatives1:
                both = df['名称'].isin(selected_relatives) & df['交易对方名称'].isin(selected_relatives1)
                df = df[both]
            elif selected_relatives:
                df = df[df['名称'].isin(selected_relatives)]
            elif selected_relatives1:
                df = df[df['交易对方名称'].isin(selected_relatives1)]
            # Keep only the requested debit/credit flags.
            if money_type:
                df = df[df['借贷标志'].isin(money_type)]
            # Keep rows within +/- before_day days of select_date (inclusive).
            if select_date:
                try:
                    select_date = pd.to_datetime(select_date)
                    start_date = select_date - pd.Timedelta(days=before_day)
                    end_date = select_date + pd.Timedelta(days=before_day)
                    # NOTE(review): 交易时间 was already parsed above; this
                    # re-parse assumes analyzer.parse_date is idempotent —
                    # confirm against BankFlowAnalyzer.parse_date.
                    df['交易时间'] = df['交易时间'].apply(lambda x: analyzer.parse_date(x))
                    df = df[(df['交易时间'] >= start_date) & (df['交易时间'] <= end_date)]
                except ValueError:
                    return jsonify({
                        'success': False,
                        'message': '日期格式错误'
                    })

            # Amount range filter; either bound may be absent.
            if low_price and high_price:
                low_price = float(low_price)
                high_price = float(high_price)
                df = df[(df['交易金额'] >= low_price) & (df['交易金额'] <= high_price)]
            elif low_price:
                low_price = float(low_price)
                df = df[df['交易金额'] >= low_price]
            elif high_price:
                high_price = float(high_price)
                df = df[df['交易金额'] <= high_price]
            # Sort ascending by transaction time before applying the rules.
            df = df.sort_values('交易时间', ascending=True)
            if rule_selecting:
                if "1" in rule_selecting:
                    # Rule 1: mark amounts falling into any enabled (low, top]
                    # risk band; the mask is applied after rules 2/3 below.
                    sub_mask = pd.Series(False, index=df.index)
                    if "1" in fengxian_selecting and low_fengxian1 and top_fengxian1:
                        low_fengxian1 = float(low_fengxian1)
                        top_fengxian1 = float(top_fengxian1)
                        sub_mask |= (df['交易金额'] > low_fengxian1) & (df['交易金额'] <= top_fengxian1)
                    if "2" in fengxian_selecting and low_fengxian2 and top_fengxian2:
                        low_fengxian2 = float(low_fengxian2)
                        top_fengxian2 = float(top_fengxian2)
                        sub_mask |= (df['交易金额'] > low_fengxian2) & (df['交易金额'] <= top_fengxian2)
                    if "3" in fengxian_selecting and low_fengxian3 and top_fengxian3:
                        low_fengxian3 = float(low_fengxian3)
                        top_fengxian3 = float(top_fengxian3)
                        sub_mask |= (df['交易金额'] > low_fengxian3) & (df['交易金额'] <= top_fengxian3)

                if "2" in rule_selecting and select_day:
                    # Rule 2: for each row, the sum of 交易金额 over rows with
                    # the same 名称 and 借贷标志 within the next select_day days.
                    select_day = int(select_day)
                    df['交易时间'] = pd.to_datetime(df['交易时间']).dt.strftime('%Y-%m-%d')
                    df = df.sort_values(by=['名称', '借贷标志', '交易时间'])

                    def calculate_sum(group):
                        # Forward-window sum within one (名称, 借贷标志) group.
                        group = group.sort_values('交易时间')
                        group['交易时间'] = pd.to_datetime(group['交易时间'])
                        result = []
                        for idx, row in group.iterrows():
                            end_time = row['交易时间'] + pd.Timedelta(days=select_day)
                            window = (group['交易时间'] >= row['交易时间']) & (group['交易时间'] <= end_time)
                            result.append(group.loc[window, '交易金额'].sum())
                        return pd.Series(result, index=group.index)

                    df[f'{select_day}天内的交易金额总和'] = df.groupby(['名称', '借贷标志']).apply(calculate_sum).values

                if "3" in rule_selecting:
                    if same_date == "1":
                        # Rule 3a: keep days on which more than one distinct
                        # 名称 transacted; then one row per (名称, 交易日期).
                        df['交易日期'] = pd.to_datetime(df['交易时间']).dt.strftime('%Y-%m-%d')
                        grouped = df.groupby('交易日期')
                        filtered_data = []
                        for _, group in grouped:
                            if len(group['名称'].unique()) > 1:
                                filtered_data.append(group)

                        if filtered_data:
                            df = pd.concat(filtered_data, ignore_index=True)
                        df = df.drop_duplicates(subset=['名称', '交易日期'], keep='first')
                    if same_date == "2":
                        # Rule 3b: keep rows whose first three IP octets are
                        # shared by more than one distinct 名称.
                        # BUGFIX: the pattern was r'^(\d+\.\d+\.\d)' and only
                        # captured the first digit of the third octet.
                        df['IP前三段'] = df['IP地址'].str.extract(r'^(\d+\.\d+\.\d+)')
                        df = df[~((df['IP前三段'].isna()) | (df['IP前三段'] == '@N'))]
                        multi_name_prefixes = df.groupby('IP前三段')['名称'].nunique()
                        multi_name_prefixes = multi_name_prefixes[multi_name_prefixes > 1].index.tolist()
                        df = df[df['IP前三段'].isin(multi_name_prefixes)]
                        # One row per (名称, IP prefix) pair.
                        df = df.drop_duplicates(subset=['名称', 'IP前三段'], keep='first')
                if "1" in rule_selecting:
                    # Apply the rule-1 band mask last. Rules 2/3 may have
                    # re-indexed df (rule 3a resets the index via concat), so
                    # align the mask to the surviving rows first.
                    # BUGFIX: df[sub_mask] used to raise an unalignable-index
                    # error whenever rule 3 had rebuilt the frame.
                    df = df[sub_mask.reindex(df.index, fill_value=False)]
            if df is not False:
                data.append(df)
            else:
                # analyzer.read_excel_files signals failure by returning False.
                return jsonify({
                    'success': False, 
                    'message': '数据读取失败'
                })
        if not data:
            return jsonify({
                'success': False,
                'message': '没有数据'
            })
        # Merge the per-subject frames into one result set.
        combined_df = pd.concat(data, ignore_index=True)

        columns = combined_df.columns.tolist()

        # Optional column projection requested by the client.
        selected_fields = request.args.getlist('fields')
        if selected_fields:
            # Keep only fields that actually exist in the result.
            valid_fields = [field for field in selected_fields if field in columns]
            if valid_fields:
                combined_df = combined_df[valid_fields]
            else:
                return jsonify({'error': '没有找到有效的字段'})
        # is_excel == "1": return the result as an Excel attachment.
        if is_excel and int(is_excel) == 1:
            filename = "银行流水往来分析结果.xlsx"
            output = BytesIO()
            with pd.ExcelWriter(output, engine='openpyxl') as writer:
                combined_df.to_excel(writer, index=False, sheet_name='Sheet1')
            output.seek(0)
            try:
                return send_file(output,
                    mimetype='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',
                    as_attachment=True,
                    download_name=filename
                )
            except Exception as e:
                print(f"导出Excel文件时出错: {str(e)}")
                return jsonify({
                    'success': False,
                    'error': f'导出Excel文件时出错: {str(e)}'
                })
        else:
            # JSON path: stringify datetime columns, then sanitise NaN and
            # numpy scalar types so jsonify can serialise every record.
            for col in combined_df.select_dtypes(include=['datetime64']).columns:
                combined_df[col] = combined_df[col].dt.strftime('%Y-%m-%d %H:%M:%S')
            data = combined_df.to_dict('records')

            for record in data:
                for key, value in record.items():
                    if pd.isna(value):
                        record[key] = None
                    elif isinstance(value, pd.Timestamp):
                        # BUGFIX: the old check also tested pd.DatetimeTZDtype,
                        # a dtype class that element values never instantiate.
                        record[key] = value.isoformat()
                    elif isinstance(value, (np.int64, np.float64)):
                        record[key] = float(value) if isinstance(value, np.float64) else int(value)

            return jsonify({
                'success': True,
                'data': data,
                'columns': columns,
                'total_records': len(data)
            })

    except Exception as e:
        print(f"获取数据出错: {str(e)}")  # surface the failure in the server log
        return jsonify({
            'success': False,
            'error': f'生成分析结果时出错: {str(e)}'
        })
