import importlib
from flask import Blueprint, current_app, jsonify,request,g
from sqlalchemy import text
import pandas as pd
import numpy as np
import base64
from PIL import Image
import io,os
import math
from datetime import datetime, timedelta
import variableUtils
import utils
from jsonToCurveOption import getInitCurveOption,getVirtualDict
from AccountJSON import Names as AccountNames

# Blueprint grouping all curve/chart-related API endpoints.
curve = Blueprint('curve', __name__)

# Column name used as the batch-number field in SQL queries below
# (project-configured via utils; e.g. 'bm_BatchNo' — see is_batch_no_exists).
batchName = utils.getBatchNo()

@curve.errorhandler(ValueError)
def handle_value_error(error):
    """Translate an uncaught ValueError into a JSON 400 response."""
    payload = jsonify({'error': str(error)})
    payload.status_code = 400
    return payload

@curve.route('/api/curve/option', methods=['GET'])
def get_curve_option():
    """Build and return the initial curve option dict for the requested curve.

    All parameters arrive as query-string arguments; defaults match the
    most common caller ('local' db, 'rt' curve, Chinese labels).
    """
    args = request.args
    db = args.get('db', 'local')
    sub_fields = args.get('fields', '')
    auth_groups = args.get('groups', '')
    tbid = args.get('tbid', 1)  # table id, e.g. Data1 / Event1
    ymode = args.get('ymode', 1)  # 0: one y-axis per field, 1: shared y-axis (default)
    curve_name = args.get('curve_name', 'rt')
    lang = args.get('lang', 'zh')
    offset = int(args.get('offset', 60))
    option = getInitCurveOption(db, tbid, curve_name, sub_fields,
                                auth_groups, int(ymode), lang, offset)
    return jsonify(option)


@curve.route('/api/curve/saveimage', methods=['POST'])
def save_image():
    """Decode a base64 data-URL image from the JSON body and save it to disk.

    Expected JSON keys: 'data' (a "data:image/...;base64,<payload>" string),
    'curve_name' (target sub-directory prefix) and 'file_name'.
    Returns {'result': 1} on success.
    """
    base64_str = request.json.get('data')
    curve_name = request.json.get('curve_name')
    # basename() strips any directory components, so a crafted file_name like
    # '../../etc/x.png' cannot escape the target directory (path traversal).
    file_name = os.path.basename(request.json.get('file_name'))
    # Data URLs are "data:<mime>;base64,<payload>" — keep only the payload.
    image_data = base64.b64decode(base64_str.split(',')[1])
    image = Image.open(io.BytesIO(image_data))
    file_path = os.path.join(utils.getFilesPath(), curve_name + 'curve', file_name)
    # exist_ok avoids the check-then-create race of exists()+makedirs().
    os.makedirs(os.path.dirname(file_path), exist_ok=True)
    image.save(file_path)
    return jsonify({'result': 1})


@curve.route('/api/get_time_range')
def getRecordTimeByBatch2():
    """Return {'start_time', 'end_time'} for the given batch number."""
    db = request.args.get('db', 'local')
    table_name = request.args.get('table_name', 'data1')
    batch = request.args.get('batch', '')

    # Renamed from `dict` — don't shadow the builtin.
    time_range = getRecordTimeByBatch(db, table_name, batch)
    return jsonify(time_range)

# Look up the start and end time of a batch by its batch number.
def getRecordTimeByBatch(db, table_name, batchNo):
    """Return {'start_time', 'end_time'} (formatted strings or None) for a batch.

    The batch number is bound as a query parameter instead of being
    interpolated into the SQL string (prevents SQL injection). table_name
    and the batch column cannot be bound; table_name may come from a request
    arg — NOTE(review): consider whitelisting it upstream.
    """
    query = text(
        f"SELECT MIN(RecordTime) AS start_time, MAX(RecordTime) AS end_time "
        f"FROM {table_name} WHERE {batchName} = :batch_no"
    ).bindparams(batch_no=batchNo)
    # Context manager returns the connection to the pool even if the query raises.
    with current_app.engine_dict[db].connect() as conn:
        result = pd.read_sql_query(query, conn)
    # MIN/MAX return NaT/NaN when no rows match — map those to None.
    start_time = result['start_time'][0].strftime('%Y-%m-%d %H:%M:%S') if not pd.isna(result['start_time'][0]) else None
    end_time = result['end_time'][0].strftime('%Y-%m-%d %H:%M:%S') if not pd.isna(result['end_time'][0]) else None
    return {
        "start_time": start_time,
        "end_time": end_time
    }

def get_last_recordTime(db, table_name, number_hour):
    """Return {'start_time', 'end_time'} where end_time is the newest
    RecordTime in `table_name` and start_time is `number_hour` hours earlier.

    Returns None when the table has no rows.
    """
    query_sql = text(f"""
         SELECT RecordTime FROM {table_name}
         ORDER BY RecordTime DESC
         LIMIT 1
         """)
    engine = current_app.engine_dict[db]
    # `with` closes the connection even if execute() raises.
    with engine.connect() as conn:
        result = conn.execute(query_sql).fetchone()
    if result:
        end_time = result[0]
        start_time = end_time - timedelta(hours=number_hour)
        return {'start_time': start_time, 'end_time': end_time}
    return None  # no records found


@curve.route('/api/curve/query', methods=['POST'])
def time_query():
    """Query curve data points and resample them onto a regular time grid.

    JSON body parameters:
        db          engine key (default 'local')
        start_time / end_time / batch / last
                    range selectors — at least a time range or a batch is
                    required; `last` (hours) derives the range from the
                    newest record in the table
        table_name  source table
        fields      comma-separated field names (virtual fields expanded
                    via fields_to_query)
        freq        sample spacing in seconds (default 3)

    Returns an ECharts-shaped payload: {'xAxis': {'data': [...]},
    'series': [{'field': ..., 'data': [...]}, ...]}.
    """
    db = request.json.get('db', 'local')
    if not db:
        return jsonify({'error': 'Database parameter is required'}), 400

    start_time = request.json.get('start_time')
    end_time = request.json.get('end_time')
    batch = request.json.get('batch')
    table_name = request.json.get('table_name')
    last = request.json.get('last')
    fields = request.json.get('fields', '')
    lang = request.json.get('lang', 'zh')
    freq = request.json.get('freq', 3)
    freq = f'{freq}S'  # pandas frequency string, e.g. '3S'

    try:
        engine = current_app.engine_dict[db]
    except KeyError:
        return jsonify({'error': f'Invalid database: {db}'}), 400
    except Exception as e:
        return jsonify({'error': str(e)}), 500

    # Trailing comma so 'RecordTime' can be appended directly below.
    if fields:
        fields += ','
    select_text = fields_to_query(fields)

    if last:  # derive the range from the newest record minus `last` hours
        dic = get_last_recordTime(db, table_name, int(last))
        if dic is not None:
            start_time = dic['start_time']
            end_time = dic['end_time']

    # Build the query with bound parameters for all user-supplied VALUES
    # (prevents SQL injection). Table/column names cannot be bound —
    # NOTE(review): table_name comes from the request; consider whitelisting.
    if start_time and end_time and batch:
        query_sql = text(f"""
            SELECT {select_text + 'RecordTime'}
            FROM {table_name}
            WHERE RecordTime BETWEEN :start_time AND :end_time AND {batchName} = :batch
        """).bindparams(start_time=start_time, end_time=end_time, batch=batch)
    elif start_time and end_time:
        query_sql = text(f"""
            SELECT {select_text + 'RecordTime'}
            FROM {table_name}
            WHERE RecordTime BETWEEN :start_time AND :end_time
        """).bindparams(start_time=start_time, end_time=end_time)
    elif batch:
        query_sql = text(f"""
            SELECT {select_text + 'RecordTime'}
            FROM {table_name}
            WHERE {batchName} = :batch
        """).bindparams(batch=batch)
        # Batch-only query: derive the display range from the batch itself.
        dic = getRecordTimeByBatch(db, table_name, batch)
        start_time = dic['start_time']
        end_time = dic['end_time']
    else:
        return jsonify([])

    # Target index: evenly spaced timestamps the data is resampled onto.
    idx = pd.date_range(start=start_time, end=end_time, freq=freq, name='RecordTime')

    # `with` returns the connection to the pool even if the query raises.
    with engine.connect() as conn:
        df = utils.read_sql(query_sql, conn)

    field_list = [item for item in fields.split(',') if item]  # drop empty entries

    if len(df) == 0:
        # No rows: still return the full x-axis plus one empty series per field.
        series = [{'field': f, 'data': []} for f in field_list]
        return jsonify({
            'xAxis': {'data': idx.strftime('%Y-%m-%d\n%H:%M:%S').tolist()},
            'series': series
        })

    # Duplicate timestamps are averaged so the index becomes unique.
    df.set_index('RecordTime', inplace=True)
    df = df.groupby('RecordTime').mean(numeric_only=True)

    # Forward-fill onto the regular grid, carrying a value at most 10 slots.
    sampled_df = df.reindex(idx, method='ffill', limit=10)

    data = sampled_df.to_dict(orient='list')
    # Replace NaN with None (JSON null) and round to each field's precision.
    for key, values in data.items():
        pcs = variableUtils.getPCS(key, 2)
        data[key] = [None if pd.isna(x) else utils.format_num(x, pcs) for x in values]

    series = [{'field': f, 'data': data.get(f, [])} for f in field_list]

    return jsonify({
        'xAxis': {'data': sampled_df.index.strftime('%Y-%m-%d\n%H:%M:%S').tolist()},
        'series': series
    })



@curve.route('/api/account-list')
def get_account_list():
    """Return the configured list of account names.

    The unreachable code that previously followed this return (a stale copy
    of the /api/get_time_range handler body) has been removed.
    """
    return jsonify(AccountNames)


@curve.route('/api/keep-db-alive')
def keep_db_alive():
    """Run a trivial query so the pooled DB connection does not idle out."""
    db = request.args.get('db', 'local')

    try:
        engine = current_app.engine_dict[db]
    except KeyError:
        # A missing engine key raises KeyError (the old code caught
        # ValueError, which never fires for a dict lookup).
        return jsonify({'error': f'Invalid database: {db}'}), 500

    with engine.connect() as con:
        try:
            # Wrap in text(): SQLAlchemy 1.4+/2.x no longer executes raw
            # SQL strings passed to Connection.execute().
            result = con.execute(text("SELECT NOW()")).fetchone()
            return jsonify({'status': 'success', 'time': result[0]})
        except Exception as e:
            return jsonify({'error': str(e)}), 500

@curve.route('/api/is-batch-no-exists')
def is_batch_no_exists():
    """Check whether a batch number exists in the Index<tbid> table.

    Returns {'batch_exists': bool}; 400 when batch_no is missing.
    """
    db = request.args.get('db', 'local')
    tbid = request.args.get('tbid', 1)
    batch_no = request.args.get('batch_no')

    if not batch_no:
        return jsonify({'error': 'No batch_no provided'}), 400

    try:
        engine = current_app.engine_dict[db]
    except KeyError:
        # dict lookup raises KeyError, not ValueError, for an unknown db.
        return jsonify({'error': f'Invalid database: {db}'}), 500

    # int() both honours the tbid argument (it was previously read but the
    # table was hard-coded to Index1) and prevents injection into the table
    # name; a non-numeric tbid raises ValueError -> blueprint 400 handler.
    table_name = f'Index{int(tbid)}'

    with engine.connect() as con:
        try:
            # batch_no is bound as a parameter — safe against injection.
            query_sql = text(f"SELECT COUNT(*) AS count FROM {table_name} WHERE bm_BatchNo = :batch_no")
            result = con.execute(query_sql, {'batch_no': batch_no}).fetchone()
            # Index by position: SQLAlchemy 1.4+ Row objects no longer
            # support result['count'] string indexing directly.
            batch_exists = result[0] > 0
            return jsonify({'batch_exists': batch_exists})
        except Exception as e:
            return jsonify({'error': str(e)}), 500
# Examples:
# http://172.16.14.99:5000/api/batch-no-list?db=mysql-localhost&batch_no=abcd1234
# http://172.16.14.99:5000/api/batch-no-list?db=mysql-localhost&lang=zh&page_size=10&page_number=1&step_rcp_type=0,1,2,3,4,5,6&accounts=001,TOFFLON
# page_number starts at 1
@curve.route('/api/batch-no-list')
def get_batch_no_list():
    """Paginated, filtered list of distinct batch rows from Index<tbid>.

    Filters: step_rcp_type (comma-separated ints), accounts (comma-separated
    user names; use_not_in=1 inverts the account filter via NOT IN on the
    complement). Rows are de-duplicated on the batch-number column, sorted
    newest first, then paginated.
    """
    db = request.args.get('db', 'local')
    tbid = request.args.get('tbid', 1)
    page_size = request.args.get('page_size', default=10, type=int)
    page_number = request.args.get('page_number', default=1, type=int)

    # Filters — an empty string means "no restriction" and falls back to all.
    step_rcp_type = request.args.get('step_rcp_type', default='0,1,2,3,4,5,6')
    if step_rcp_type == '':
        step_rcp_type = '0,1,2,3,4,5,6'
    # Every entry must be an integer, because the list is interpolated into
    # the SQL IN(...) clause below — this rebuild blocks SQL injection.
    # A bad value raises ValueError -> the blueprint's 400 handler.
    step_rcp_type = ','.join(str(int(t)) for t in step_rcp_type.split(','))

    Accounts = request.args.get('accounts', default=','.join(AccountNames))
    if Accounts == '':
        Accounts = ','.join(AccountNames)

    use_not_in = request.args.get('use_not_in', default=0, type=int)  # default: plain IN

    try:
        engine = current_app.engine_dict[db]
    except KeyError:
        # dict lookup raises KeyError, not ValueError, for an unknown db.
        return jsonify({'error': f'Invalid database: {db}'}), 400

    # int() validates tbid so it cannot inject into the table name.
    table_name = f'Index{int(tbid)}'

    if use_not_in:
        # Filter with the complement of the requested accounts
        # (case-sensitive on purpose; see history for the lowercased variant).
        accounts_set = set(Accounts.split(','))
        not_accounts_set = set(AccountNames) - accounts_set
        notAccounts = utils.add_quotes(','.join(not_accounts_set))
        user_condition = f"bm_LastUser NOT IN ({notAccounts})"
    else:
        user_condition = f"bm_LastUser IN ({utils.add_quotes(Accounts)})"

    query_sql = text(f"""
        SELECT *
        FROM {table_name}
        WHERE step_rcp_run_step_rcp_type IN ({step_rcp_type}) AND
        ({user_condition})
        AND bm_LastUser IS NOT NULL AND bm_LastUser <> ''
        AND bm_BatchNo IS NOT NULL AND bm_BatchNo <> ''
    """)
    # `with` returns the connection to the pool even if the query raises.
    with engine.connect() as con:
        df = utils.read_sql(query_sql, con)

    df = df.drop_duplicates(batchName)

    total_items = len(df)
    total_pages = math.ceil(total_items / page_size)
    skip = (page_number - 1) * page_size

    # Normalize RecordTime to 'YYYY-mm-dd HH:MM:SS' strings for the client.
    df['RecordTime'] = pd.to_datetime(df['RecordTime']).dt.strftime('%Y-%m-%d %H:%M:%S')

    df = df.sort_values('RecordTime', ascending=False)
    paginated_df = df.iloc[skip:skip + page_size]

    data = paginated_df.to_dict(orient='records')

    return jsonify({
        'data': data,
        'page_number': page_number,
        'page_size': page_size,
        'total_items': total_items,
        'total_pages': total_pages
    })
# DataFrame.to_dict(orient=...) reference:
# 'dict' (default): {column -> {index -> value}}
# 'list':           {column -> [values]}
# 'series':         {column -> Series(values)}
# 'records':        [{'col1': v1, 'col2': v2, ...}, {'col1': v1, ...}, ...]

def fields_to_query(fields):
    """Expand a comma-separated field list into a SQL SELECT fragment.

    Fields found in getVirtualDict() with a non-empty 'exp' expression are
    emitted as "<exp> as <field>"; everything else is emitted verbatim.
    Each entry is followed by a trailing comma so the caller can append
    further columns directly. Empty entries are skipped.
    """
    virtual_dict = getVirtualDict()
    parts = []
    for name in fields.split(','):
        if not name:  # skip empty entries from leading/trailing commas
            continue
        exp = ''
        if name in virtual_dict:
            exp = virtual_dict[name]['ext_dict'].get('exp', '')
        parts.append(f"{exp} as {name}" if exp != '' else name)
    return ''.join(part + ',' for part in parts)


# 卢湘仪 (Luxiangyi) customer-specific batch list
@curve.route('/api/lxy-batch-list')
def get_lxy_batch_list():
    """Paginated list of distinct virtual_Dummy_batchno values, newest first.

    This endpoint is pinned to the 'mysql-localhost' engine by design.
    Returns {'data', 'page_size', 'page_number', 'total_items', 'total_pages'}.
    """
    try:
        # Pagination parameters.
        page_size = request.args.get('page_size', default=10, type=int)
        page_number = request.args.get('page_number', default=1, type=int)

        query_sql = text("""
            SELECT DISTINCT virtual_Dummy_batchno as batch_no,
                   MAX(RecordTime) as record_time
            FROM data1
            WHERE virtual_Dummy_batchno IS NOT NULL
            AND virtual_Dummy_batchno != ''
            GROUP BY virtual_Dummy_batchno
            ORDER BY record_time DESC
        """)

        try:
            engine = current_app.engine_dict['mysql-localhost']
        except KeyError:
            print("数据库连接失败: 找不到mysql-localhost连接")
            return jsonify({'error': '数据库配置错误'}), 500

        try:
            # `with` returns the connection to the pool even on error.
            with engine.connect() as conn:
                df = utils.read_sql(query_sql, conn)
        except Exception as e:
            print(f"数据库查询错误: {str(e)}")
            return jsonify({'error': '数据库查询失败'}), 500

        total_items = len(df)
        total_pages = math.ceil(total_items / page_size)

        start_idx = (page_number - 1) * page_size
        end_idx = start_idx + page_size
        # .copy() so the column assignment below writes to an independent
        # frame instead of a view of df (avoids SettingWithCopyWarning and
        # possibly-lost writes).
        paginated_df = df.iloc[start_idx:end_idx].copy()

        # Format timestamps for the client.
        paginated_df['record_time'] = pd.to_datetime(paginated_df['record_time']).dt.strftime('%Y-%m-%d %H:%M:%S')

        data = paginated_df.to_dict(orient='records')

        return jsonify({
            'data': data,
            'page_size': page_size,
            'page_number': page_number,
            'total_items': total_items,
            'total_pages': total_pages
        })

    except Exception as e:
        print(f"发生错误: {str(e)}")
        return jsonify({'error': '服务器内部错误'}), 500

# 知正 (Zhizheng) customer-specific batch list
@curve.route('/api/zz-batch-list')
def get_zz_batch_list():
    """Paginated list of distinct bm_BatchNo values, newest first.

    Near-duplicate of get_lxy_batch_list but keyed on bm_BatchNo; also
    pinned to the 'mysql-localhost' engine by design.
    Returns {'data', 'page_size', 'page_number', 'total_items', 'total_pages'}.
    """
    try:
        # Pagination parameters.
        page_size = request.args.get('page_size', default=10, type=int)
        page_number = request.args.get('page_number', default=1, type=int)

        query_sql = text("""
            SELECT DISTINCT bm_BatchNo as batch_no,
                   MAX(RecordTime) as record_time
            FROM data1
            WHERE bm_BatchNo IS NOT NULL
            AND bm_BatchNo != ''
            GROUP BY bm_BatchNo
            ORDER BY record_time DESC
        """)

        try:
            engine = current_app.engine_dict['mysql-localhost']
        except KeyError:
            print("数据库连接失败: 找不到mysql-localhost连接")
            return jsonify({'error': '数据库配置错误'}), 500

        try:
            # `with` returns the connection to the pool even on error.
            with engine.connect() as conn:
                df = utils.read_sql(query_sql, conn)
        except Exception as e:
            print(f"数据库查询错误: {str(e)}")
            return jsonify({'error': '数据库查询失败'}), 500

        total_items = len(df)
        total_pages = math.ceil(total_items / page_size)

        start_idx = (page_number - 1) * page_size
        end_idx = start_idx + page_size
        # .copy() so the column assignment below writes to an independent
        # frame instead of a view of df (avoids SettingWithCopyWarning and
        # possibly-lost writes).
        paginated_df = df.iloc[start_idx:end_idx].copy()

        # Format timestamps for the client.
        paginated_df['record_time'] = pd.to_datetime(paginated_df['record_time']).dt.strftime('%Y-%m-%d %H:%M:%S')

        data = paginated_df.to_dict(orient='records')

        return jsonify({
            'data': data,
            'page_size': page_size,
            'page_number': page_number,
            'total_items': total_items,
            'total_pages': total_pages
        })

    except Exception as e:
        print(f"发生错误: {str(e)}")
        return jsonify({'error': '服务器内部错误'}), 500
