import math
import os
import re

import pandas as pd
from flask import Blueprint, current_app, g, jsonify, request
from sqlalchemy import text

import tableJSON
import utils

# Column name used for batch-number filtering in SQL; resolved once at import time.
batchName = utils.getBatchNo()

# Blueprint grouping the /api/table/* endpoints defined in this module.
table = Blueprint('table', __name__)

@table.route('/api/table/fields', methods=['GET'])
def get_fields():
    """Return the field metadata for table `tbid` of database `db` as JSON."""
    db_name = request.args.get('db', 'local')
    table_id = request.args.get('tbid', 1)  # e.g. Data1 / Event1
    return jsonify(tableJSON.getFields(db_name, table_id))


@table.route('/api/table/qs/<query_or_savecsv>', methods=['POST'])
def time_query_or_savecsv(query_or_savecsv):
    """Query rows from a table, resample them onto a fixed-frequency time grid,
    and either write the full result to a CSV file (URL suffix 'savecsv') or
    return one page of it as JSON (any other suffix).

    JSON body: start_time/end_time and/or batch (at least a time range or a
    batch is required), table_name, db, optional fields list, freq (seconds,
    default 3), and page_size/page_number for the pagination path.
    """
    body = request.json
    start_time = body.get('start_time')
    end_time = body.get('end_time')
    batch = body.get('batch')
    table_name = body.get('table_name')
    db = body.get('db')
    requested_fields = body.get('fields')
    fields = ','.join(requested_fields) if requested_fields is not None else '*'
    select_text = fields_to_query(fields)

    freq = str(body.get('freq', 3)) + 'S'  # pandas offset alias, e.g. '3S'
    page_size = body.get('page_size')
    page_number = body.get('page_number')

    # table_name is interpolated into SQL as an identifier (identifiers cannot
    # be bind parameters), so restrict it to a plain word to block injection.
    if not table_name or not re.fullmatch(r'\w+', str(table_name)):
        return jsonify([])

    # Build the query dynamically; all *values* are bound parameters instead of
    # being spliced into the SQL string (the original f-strings were injectable).
    # NOTE(review): assumes utils.read_sql executes the TextClause on the
    # SQLAlchemy connection so the pre-bound parameters apply — confirm.
    if start_time and end_time and batch:
        query_sql = text(
            f"SELECT {select_text},RecordTime FROM {table_name} "
            f"WHERE RecordTime BETWEEN :start_time AND :end_time "
            f"AND {batchName} = :batch"
        ).bindparams(start_time=start_time, end_time=end_time, batch=batch)
    elif start_time and end_time:
        query_sql = text(
            f"SELECT {select_text},RecordTime FROM {table_name} "
            f"WHERE RecordTime BETWEEN :start_time AND :end_time"
        ).bindparams(start_time=start_time, end_time=end_time)
    elif batch:
        query_sql = text(
            f"SELECT {select_text},RecordTime FROM {table_name} "
            f"WHERE {batchName} = :batch"
        ).bindparams(batch=batch)
        # Only a batch was given: derive the time range from the data itself.
        time_range = getRecordTimeByBatch(db, table_name, batch)
        start_time = time_range['start_time']
        end_time = time_range['end_time']
    else:
        return jsonify([])

    # Target index: one row every `freq` between start_time and end_time.
    idx = pd.date_range(start=start_time, end=end_time, freq=freq, name='RecordTime')

    try:
        # engine_dict is indexed like a dict, so an unknown db raises KeyError
        # (the original caught ValueError, which never fires on a dict lookup).
        engine = current_app.engine_dict[db]
    except KeyError as e:
        print(e)
        return str(e)

    # Context manager guarantees the connection closes even if the read fails
    # (the original leaked the connection on exception).
    with engine.connect() as conn:
        df = utils.read_sql(query_sql, conn)

    # Collapse duplicate timestamps by averaging numeric columns so RecordTime
    # becomes a unique index suitable for reindexing.
    df.set_index('RecordTime', inplace=True)
    df = df.groupby('RecordTime').mean(numeric_only=True)

    if df.empty:
        return jsonify({
            'data': [],
            'page_size': page_size,
            'total_items': 0,
            'total_pages': 0
        })

    # Snap rows onto the regular grid, forward-filling gaps of up to 10 steps,
    # then mark anything still missing with the '--' placeholder.
    sampled_df = df.reindex(idx, method='ffill', limit=10)
    sampled_df = sampled_df.fillna(value='--')

    if query_or_savecsv == 'savecsv':
        return _save_csv(db, table_name, sampled_df, batch, start_time, end_time)
    return _paginate(sampled_df, page_size, page_number)


def _save_csv(db, table_name, sampled_df, batch, start_time, end_time):
    """Rename columns to their display names, write the frame to a CSV under
    <files>/table, and return a small success payload."""
    # Map raw column names to display names from the field metadata; the last
    # character of table_name is used as the field-group id, as before.
    fields = tableJSON.getFields(db, table_name[-1])
    new_columns = {f['Name']: f['NameInFieldGroup'] for f in fields}
    sampled_df = sampled_df.rename(columns=new_columns)

    # Make the timestamps filesystem-safe.
    start_time = start_time.replace(":", "-").replace(" ", "-").replace("\n", "T")
    end_time = end_time.replace(":", "-").replace(" ", "-").replace("\n", "T")
    file_name = f"Start_{start_time}_End_{end_time}"
    if batch is not None:
        file_name = f"Batch_{batch}_{file_name}.csv"
    else:
        file_name = f"Data_{file_name}.csv"

    file_dir = os.path.join(utils.getFilesPath(), 'table')
    os.makedirs(file_dir, exist_ok=True)  # create the directory on demand
    sampled_df.to_csv(os.path.join(file_dir, file_name), sep=',', index=True)
    return jsonify({'result': 1})


def _paginate(sampled_df, page_size, page_number):
    """Slice one page out of the resampled frame and return it as JSON."""
    total_items = len(sampled_df)
    total_pages = math.ceil(total_items / page_size)
    skip = (page_number - 1) * page_size
    page_df = sampled_df.iloc[skip:skip + page_size].reset_index()

    # Serialize the timestamp index in a fixed, client-friendly format.
    page_df['RecordTime'] = pd.to_datetime(page_df['RecordTime']).dt.strftime('%Y-%m-%d %H:%M:%S')

    return jsonify({
        'data': page_df.to_dict(orient='records'),
        'page_size': page_size,
        'total_items': total_items,
        'total_pages': total_pages
    })


# Look up the start and end RecordTime for a batch number.
def getRecordTimeByBatch(db, table_name, batchNo):
    """Return {'start_time': ..., 'end_time': ...} for a batch, formatted as
    '%Y-%m-%d %H:%M:%S' strings, or None values when the batch has no rows.

    The batch number is passed as a bound parameter (the original spliced it
    into the SQL string, which was injectable). table_name and the batch
    column name are identifiers and are assumed to come from trusted config.
    """
    query = text(
        f"SELECT MIN(RecordTime) AS start_time, MAX(RecordTime) AS end_time "
        f"FROM {table_name} WHERE {batchName} = :batch"
    ).bindparams(batch=batchNo)

    # Context manager closes the connection even if the query raises.
    with current_app.engine_dict[db].connect() as conn:
        result = pd.read_sql_query(query, conn)

    def _fmt(value):
        # MIN/MAX come back as NaT when the batch matched no rows.
        return value.strftime('%Y-%m-%d %H:%M:%S') if not pd.isna(value) else None

    return {
        "start_time": _fmt(result['start_time'][0]),
        "end_time": _fmt(result['end_time'][0]),
    }


def fields_to_query(fields):
    """Translate a comma-separated field list into a SELECT column list,
    expanding virtual fields into their '<expression> as <name>' form.

    Fields not present in the virtual-field dictionary, or whose entry has no
    expression, are passed through unchanged; empty items are dropped.
    """
    virtual_fields = tableJSON.getVirtualDict()
    columns = []
    for name in filter(None, fields.split(',')):  # filter(None, ...) drops ''
        entry = virtual_fields.get(name)
        expression = entry['ext_dict'].get('exp', '') if entry is not None else ''
        columns.append(f"{expression} as {name}" if expression != '' else name)
    return ','.join(columns)