#!/usr/bin/python3
# -*- coding:utf-8 -*-
# __author__ = '__Tony__'
import json
import traceback

import numpy as np
import orjson
import re

from openpyxl.styles.builtins import total

from errors import BaseAPIException
from log import log
from fastapi import APIRouter, Form, Body, File, UploadFile, Depends, Query
from fastapi.responses import JSONResponse, Response, StreamingResponse
from database import get_db
from tutorial.chapter05 import sub_query
from utility.utility import download_zip, zip_files, download_file_stream, json_result_reps, result, is_number
from app.yg_crm.services import One2Many, getBillDetailByBillNo, add_or_update_records
from sqlalchemy.orm import Session, aliased, joinedload, selectinload
from app.yg_crm import crud, orm, schemas
from app.yg_crm.orm import CheckingBillFromExcel, CheckingBill, TradePartner, Client, CheckingBillFee, YGFeeType
import pandas as pd
from os import path
from app.oss_api.services import OssBaseHandler
from io import BytesIO
from typing import List
from datetime import datetime
from app.yg_crm.services.excelHandler import export_excel
from app.yg_crm.services import getExcelFileCreateYear
from decimal import Decimal
from typing import Dict
from sqlalchemy import create_engine, Column, Integer, String, Float, case, func
from utility.utility import dateFormat
from sqlalchemy.orm import noload

yg_router = APIRouter()


@yg_router.post("/excel_one2many")
async def excel_one2many(file: UploadFile = File(alias='file'), styleTpl: str = Form(default='')):
    """Split one uploaded Excel workbook into many files via a style template.

    Reads the uploaded workbook, locates the master-waybill-number ("单号")
    column, feeds the cleaned DataFrame to ``One2Many`` and streams the
    generated files back as a single ZIP download.

    Args:
        file: the source Excel workbook.
        styleTpl: optional OSS path of a style template workbook; its JSON
            sidecar config is merged into the business parameters.

    Raises:
        BaseAPIException: when no header row containing "单号" is found, or
            when ``One2Many`` produces nothing to save.
    """
    source_file = file.filename
    source_file_contents = await file.read()
    # Build the download name: <original stem>-<timestamp>.zip
    now = datetime.now()
    timestamp = now.strftime("%Y%m%d%H%M%S")
    original_excel_filename = path.basename(source_file)
    new_filename = "{0}-{1}".format(path.splitext(original_excel_filename)[0], timestamp)
    # BUG FIX: the filename itself was passed as the log *format string* (and
    # passed twice); use a placeholder so names containing braces are safe.
    log.info("original excel filename: {}", original_excel_filename)
    # Template handling: pull the style workbook and its JSON config from OSS.
    tpl_data = None
    json_params = {}
    if styleTpl:
        oss = OssBaseHandler(bucket_name="waybill")
        obj = oss.getObject(filepath=styleTpl)
        tpl_data = BytesIO(obj['data'])
        json_params = oss.is_json(obj['config'])
    # Load with pandas; NaN cells break downstream emptiness checks, so blank them.
    df = pd.read_excel(source_file_contents)
    df.fillna('', inplace=True)
    biz_params = {'feeRate': 30.97}  # default fee rate, overridable by the template config
    if json_params:
        biz_params.update(json_params)
        log.info(biz_params)

    # DataFrame pre-processing to tolerate non-standard sheets:
    # locate the header row containing the bill-number column.
    header_row_index = None
    bill_field_name = None
    # A bill-number column contains "单号" but must not start with "分"
    # (i.e. the master bill number, not a sub/house bill number).
    pattern = re.compile("[^分]+{}.*".format("单号"))
    match = [v for v in df.columns if isinstance(v, str) and pattern.match(str(v))]
    if match:
        bill_field_name = match[0]
    else:
        # Header is not on the first row: scan cell values for it.
        # NOTE(review): only the inner loop breaks, so a later row that also
        # matches overwrites header_row_index (last match wins) — confirm this
        # is intended before adding an outer break.
        for index, row in df.iterrows():
            for v in row.values:
                if isinstance(v, str) and pattern.match(str(v)):
                    header_row_index = index
                    bill_field_name = v
                    break
        # BUG FIX: placeholder-less messages silently dropped the values.
        log.info("header_row_index: {}", header_row_index)
        log.info("bill_field_name: {}", bill_field_name)
        if header_row_index is not None:
            # BUG FIX: this log used to run before the None check and raised a
            # bare TypeError (df.iloc[None]) instead of the API error below.
            log.info(df.iloc[header_row_index].tolist())
            header_columns = df.iloc[header_row_index].tolist()
            # Drop everything above (and including) the header row...
            df = df.drop(range(header_row_index + 1)).reset_index(drop=True)
            # ...then promote the header row to the column names.
            df.columns = header_columns
        else:
            raise BaseAPIException(detail="没有找到表头！请检查数据源表头是否有‘单号’列信息！", )
    print(df.columns)
    # Drop columns whose name is NaN or auto-generated ("Unnamed...").
    df = df.loc[:, ~df.columns.isna()]
    df = df.loc[:, ~df.columns.str.contains('^Unnamed', na=False)]
    print("df.columns:", df.columns)
    # Normalize column names: strip embedded newlines and slashes.
    for c in df.columns:
        if '\n' in c or '/' in c:
            print(c)
            df.rename(columns={c: c.replace('\n', '').replace('/', '')}, inplace=True)

    writer1 = One2Many(df=df, sheet_name='主单', biz_params=biz_params)
    files_writers = writer1.save(tpl_data, bill_field_name)  # tpl_data is now required
    if files_writers:
        zip_content = await zip_files(files=files_writers)
    else:
        raise BaseAPIException(detail="没有要保存的记录！！", )
    # Return the ZIP archive as the response body.
    log.info("finally...")
    return download_zip(zip_content, f'{new_filename}.zip')
    # return download_file_stream(zip_content, f'{new_filename}.zip')


"""
示例
source_file /Users/tonywu/Public/Demo/test_panda/data/bill_datasource.xlsx
oss: templates/waybill_template_01.xlsx
"""


@yg_router.post("/excel_loaded_check")
async def excel_loaded_check(db: Session = Depends(get_db), files: List[UploadFile] = File(...)):
    """Pre-check uploaded reconciliation Excel files against system bills.

    For each file: locate the header row via the "单号" (bill-no) keyword,
    normalize the bill-number / chargeable-weight / amount / date columns,
    pivot per-fee-type rows into one row per bill when a "费用类型" column
    exists, then left-join with the bills already in the database and flag
    weight mismatches.

    Returns a dict mapping file name -> JSON string of comparison records.
    """
    keyword1 = '单号'
    file_details = {}
    # NOTE(review): collected but never returned — consider reporting it.
    errors = {"no_billNo": [], "no_measure_weight": []}
    with db.begin():
        for file in files:
            contents = await file.read()
            log.info(file.filename)
            # Current year/date, used below to repair 1900-based dates.
            current_date = datetime.now()
            current_year = pd.Timestamp.now().year
            print("year:{}".format(current_year))
            df = pd.read_excel(BytesIO(contents), header=None, dtype=str)
            # BUG FIX: this was initialized to 0, which made the
            # "is not None" check below always true; files without a
            # recognizable header crashed on df[None] instead of being
            # reported under errors["no_billNo"].
            header_row_index = None
            header_columns = []
            bill_field_name = None
            measureWeight_field_name = None
            amount_field_name = None
            date_field_name = None
            # The LAST row containing a "...单号" cell wins as the header row.
            pattern = re.compile("[\u4e00-\u9fa5]*{}".format(keyword1))
            for index, row in df.iterrows():
                match = [v for v in row.values if isinstance(v, str) and pattern.match(str(v))]
                if match:
                    header_row_index = index
                    bill_field_name = match[0]
            if header_row_index is not None:
                try:
                    # Drop columns whose header cell is NaN to get a clean header.
                    cols_with_nan = df.iloc[header_row_index].isna()
                    df.drop(columns=df.columns[cols_with_nan], inplace=True)
                    header_columns = df.iloc[header_row_index].tolist()
                    # Drop the rows above (and including) the header row,
                    # then promote the header row to the column names.
                    df = df.drop(range(header_row_index + 1)).reset_index(drop=True)
                    df.columns = header_columns
                    # Detect the weight/amount/date columns; remember original
                    # names so they can be renamed to canonical ones below.
                    amount_pattern = re.compile("(总|应收)?金额(合计)?.*$")
                    date_pattern = re.compile("(报关日期|对账日期|日期|核算时间|航班日期|航班时间)$")
                    weight_pattern = re.compile("(计费重量|计费)$")
                    measureWeight_match = [column for column in header_columns if weight_pattern.match(str(column))]
                    amount_match = [column for column in header_columns if amount_pattern.match(str(column))]
                    date_match = [column for column in header_columns if date_pattern.match(str(column))]
                    log.info("measureWeight_match: {}, amount_match: {}", measureWeight_match, amount_match)
                    if not measureWeight_match:
                        errors['no_measure_weight'].append(file.filename)
                        log.error("{},没有计费重量，表头应存在有'计费重量'关键字的列名！", file.filename)
                    else:
                        measureWeight_field_name = measureWeight_match[0]
                    if amount_match:
                        amount_field_name = amount_match[0]
                    if date_match:
                        date_field_name = date_match[0]
                except:
                    # Best effort: a malformed header should not kill the batch.
                    traceback.print_exc()
                    log.info(header_columns)

                # Bill-number column: force to str and strip dashes.
                df[bill_field_name] = df[bill_field_name].astype(str)
                df[bill_field_name] = df[bill_field_name].str.replace("-", "")
                # Keep only rows whose bill no starts with 11 digits; this also
                # drops trailing summary/garbage rows.
                df = df[df[bill_field_name].apply(lambda x: bool(re.match("[0-9]{11}", str(x))) if pd.notna(x) else False)]
                # Files listing one fee type per row get pivoted to one row per
                # bill, with a column per fee type plus a grand total.
                if '费用类型' in df.columns.tolist():
                    log.info("转化....")
                    fei_yong_types = list(set(df['费用类型']))
                    log.info("费用类型: {}", fei_yong_types)
                    # NOTE(review): assumes a date column was found; pivoting
                    # with date_field_name=None would fail — confirm inputs.
                    df = df.pivot_table(index=[bill_field_name, date_field_name], columns='费用类型', values=amount_field_name, aggfunc='sum')
                    df['总金额'] = df[fei_yong_types].sum(axis=1)
                    df.reset_index(inplace=True)
                df.set_index(bill_field_name, inplace=True)
                # Canonicalize the detected column names.
                rename_field_dict = {}
                # BUG FIX: the message had no "{}" placeholder, so the value
                # was silently dropped from the log.
                log.info("date_field_name: {}", date_field_name)
                if measureWeight_field_name:
                    rename_field_dict[measureWeight_field_name] = "计费重量"
                if amount_field_name:
                    rename_field_dict[amount_field_name] = "总金额"
                if date_field_name:
                    rename_field_dict[date_field_name] = "日期"
                df.rename(columns=rename_field_dict, inplace=True)
                # Repair non-standard date values:
                if date_field_name:
                    if df['日期'].iloc[0].startswith('1900'):
                        # Year 1900 means "year missing": substitute the current
                        # year, then roll future dates back one year (year wrap).
                        df['日期'] = df['日期'].str.replace('1900', str(current_year))
                        df['日期'] = pd.to_datetime(df['日期'])
                        df['日期'] = df['日期'].apply(lambda x: x.replace(year=current_year - 1) if x > current_date else x)
                        df['日期'] = df['日期'].dt.strftime('%Y-%m-%d')
                    if isinstance(df['日期'].iloc[0], int) or len(df['日期'].iloc[0]) == 5:
                        # 5-digit values are Excel serial day numbers.
                        df['日期'] = pd.to_numeric(df['日期'])
                        df['日期'] = pd.to_datetime(df['日期'], origin='1899-12-30', unit='D')
                        df['日期'] = df['日期'].dt.strftime('%Y-%m-%d')
                log.info(rename_field_dict)
                # Fetch the matching system bills by master bill number.
                results = getBillDetailByBillNo(db, billNos=df.index.tolist())
                if len(results) == 0:
                    df_join = df
                else:
                    df_db = pd.DataFrame([{'total_amount': r.total_amount, 'id': r.id, 'bill_no': r.bill_no, 'weight': r.weight_for_fee} for r in results])
                    df_db.set_index('bill_no', inplace=True)
                    df_join = df.join(df_db, how='left', rsuffix='_db')
                if '计费重量' not in df_join.columns:
                    df_join['计费重量'] = np.nan
                if '总金额' not in df_join.columns:
                    raise BaseAPIException(detail="表格没有金额列！！")
                df_join["主单号"] = df_join.index
                df_join = df_join.filter(["主单号", "日期", "计费重量", "总金额", "id", "weight", "total_amount"], axis=1)
                # Coerce numeric columns (bad cells become NaN).
                df_join['总金额'] = pd.to_numeric(df_join['总金额'], errors='coerce')
                df_join['计费重量'] = pd.to_numeric(df_join['计费重量'], errors='coerce')
                if len(results) > 0:
                    df_join['total_amount'] = pd.to_numeric(df_join['total_amount'], errors='coerce')
                    df_join['weight'] = pd.to_numeric(df_join['weight'], errors='coerce')
                    df_join['id'] = pd.to_numeric(df_join['id'], errors='coerce').astype('Int64')
                    # Flag rows whose Excel weight disagrees with the system.
                    df_join['verify_weight'] = (df_join['计费重量'] == df_join['weight'])
                print(df_join)
                records = df_join.to_json(orient='records')
                print("records:", records)
                file_details[file.filename] = records
            else:
                errors['no_billNo'].append(file.filename)
                log.error("{},找不到表头，表头应存在有'单号'关键字的列名！", file.filename)
                continue

    return json_result_reps(data=file_details)

@yg_router.post("/excel_loaded")
async def excel_loaded(db: Session = Depends(get_db), files: List[UploadFile] = File(...), id_client: str = Form(...)):
    """Load reconciliation Excel files and normalize them for import.

    ``id_client`` is a JSON object mapping file name -> trade-partner id;
    each partner's configured fee items decide which fee columns the sheet
    must provide.

    Returns a dict mapping file name -> JSON string of normalized records.

    Raises:
        BaseAPIException: when a partner has no fee-item ids or names.
    """
    clients_dict = json.loads(id_client)
    log.info("clients_dict:{}",clients_dict)
    keyword1 = '单号'
    file_details = {}
    # NOTE(review): collected but never returned — consider reporting it.
    errors = {"no_billNo": [], "no_measure_weight": []}
    with db.begin():
        for file in files:
            contents = await file.read()
            log.info(file.filename)
            current_year = pd.Timestamp.now().year
            print("year:{}".format(current_year))
            df = pd.read_excel(BytesIO(contents), header=None, dtype=str)
            # BUG FIX: this was initialized to 0, making the "is not None"
            # check below always true, so headerless files crashed instead of
            # being reported under errors["no_billNo"].
            header_row_index = None
            header_columns = []
            bill_field_name = None
            # The LAST row containing a "...单号" cell wins as the header row.
            pattern = re.compile("[\u4e00-\u9fa5]*{}".format(keyword1))
            for index, row in df.iterrows():
                match = [v for v in row.values if isinstance(v, str) and pattern.match(str(v))]
                if match:
                    header_row_index = index
                    bill_field_name = match[0]
            if header_row_index is not None:
                try:
                    # Drop NaN-named columns to obtain a clean header.
                    cols_with_nan = df.iloc[header_row_index].isna()
                    df.drop(columns=df.columns[cols_with_nan], inplace=True)
                    header_columns = df.iloc[header_row_index].tolist()
                    # Drop the rows above (and including) the header row,
                    # then promote the header row to the column names.
                    df = df.drop(range(header_row_index + 1)).reset_index(drop=True)
                    df.columns = header_columns
                    # Resolve the fee columns configured for this client.
                    id_fee_items = db.query(TradePartner.fee_items).filter(TradePartner.id == clients_dict[file.filename]).scalar()
                    if not id_fee_items:
                        raise BaseAPIException(detail="{}, 没有找到对应的费用项目ID！！".format(file.filename))
                    fee_items  = [r[0] for r in db.query(YGFeeType.name).filter(YGFeeType.id.in_(id_fee_items.split(","))).all()]
                    if not fee_items:
                        raise BaseAPIException(detail="{}, 没有找到对应的费用项目名称！！".format(file.filename))
                    log.info("fee_items:{}", fee_items)
                    log.info("header_columns:{}", header_columns)
                    # Map sheet columns to canonical names; remember originals.
                    column_mapping = {}
                    def is_valid(column, feeItem):
                        # Weight columns must match exactly so that "重量"
                        # does not also swallow "计费重量"; everything else
                        # matches on substring in either direction.
                        if column in ['重量', '计费重量']:
                            return column == feeItem
                        else:
                            return column in feeItem or feeItem in column
                    fee_and_common_items = fee_items + ['始发港', '目的港', '件数', '重量', '计费重量', '单价', '总金额', '备注']
                    for item in fee_and_common_items:
                        item_matched = [column for column in header_columns if is_valid(column, item)]
                        if item_matched:
                            column_mapping[item_matched[0]] = item
                    date_pattern = re.compile(".*日期$")
                    date_match = [column for column in header_columns if date_pattern.match(str(column))]
                    if date_match:
                        column_mapping[date_match[0]] = "日期"
                    column_mapping[bill_field_name] = "主单号"
                    df.rename(columns=column_mapping, inplace=True)
                except:
                    # NOTE(review): if this fires, column_mapping and
                    # fee_and_common_items below are undefined and the request
                    # crashes anyway — consider re-raising here.
                    traceback.print_exc()
                    log.info(header_columns)
                # Normalize the date column.
                df["日期"] = df["日期"].astype(str)
                df["日期"] = df["日期"].apply(dateFormat)
                print("list(column_mapping.values()) + fee_and_common_items:",list(column_mapping.values()) + fee_and_common_items)
                # Keep exactly the canonical columns; missing ones become None.
                df = df.reindex(columns=list(set(list(column_mapping.values()) + fee_and_common_items)), fill_value=None)
                # BUG FIX: this was `if item in fee_items:`, reusing the loop
                # leftover `item` ("备注"), so NO fee column was ever converted
                # to numeric before fillna(0). Convert every fee column.
                for fee_item in fee_items:
                    df[fee_item] = pd.to_numeric(df[fee_item], errors='coerce')
                df[fee_items] = df[fee_items].fillna(0)
                # Keep the leading run of rows that have a master bill number:
                # cumprod flips to 0 at the first missing bill no and stays 0.
                mask = df['主单号'].notna()
                valid_rows = mask.cumprod() == 1
                df = df[valid_rows]
                records = df.to_json(orient='records')
                file_details[file.filename] = records
            else:
                errors['no_billNo'].append(file.filename)
                log.error("{},找不到表头，表头应存在有'单号'关键字的列名！", file.filename)
                continue

    return json_result_reps(data=file_details)

"""
Regex-filtering example:
pattern = 'a.*e'
result = df[df['column_name'].str.contains(pattern, na=False)]
"""

@yg_router.post("/excel_data_import")
async def create_bill_from_excel(db: Session = Depends(get_db), data: Dict = Body()):
    """Import parsed Excel records into the database.

    Two modes, selected by ``data["type"]``:
      * ``"checking_bill"``: upsert ``CheckingBill`` master rows (matched on
        bill no with ``bill_type == 1``) and rebuild their
        ``CheckingBillFee`` children.
      * anything else: bulk-upsert ``CheckingBillFromExcel`` staging rows.

    ``data["records"]`` maps file name -> a list where, as used below,
    [0] = row dicts, [1] = trade-partner id, [2] = company id,
    [3] = overwrite flag (inferred from usage — confirm with the caller).

    Raises:
        BaseAPIException: wrapping any failure, with the full traceback text.
    """
    err_msg = ""
    try:
        if "type" in data and data["type"] == "checking_bill":
            # #all_fee_items = db.scalars(db.query(YGFeeType.name)).all()
            # Fee-type lookup table as (id, name) pairs; noload('*') keeps
            # relationships from being loaded.
            all_fee_items = [(r.id,r.name) for r in db.query(YGFeeType.id,YGFeeType.name).options(noload('*')).all()]
            print("all_fee_items:", all_fee_items)
            def getIdByFee(feeName:str):
                # Linear scan of the lookup table; returns None when unknown.
                for item in all_fee_items:
                    if item[1] == feeName:
                        return item[0]
            print("getIdByFee:", getIdByFee("运费"))
            checkingBillFees = []
            for k, records in data["records"].items():
                    print("override:", records[3])
                    for r in records[0]:
                        # Manual merge of the master row (query-then-update).
                        checkingBill = db.query(orm.CheckingBill).options(noload('*') ).filter(
                            orm.CheckingBill.bill_no == r.get("主单号"),
                            orm.CheckingBill.bill_type == 1
                        ).first()
                        # Departure port defaults to HGH when absent or blank.
                        departure_port = r.get("始发港","HGH") or "HGH"
                        if checkingBill:
                            if not records[3]: # skip existing rows unless overwrite-update is enabled
                                continue
                            checkingBill.bill_date=r.get("日期")
                            checkingBill.articles=r.get("件数")
                            checkingBill.unit_price = r.get("单价")
                            checkingBill.weight_for_fee=r.get("计费重量")
                            checkingBill.weight=r.get("重量")
                            checkingBill.total_amount=r.get("总金额")
                            checkingBill.departure_port = departure_port
                            checkingBill.destination_port= r.get("目的港")
                            checkingBill.memo=r.get("备注")
                            checkingBill.update_by=data.get("create_by")
                            checkingBill.update_time=datetime.now()
                            checkingBill.id_trade_partner = records[1];checkingBill.id_company = records[2]
                            # Overwrite mode: drop old fee lines; they are
                            # re-created from the incoming row below.
                            db.query(orm.CheckingBillFee).filter(orm.CheckingBillFee.id_CheckingBill == checkingBill.id).delete(synchronize_session=False)
                            log.info("to update:{}", checkingBill)
                        else:
                            checkingBill = orm.CheckingBill(bill_date=r.get("日期"),bill_no=r.get("主单号"),articles=r.get("件数"),weight_for_fee=r.get("计费重量"),weight=r.get("重量"),
                                             bill_type=1, status="0", unit_price=r.get("单价"),total_amount=r.get("总金额"),destination_port=r.get("目的港"),departure_port=departure_port,memo=r.get("备注"),
                                             id_trade_partner=records[1],id_company=records[2],create_by=data.get("create_by"), update_by=data.get("create_by"), update_time=datetime.now())
                            db.add(checkingBill)
                            # flush() assigns checkingBill.id for the children.
                            db.flush()
                        for field in r.keys():
                            id_feeType=getIdByFee(feeName=field)
                            if not id_feeType: # skip non-fee fields
                                continue
                            print("feeType:{},金额:{}".format(id_feeType,r.get(field)))
                            # Link rows reference ids, not ORM objects, which would
                            # allow merge on the link table — but merge is too slow
                            # for large batches, so children are bulk-inserted.
                            amount = Decimal(r.get(field))
                            if amount > 0:
                                checkingBillFee=orm.CheckingBillFee(id_FeeType=id_feeType, id_CheckingBill=checkingBill.id, amount=amount)
                                checkingBillFees.append(checkingBillFee)
            db.add_all(checkingBillFees)
            db.commit()
        else:
            # Staging import: one CheckingBillFromExcel row per parsed record;
            # records[1] (trade-partner id) is stored as id_client.
            ormObjects = [orm.CheckingBillFromExcel(bill_no=r.get('主单号'), bill_date=r.get('日期'), file_name=k, weight_for_fee=r.get('计费重量'), total_amount=r.get('总金额'),
                                                    id_CheckingBill=r.get('id'),
                                                    create_by=data.get("create_by"), id_client=records[1], update_by=data.get("create_by"), update_time=datetime.now())
                          for k, records in data["records"].items() for r in records[0]]
            add_or_update_records(db, ormObjects)
    except Exception as e:
        # Surface the full traceback to the caller for diagnosis.
        err_msg = traceback.format_exc()
        print("异常信息是:", err_msg)
        raise BaseAPIException(detail=err_msg)

    return json_result_reps()


@yg_router.post("/excel_loaded_data_list")
async def excel_loaded_data_list(db: Session = Depends(get_db), param: Dict = Body()):
    """Paginated comparison of imported Excel rows against system bills.

    Left-joins the Excel staging table to ``CheckingBill`` (by bill no) and
    to the bill's trade partner, applies optional bill-date / update-date /
    bill-no filters, then groups per staging row; receivable rows
    (``bill_type == 1``) contribute their system amount positively, others
    negatively.

    Expects ``param`` to contain ``pageSize`` and ``current`` (1-based), and
    optionally ``bill_date`` / ``update_date`` as two-element [from, to]
    lists and a ``bill_no`` prefix.
    """
    print(param)
    A = aliased(CheckingBillFromExcel, name='A')
    B = aliased(CheckingBill, name='B')
    C = aliased(TradePartner, name='C')
    # D = aliased(Client, name='D')
    all_data = db.query(A).outerjoin(B, B.bill_no == A.bill_no).outerjoin(C, B.id_trade_partner == C.id) \
        .with_entities(A.bill_no, A.bill_date, A.id_client, C.group_type, C.id_client.label("id_partner_client"), A.total_amount, A.weight_for_fee, A.update_by, A.update_time,
                       B.bill_type,B.total_amount.label('total_amount_sys'), B.weight_for_fee.label('weight_for_fee_sys'))
    # Date filters: upper bound is extended by " 23:00:00.000" to cover the
    # end day (NOTE(review): probably meant 23:59:59 — confirm intent).
    if isinstance(param.get("bill_date"), list) and len(param.get("bill_date")) == 2:
        all_data = all_data.filter(A.bill_date.between(param.get("bill_date")[0], param.get("bill_date")[1] + " 23:00:00.000"))
    if isinstance(param.get("update_date"), list) and len(param.get("update_date")) == 2:
        all_data = all_data.filter(A.update_time.between(param.get("update_date")[0], param.get("update_date")[1] + " 23:00:00.000"))
    if param.get("bill_no"):
        log.info("bill_no filter.....")
        all_data = all_data.filter(A.bill_no.like("{}%".format(param.get("bill_no"))))
    # Wrap the filtered SQL in a subquery, then aggregate with GROUP BY.
    subquery = all_data.subquery()
    grouped_data = db.query(subquery.c.bill_no,subquery.c.bill_date, subquery.c.weight_for_fee, subquery.c.total_amount, subquery.c.id_client, subquery.c.update_by, subquery.c.update_time,
                            func.max(case((subquery.c.bill_type == 1, subquery.c.weight_for_fee_sys), else_=0)).label( 'weight_for_fee_sys'),
                            func.max(case((subquery.c.bill_type == 1, subquery.c.group_type), else_='')).label( 'group_type'),
                            func.max(case((subquery.c.bill_type == 1, subquery.c.id_partner_client), else_=0)).label( 'id_partner_client'),
                            # Receivable amount minus everything else.
                            func.sum(case((subquery.c.bill_type == 1, subquery.c.total_amount_sys), else_=-subquery.c.total_amount_sys)).label('total_amount_sys')
                            ).group_by(subquery.c.bill_no,subquery.c.bill_date, subquery.c.weight_for_fee, subquery.c.total_amount, subquery.c.id_client, subquery.c.update_by, subquery.c.update_time)

    total_rec = grouped_data.count()
    pagination_data = grouped_data.order_by(subquery.c.bill_no).limit(param["pageSize"]).offset((param["current"] - 1) * param["pageSize"]).all()
    # print(pagination_data)
    return json_result_reps(data={"total": total_rec, "records": pagination_data}, use_serializer=True)


@yg_router.get("/template_type")
async def get_template_type(db: Session = Depends(get_db)):
    """Return every template type known to the system."""
    template_types = crud.get_template_types(db)
    # Refresh each ORM instance so all attributes are current before serializing.
    for record in template_types:
        db.refresh(record)
    return json_result_reps(data=template_types)


@yg_router.get("/template")
async def get_template(db: Session = Depends(get_db)):
    """Return every export template."""
    templates = crud.get_templates(db)
    # Refresh each ORM instance so all attributes are current before serializing.
    for record in templates:
        db.refresh(record)
    return json_result_reps(data=templates)


@yg_router.post("/template")
async def create_template(db: Session = Depends(get_db), template: schemas.Template = Body()):
    """Persist a single template definition and return the created rows."""
    created = crud.create_template(db, templates=[template])
    return json_result_reps(data=created)


@yg_router.get("/get_template_by_type")
async def get_template_by_type(db: Session = Depends(get_db), template_type_id: int = Query()):
    """List the templates belonging to the given template type."""
    templates = db.query(orm.Template).filter(orm.Template.template_type_id == template_type_id).all()
    # Refresh each ORM instance so all attributes are current before serializing.
    for tpl in templates:
        db.refresh(tpl)
    return json_result_reps(data=templates)


@yg_router.get("/get_template_by_category")
async def get_template_by_category(db: Session = Depends(get_db), category: str = Query()):
    """List the templates whose template type has the given category.

    BUG FIX (two issues):
      * The function was also named ``get_template_by_type``, clobbering the
        handler defined just above at module level; renamed to match its route.
      * Filtering ``Template`` on ``TemplateType.category`` without a join
        produced an implicit cross join (every template repeated once per
        matching type). Join explicitly on ``template_type_id`` instead.
    """
    q = (db.query(orm.Template)
         .join(orm.TemplateType, orm.Template.template_type_id == orm.TemplateType.id)
         .filter(orm.TemplateType.category == category))
    print(q)
    res = q.all()
    [db.refresh(r) for r in res]
    return json_result_reps(data=res)


@yg_router.delete("/template")
async def delete_template(db: Session = Depends(get_db), ids: List[int] = Body(alias="id", embed=True)):
    """Delete the templates with the given ids and return the affected rows."""
    deleted = crud.get_templates_by_ids(db, ids=ids, is_delete=True)
    return json_result_reps(data=deleted)


@yg_router.post("/exportXlsBySql")
async def exportXls(db: Session = Depends(get_db), req_data: schemas.excelBySqlData = Body()):
    """Run the supplied SQL and export the result set as a styled Excel file.

    Column definitions come from the request, optionally merged with (and
    ordered by) the stored template's JSON config; a "subtitle": "date"
    config entry replaces the subtitle with today's date.
    """
    data = crud.getDataBySql(db, req_data.sql)

    print(req_data.id_template)
    tpl = crud.get_templates_by_ids(db, ids=[req_data.id_template])
    config_dict = orjson.loads(tpl[0].config.encode())
    subtitle = req_data.subtitle
    columns = []
    if config_dict:
        if 'columns' in config_dict:
            # Index the request columns that the template config also mentions.
            requested = {c.dataIndex: c.__dict__ for c in req_data.columns if c.dataIndex in config_dict['columns']}
            # The template config drives column order; its settings win on conflict.
            for data_index, overrides in config_dict['columns'].items():
                if data_index in requested:
                    merged = requested[data_index]
                    merged.update(overrides)
                else:
                    merged = overrides
                    merged.update({"dataIndex": data_index})
                columns.append(merged)
            log.info(columns)
        if 'subtitle' in config_dict and config_dict['subtitle'] == 'date':
            subtitle = f'日期：{datetime.now().strftime("%Y-%m-%d")}'
    else:
        columns = [c.__dict__ for c in req_data.columns]

    return export_excel(columns, data, req_data.fileName, req_data.sheetName, req_data.title, subtitle)


@yg_router.post("/stringHandler/specifications")
async def string_handler(param: schemas.stringData = Body()):
    """Regroup newline-separated specification lines.

    chunkSize == 0 collapses all lines into one space-joined string;
    otherwise lines are taken chunkSize at a time, space-joined within each
    chunk, and the chunks are joined with the requested separator.
    """
    lines = param.inputData.split("\n")
    seperator = param.seperator or "\n"
    if param.chunkSize == 0:
        res = " ".join(lines)
    else:
        size = param.chunkSize
        chunks = (lines[start:start + size] for start in range(0, len(lines), size))
        res = seperator.join(" ".join(chunk) for chunk in chunks)
    return json_result_reps(data=res)


@yg_router.post("/stringHandler/specsToVolume")
async def specsToVolume(param: schemas.stringData = Body()):
    """Parse carton specs ("L*W*H" or "L*W*H*pieces", in cm) and total the volume (m³).

    Lines are split on newlines and the separator's characters, grouped
    (optionally merging identical dimensions), then volume and piece totals
    are accumulated. An unparseable line invalidates the grand totals but
    per-line results keep being computed to ease analysis.
    """
    log.info(param)
    seperator = param.seperator or "\n"
    # BUG FIX: the pattern was f'[\n${seperator}]+' — "$" inside a character
    # class is a literal (a JS-template-literal leftover), so input was also
    # split on "$". Split on newlines and the separator's characters only.
    lst = re.split(f'[\n{seperator}]+', param.inputData)
    print(lst)
    log.info("param.isMergeSameSpecs:{}",param.isMergeSameSpecs)
    specs = []
    spec_dicts = {}
    total = 0
    articles = 0
    # First pass: normalize/merge the specs; second pass: compute volumes.
    for item in lst:
        if item.strip() == "":
            continue
        match = re.match(r'^([0-9]+\.?[0-9]*)[\*]([0-9]+\.?[0-9]*)[\*]([0-9]+\.?[0-9]*)(?:[\*]([0-9]+\.?[0-9]*))?$', item.strip())
        if match:
            B = int(float(match.group(1)))
            C = int(float(match.group(2)))
            D = int(float(match.group(3)))
            F = int(float(match.group(4))) if match.group(4) else 1  # no piece count -> 1
            if param.isMergeSameSpecs:
                # Key on dimensions only; accumulate the piece count.
                key = f'{B}*{C}*{D}'
                spec_dicts[key] = spec_dicts.get(key, 0) + F
            else:
                # NOTE(review): identical full specs share a key, so the count
                # is overwritten rather than accumulated — confirm duplicates
                # cannot occur in this mode.
                key = f'{B}*{C}*{D}*{F}'
                spec_dicts[key] = F
        else:
            log.error("{}, {}", item.strip(), "did not match")
            spec_dicts[item.strip()] = 0  # 0 pieces marks an unparseable line

    for k,v in spec_dicts.items():
        spec = {'str': k } if k.count("*") > 2 else {'str': f'{k}*{v}'}
        try:
            if v>0:
                # BUG FIX: this used eval(k) / 1000000 * v. In non-merge mode
                # k already ends with the piece count, so the count was applied
                # twice (volume scaled by F**2). Use only the three dimensions
                # and multiply by the piece count v exactly once (also drops
                # the eval).
                length, width, height = (int(p) for p in k.split('*')[:3])
                spec['result'] = length * width * height / 1000000 * v
                # A bad line freezes the grand totals, but per-line results
                # continue to be computed.
                if total != "无法计算结果":
                    print(spec['result'])
                    total = Decimal(str(spec['result'])) + Decimal(str(total))
                    print(total)
                    articles += int(float(v))
            else:
                spec['result'] = "规格不规范不能解析"
                articles = total = "无法计算结果"
        except Exception:
            spec['result'] = "规格不规范不能解析"
            articles = total = "无法计算结果"
        specs.append(f'{spec["str"]} = {spec["result"]}')

    total = round(total, 2) if is_number(str(total)) else total
    log.info("....original_total....:{}", total)
    specs.append(f'总体积 = {total}')
    specs.append(f'总件数 = {articles}')
    res = {'process': "\n".join(specs), 'total': str(total)}
    return json_result_reps(data=res)


# 【标准展示代码】
## 通用-----------------------
"""
子表出现的三种方式，joinedload, selectinload(默认), subquery
"""


@yg_router.post("/finance/getCheckBill")
def getCheckBill(db: Session = Depends(get_db), param: Dict = Body()):
    """Query checking bills with their fee lines, optionally paginated.

    ``param`` may carry ``bill_no`` (exact match) and ``pageSize`` /
    ``current`` (1-based) for pagination. Each record is flattened: the
    relationship attribute is dropped and replaced by ``feeData`` (amounts
    keyed by fee-type code) and ``fee_items`` (comma-joined fee-type ids).
    """
    sql = db.query(CheckingBill).options(joinedload(CheckingBill.checkingBillFees).joinedload(CheckingBillFee.yGFeeType, innerjoin=True))
    if param.get("bill_no"):
        sql = sql.filter(CheckingBill.bill_no == param.get("bill_no"))
    if param.get("pageSize") and param.get("current"):
        # BUG FIX: `sql.count` (no parentheses) stored the bound method, so
        # "total" serialized as a method object instead of the row count.
        count = sql.count()
        sql = sql.order_by(CheckingBill.bill_no).limit(param["pageSize"]).offset((param["current"] - 1) * param["pageSize"])
        data = {"total": count, "records": sql.all()}
    else:
        data = {"records": sql.all()}

    data["records"] = [
        {
            **{k: v for k, v in r.to_dict().items() if k != "checkingBillFees"},
            'feeData': {c.yGFeeType.code: c.amount for c in r.checkingBillFees},
            'fee_items': ",".join([str(c.yGFeeType.id) for c in r.checkingBillFees])
        }
        for r in data["records"]
    ]

    return json_result_reps(data=data, use_serializer=True)


@yg_router.put("/finance/CheckBill")
def createOrUpdateCheckBill(db: Session = Depends(get_db), param: Dict = Body()):
    """Create or update a checking bill. Not implemented yet; returns an empty list."""
    empty_records = []
    return json_result_reps(data=empty_records, use_serializer=True)


@yg_router.delete("/finance/CheckBill")
def deleteCheckBill(db: Session = Depends(get_db), param: Dict = Body()):
    """Delete a checking bill. Not implemented yet; only echoes the payload."""
    print(param)


## 专项------------------------
@yg_router.put("/finance/updateCheckBillMemo")
def updateCheckBillMemo(db: Session = Depends(get_db), param: Dict = Body()):
    """Update a checking bill's memo. Not implemented yet; only echoes the payload.

    TODO: apply something like
    ``db.query(CheckingBill).filter_by(id=...).update({"memo": ...})`` + commit.
    """
    print(param)


"""
全量字段：
{
  "bill_date": {
    "title": "帐单日期",
    "width": 100
  },
  "bill_no": {
    "title": "帐单号",
    "width": 120
  },
  "flight_no": {
    "title": "航班号",
    "width": 70
  },
  "departure_port": {
    "title": "始发港",
    "width": 60
  },
  "destination_port": {
    "title": "目的港",
    "width": 60
  },
  "articles": {
    "title": "件数",
    "width": 70
  },
  "weight": {
    "title": "重量",
    "width": 70
  },
  "weight_for_fee": {
    "title": "计费重量",
    "width": 70
  },
  "unit_price": {
    "title": "单价",
    "width": 60
  },
  "freight": {
    "title": "运费",
    "width": 65
  },
  "fee_bill_making": {
    "title": "制单费",
    "width": 65
  },
  "fee_customs_declaration": {
    "title": "报关费",
    "width": 65
  },
  "total_amount": {
    "title": "总金额",
    "width": 75
  },
  "status": {
    "title": "状态",
    "width": 60
  },
  "memo": {
    "title": "备注",
    "width": 120
  }
}
——>
{ 
"columns": {
  "xuhao": {
    "title": "序号",
    "width": 50
  },
  "bill_date": {
    "title": "日期"
  },
  "bill_no": {
    "title": "主单号"
  },
  "articles": {
    "title": "件数",
    "align": "center,vcenter"
  },
  "weight": {
    "title": "重量"
  },
  "weight_for_fee": {
    "title": "计费重量"
  },
  "destination_port": {
    "title": "国家地区"
  },
  "unit_price": {
    "title": "单价"
  },
  "freight": {
    "title": "运费"
  },
  "fee_bill_making": {
    "title": "制单费"
  },
  "fee_customs_declaration": {
    "title": "报关费"
  },
  "total_amount": {
    "title": "总金额"
  },
  "memo": {
    "title": "备注"
  }
},
"subtitle": "date"
}
"""
