import os

from pathlib import Path

import time

import zipfile

from fastapi import APIRouter
from re_common.baselibrary.utils.baseboto3 import BaseBoto3

from re_common.baselibrary.utils.basedir import BaseDir
from re_common.baselibrary.utils.basefile import BaseFile
from re_common.baselibrary.utils.baserequest import BaseRequest

from apps.core.global_model import InputPlatformModel
from apps.core.m_route import ContextIncludedRoute
from apps.core.return_info import ReturnInfo, FAILED, SUCCESS

from apps.crawler_platform.core_platform.g_model import IsticData, IsticTasks
# import binascii
import codecs
import json
import re
import xml.etree.cElementTree as ET
from xml.dom import minidom

# import mmh3

# FastAPI sub-router for this module; ContextIncludedRoute is a project route
# class (presumably injects request context into handlers — see apps.core.m_route).
router = APIRouter(route_class=ContextIncludedRoute)


def get_datas(datas, dele_batch):
    """Append *datas* to download/istic/<today>/jsons/<dele_batch>.json.

    Each element of *datas* is written as one JSON object per line
    (JSON-lines). Files in the jsons directory that do not belong to
    *dele_batch* — leftovers from previously failed runs — are removed
    first.

    :param datas: iterable of JSON-serializable records
    :param dele_batch: batch identifier; used as file name and keep-filter
    :return: True on completion
    """
    now_date = time.strftime('%Y%m%d', time.localtime())

    cur_path = BaseDir.get_file_dir_absolute(__file__)
    top_path = BaseDir.get_upper_dir(cur_path, -4)
    sPath = BaseFile.get_new_path(top_path, "download", "istic")
    BaseDir.create_dir(sPath)
    detail = BaseFile.get_new_path(sPath, now_date)
    BaseDir.create_dir(detail)
    json_addr = BaseFile.get_new_path(detail, "jsons")
    BaseDir.create_dir(json_addr)
    # Clear redundant data left behind by earlier failed runs.
    for file in BaseDir.get_dir_all_files(json_addr):
        if dele_batch not in file:
            os.remove(file)
    file_name = BaseFile.get_new_path(json_addr, dele_batch + ".json")
    # Open the file once and stream all records; the previous version
    # reopened the file for every single record.
    with open(file_name, mode="a", encoding="utf-8") as f:
        for data in datas:
            f.write(json.dumps(data, ensure_ascii=False) + "\n")
    return True


class Json2Xml(object):
    """Convert crawled JSON-lines records into CQVIP-style XML delivery files.

    Workflow (see :meth:`json_split`): read every file under
    ``<detail>/jsons``, bucket valid records by ``pub_year``, flush each
    bucket of 50000 records to a pretty-printed XML file under
    ``<detail>/add``, then zip the XML directory.  Records that are
    deprecated or belong to an excluded journal (gch) only have their
    ``lngid`` appended to ``<detail>/delete/lngid.txt``.  Records that
    cannot survive XML serialization are logged to ``<detail>/errors``.
    """

    def __init__(self, now_date):
        # self.test_dir1 = r'D:\z_test\1921-00001'
        self.now_date = now_date

        # Build the working directory tree: <top>/download/istic/<now_date>/...
        self.cur_path = BaseDir.get_file_dir_absolute(__file__)
        self.top_path = BaseDir.get_upper_dir(self.cur_path, -4)
        self.sPath = BaseFile.get_new_path(self.top_path, "download", "istic")
        BaseDir.create_dir(self.sPath)
        self.detail = BaseFile.get_new_path(self.sPath, self.now_date)
        BaseDir.create_dir(self.detail)
        self.json_addr = BaseFile.get_new_path(self.detail, "jsons")
        BaseDir.create_dir(self.json_addr)
        self.xml_addr = BaseFile.get_new_path(self.detail, "add")
        BaseDir.create_dir(self.xml_addr)
        self.dbdir = BaseFile.get_new_path(self.detail, "errors")
        BaseDir.create_dir(self.dbdir)
        self.del_dir = BaseFile.get_new_path(self.detail, "delete")
        BaseDir.create_dir(self.del_dir)
        self.test_err_dir = BaseFile.get_new_path(self.detail, "test_dir")
        BaseDir.create_dir(self.test_err_dir)
        self.cnt = 0            # records accepted into the XML output
        self.data_dic = dict()  # pub_year -> list of pending records
        self.mdb_dic = dict()   # "gch5★year★num★book_id" -> article count
        self.dele_cnt = 0       # records routed to the delete list
        # Journal (gch) codes whose records always go to the delete file,
        # whether or not they are flagged as deprecated.
        self.exclude_list = [
            '71889X', '71890X', '71999X', '71995X', '71994X', '71997X', '72129X', '72001X', '72176X', '72081X',
            '72524X', '72003X', '71899X', '72149X', '72150X', '72177X', '72191X', '72203X', '72208X', '59321X',
            '59863X', '59864X', '72193X', '72192X', '59865X', '72127X', '71996X', '71998X', '72040X', '71888X',
            '72004X', '72000X', '72182X'
        ]
        # Fix: this set used to be created only inside run(), so calling
        # init_dic() directly raised AttributeError.
        self.exclude_set = set(self.exclude_list)

    def cleanfield(self, field):
        """Normalize a field value to a single-line, XML/SQL-safe string.

        None becomes ''; single quotes are doubled (SQL-style escaping);
        CR/LF/TAB are removed and the result stripped.  The character class
        below also removes most C0 control characters, DEL, and — because
        of the literal '|' inside the class — any pipe characters.
        """
        field = str(field)
        if field == 'None':
            field = ''
        field = field.replace("'", "''").replace('\r', '').replace('\n', '').replace('\t', '').strip()
        field = re.sub('[\x00-\x08|\x0b-\x0c|\x0e-\x1f|\x7f]', '', field)
        return field

    def init_dic(self, file_name):
        """Read one JSON-lines file and bucket its records by pub_year.

        Side effects: appends delete candidates to delete/lngid.txt, logs
        unserializable lines to errors/error.txt, updates self.mdb_dic /
        self.cnt / self.dele_cnt, and flushes any bucket that reaches
        50000 records via json_to_xml_before().
        """
        for ones in BaseFile.read_file_r_mode_yield(file_name):
            if not self.single_test_err(ones):
                # Line cannot be serialized to XML; log it and skip.  Even
                # so, the lngid is assumed intact and usable as the key.
                one = json.loads(ones)
                lngid = one["lngid"]
                with codecs.open('%s/error.txt' % (self.dbdir), "a", "utf-8") as errfile:
                    errfile.writelines('%s\t%s\n' % (lngid, ones))
                continue
            one = json.loads(ones)
            lngid = one["lngid"]
            if lngid.startswith("REF"):
                continue  # reference entries are not delivered
            pub_year = one["pub_year"]
            gch = one["gch"]
            gch5 = gch[0:5]
            num = one["num"]
            book_id = one["book_id"]
            is_deprecated = one["is_deprecated"]
            # Deprecated records and excluded journals go straight to the
            # delete file, never into the XML output.
            if is_deprecated == "1" or gch in self.exclude_set:
                lngid_file_name = BaseFile.get_new_path(self.del_dir, "lngid.txt")
                with open(lngid_file_name, mode="a", encoding="utf-8") as f:
                    f.write(lngid + "\n")
                self.dele_cnt += 1
            else:
                self.data_dic.setdefault(pub_year, []).append(one)
                if len(self.data_dic[pub_year]) == 50000:
                    # Flush early to bound memory use per year.
                    self.json_to_xml_before(pub_year)
                mdb_key = f"{gch5}★{pub_year}★{num}★{book_id}"
                self.mdb_dic[mdb_key] = self.mdb_dic.get(mdb_key, 0) + 1
                self.cnt += 1

    def writexml_single(self, etroot):
        """Serialize *etroot* to test_dir/test.xml.

        Raises if the tree cannot be pretty-printed; used by
        single_test_err() as a serialization probe.
        """
        xmlfile_name = '%s/%s.xml' % (self.test_err_dir, f"test")
        mdtree = minidom.parseString(ET.tostring(etroot))
        with codecs.open(xmlfile_name, "w", "utf-8") as xmlfile:
            xmlfile.writelines(mdtree.toprettyxml(encoding='utf-8').decode('utf-8'))
        del mdtree

    def single_test_err(self, my_json):
        """Return True if the raw line *my_json* survives an XML round-trip.

        Builds a throwaway one-element tree and tries to write it; any
        serialization failure means the real record would break writexml().
        """
        etroot = ET.Element('test')
        etrecord = ET.SubElement(etroot, 'record')
        etlngid = ET.SubElement(etrecord, 'lngid')
        etlngid.text = self.cleanfield(my_json)
        try:
            self.writexml_single(etroot)
            return True
        except Exception:
            print("检测到错误")
            return False

    def _append_record(self, etroot, one):
        """Append one <record> element for dict *one* under *etroot*.

        Shared by json_to_xml_before() and json_to_xml(); the element order
        is fixed by the CQVIP delivery schema.  Failures while matching
        authors to organs are logged to errors/error.txt and the record is
        emitted with an empty author list.
        """
        etrecord = ET.SubElement(etroot, 'record')
        lngid = self.cleanfield(one["lngid"])
        ET.SubElement(etrecord, 'lngid').text = lngid
        ET.SubElement(etrecord, 'title_c').text = self.cleanfield(one.get("title", ""))
        ET.SubElement(etrecord, 'beginpage').text = self.cleanfield(one.get("begin_page", ""))
        ET.SubElement(etrecord, 'endpage').text = self.cleanfield(one.get("end_page", ""))
        ET.SubElement(etrecord, 'jumppage').text = self.cleanfield(one.get("jump_page", ""))
        ET.SubElement(etrecord, 'remark_c').text = self.cleanfield(one.get("abstract", ""))
        ET.SubElement(etrecord, 'name_c').text = self.cleanfield(one.get("journal_name", ""))
        ET.SubElement(etrecord, 'years').text = self.cleanfield(one.get("pub_year", ""))
        vol = self.cleanfield(one.get("vol", ""))
        ET.SubElement(etrecord, 'vol').text = vol
        num = self.cleanfield(one.get("num", ""))
        ET.SubElement(etrecord, 'num').text = num
        # One <keyword_c> per ';'-separated keyword.
        for kwd in self.cleanfield(one.get("keyword", "")).split(';'):
            ET.SubElement(etrecord, 'keyword_c').text = kwd
        ET.SubElement(etrecord, 'firstclass').text = self.cleanfield(one.get("clc_no_1st", ""))

        # Author/organ relationship; on failure fall back to an empty list.
        showwriter = self.cleanfield(one.get("author", ""))
        showorgan = self.cleanfield(one.get("organ", ""))
        numlist, writerlist, organlist = [], [], []
        try:
            # e.g. showwriter = '关国杰[][1];杨磊[[2];]'
            #      showorgan  = '[1]org one;[2]org two'
            numlist, writerlist, organlist = self.findrelationship(showwriter, showorgan)
        except Exception:
            with codecs.open('%s/error.txt' % (self.dbdir), "a", "utf-8") as errfile:
                errfile.writelines('%s\t%s\t%s\n' % (lngid, showwriter, showorgan))

        etauthorlist = ET.SubElement(etrecord, 'authorlist')
        for author_sequence in numlist:
            etauthor = ET.SubElement(etauthorlist, 'author')
            ET.SubElement(etauthor, 'author_sequence').text = str(author_sequence)
            ET.SubElement(etauthor, 'writer').text = writerlist[author_sequence - 1]
            ET.SubElement(etauthor, 'organ').text = organlist[author_sequence - 1]

        ET.SubElement(etrecord, 'issn').text = self.cleanfield(one.get("issn", ""))
        ET.SubElement(etrecord, 'pagecount').text = self.cleanfield(one.get("page_cnt", ""))
        ET.SubElement(etrecord, 'cnno').text = self.cleanfield(one.get("cnno", ""))
        ET.SubElement(etrecord, 'imburse').text = self.cleanfield(one.get("fund", ""))
        ET.SubElement(etrecord, 'doi').text = self.cleanfield(one.get("doi", ""))
        # One <class_c> per space-separated CLC classification number.
        for cls_c in self.cleanfield(one.get("clc_no", "")).split(' '):
            ET.SubElement(etrecord, 'class_c').text = cls_c
        # pdf path: <volumn>/<special><subject>/<gch>/<vol:03>/<num:03>/<lngid>.pdf
        volumn = self.cleanfield(one.get("volumn", ""))
        specialnum = self.cleanfield(one.get("special_no", "")).split(',')[0]
        subjectnum = self.cleanfield(one.get("subject_no", ""))
        gch = self.cleanfield(one.get("gch", ""))
        ET.SubElement(etrecord, 'gch').text = gch
        if volumn:
            pdfpath = (volumn + '/' + specialnum + subjectnum + '/' + gch + '/'
                       + vol.zfill(3) + '/' + num.zfill(3) + '/' + lngid + '.pdf')
        else:
            pdfpath = ""
        ET.SubElement(etrecord, 'pdfpath').text = pdfpath

    def json_to_xml_before(self, pub_year):
        """Flush the (full) bucket for *pub_year* to an XML file, then clear it.

        On serialization failure the whole tree is dumped to
        errors/errorxml.txt and the bucket is NOT cleared.
        """
        etroot = ET.Element('cqviprecords')
        for one in self.data_dic[pub_year]:
            self._append_record(etroot, one)
        try:
            self.writexml(etroot, pub_year)
            # Clear the flushed bucket so the final pass does not repeat it.
            self.data_dic[pub_year] = []
        except Exception:
            with codecs.open('%s/errorxml.txt' % (self.dbdir), "a", "utf-8") as errfile:
                errfile.writelines('%s\n' % (
                    ET.tostring(etroot, encoding='utf-8').decode('utf-8')))
            etroot.clear()

    def json_to_xml(self):
        """Flush every remaining bucket, one XML root per pub_year."""
        for pub_year, data_list in self.data_dic.items():
            etroot = ET.Element('cqviprecords')
            for one in data_list:
                self._append_record(etroot, one)
            try:
                self.writexml(etroot, pub_year)
            except Exception:
                with codecs.open('%s/errorxml.txt' % (self.dbdir), "a", "utf-8") as errfile:
                    errfile.writelines('%s\n' % (
                        ET.tostring(etroot, encoding='utf-8').decode('utf-8')))
                etroot.clear()

    def json_split(self):
        """Convert every pending JSON file to XML, then zip the add directory.

        Zip entries are stored relative to the batch directory (the literal
        "add" is stripped from the directory path, so entries keep an
        ``add/...`` prefix).  Returns True.
        """
        for one_file in BaseDir.get_dir_all_files(self.json_addr):
            self.init_dic(one_file)
        self.json_to_xml()
        zip_name = '{}\\add.zip'.format(self.detail)
        new_muluname = self.xml_addr.replace(r"add", "")
        with zipfile.ZipFile(zip_name, 'w', zipfile.ZIP_DEFLATED) as zp:
            for one in BaseDir.get_dir_all_files(self.xml_addr):
                arcname = Path(one).relative_to(new_muluname)
                zp.write(one, arcname=arcname)
        return True

    def check_file(self, file_name):
        """Return True when *file_name* does not exist yet (name is free)."""
        return not os.path.exists(file_name)

    def writexml(self, etroot, years):
        """Pretty-print *etroot* to the first unused add_中刊<date>_<years>_<n>.xml.

        Probing fileno 1..999 keeps the naming sequence consistent between
        the early-flush and final passes without appending into an already
        existing file.  (If all 999 names are taken, the last name is
        reused in append mode — unchanged legacy behavior.)
        """
        for fileno in range(1, 1000):
            xmlfile_name = '%s/%s_%s_%d.xml' % (self.xml_addr, f"add_中刊{self.now_date}", years, fileno)
            if self.check_file(xmlfile_name):
                break
        mdtree = minidom.parseString(ET.tostring(etroot))
        with codecs.open(xmlfile_name, "a", "utf-8") as xmlfile:
            xmlfile.writelines(mdtree.toprettyxml(encoding='utf-8').decode('utf-8'))
        del mdtree

    def findrelationship(self, showwriter, showorgan):
        """Match each author in *showwriter* to its organ(s) in *showorgan*.

        Authors are ';'-separated and may carry bracketed organ indexes
        (e.g. ``张三[1,2]``); organs are ';'-separated and prefixed with
        ``[n]``.  Unindexed authors map positionally when the counts match,
        otherwise they get the whole organ string.

        :return: (numlist, writerlist, organlist) — numlist is the 1-based
            author sequence, parallel to the other two lists.
        :raises Exception: on unbalanced brackets, or when an indexed
            author matches no organ at all.
        """
        numlist = []
        writerlist = []
        organlist = []
        writers = showwriter.split(';')
        organs = showorgan.split(';')
        num = 1
        for writer in writers:
            writer = writer.replace('[]', '')
            # Unbalanced brackets mean the source string is corrupt.
            if len(re.findall(r"(\[)", writer)) != len(re.findall(r"(\])", writer)):
                raise Exception
            wsObj = re.search(r'\[+(\d+.*\d*?)\]', writer)
            wflags = ''
            organ = ''
            if wsObj:
                wflags = wsObj.group(1)
            if wflags.strip() == '':  # author carries no organ index
                if len(writers) == len(organs):  # one-to-one positional mapping
                    organ = organs[num - 1]
                else:
                    organ = showorgan
            else:
                # Track whether at least one organ matched any index for this
                # author; if none did, the relationship data is inconsistent.
                one_writer_organ_list = list()
                for wflag in wflags.split(','):
                    wflag = wflag.strip()
                    if wflag != '':  # indexed author
                        for org in organs:
                            if len(re.findall(r"(\[)", org)) != len(re.findall(r"(\])", org)):
                                raise Exception
                            osObj = re.search(r'\[%s\]' % (wflag), org)
                            if osObj:
                                organ += '%s;' % org
                                one_writer_organ_list.append("true")
                            else:
                                one_writer_organ_list.append("false")
                if "true" not in one_writer_organ_list:
                    raise Exception

            numlist.append(num)
            writerlist.append(writer)
            organlist.append(organ)
            num = num + 1

        return numlist, writerlist, organlist

    def run(self):
        """Convert all pending JSON files.

        :return: (mdb statistics dict, accepted record count, deleted count)
            or None if json_split() returned a falsy value.
        """
        # Also built in __init__; rebuilt here defensively for old callers.
        self.exclude_set = set(self.exclude_list)
        if self.json_split():
            return self.mdb_dic, self.cnt, self.dele_cnt


def read_and_modify_docx(now_date, update_cnt, dele_cnt, batch):
    """Fill the acceptance-form template with this batch's numbers.

    Loads model.docx from the istic download directory, stamps the batch
    number, update/delete counts and date into fixed template positions,
    saves the result into the batch directory and zips it as 验收单.zip.
    Returns True.
    """
    from docx import Document

    cur_path = BaseDir.get_file_dir_absolute(__file__)
    top_path = BaseDir.get_upper_dir(cur_path, -4)
    sPath = BaseFile.get_new_path(top_path, "download", "istic")
    BaseDir.create_dir(sPath)
    detail = BaseFile.get_new_path(sPath, now_date)
    BaseDir.create_dir(detail)

    doc_path = BaseFile.get_new_path(sPath, "model.docx")
    new_date = "{}.{}.{}".format(now_date[0:4], now_date[4:6], now_date[-2:])
    doc = Document(doc_path)

    # Cell positions (1-based, counted across all tables) in the template.
    total_cells = {16}           # receives the update count
    delete_cells = {18, 19, 20}  # receive the delete summary
    date_cells = {12}            # receive the batch date

    for paragraph in doc.paragraphs:
        if "数据批号" in paragraph.text:
            paragraph.text = f"数据批号：{batch}"

    cell_no = 0
    for table in doc.tables:
        for row in table.rows:
            for cell in row.cells:
                cell_no += 1
                if cell_no in total_cells:
                    cell.text = str(update_cnt)
                elif cell_no in delete_cells:
                    cell.text = f"删除数据：{dele_cnt}条 "
                elif cell_no in date_cells:
                    cell.text = new_date

    save_name = BaseFile.get_new_path(detail, "中国科学技术信息研究所-数据提交验收单.docx")
    if os.path.exists(save_name):
        os.remove(save_name)
    doc.save(save_name)

    zip_name1 = '{}\\验收单.zip'.format(detail)
    with zipfile.ZipFile(zip_name1, 'w', zipfile.ZIP_DEFLATED) as zp1:
        zp1.write(save_name, arcname=Path(save_name).relative_to(detail))
    return True


def make_mdb(data_dic, now_date):
    """Build the tj<now_date>.mdb statistics database and delivery zips.

    Uses the MS Access COM API via comtypes (Windows-only).  *data_dic*
    maps "gch5★year★num★bookid" keys to per-issue article counts.  Also
    zips the delete list (when present) and writes/zips a readme.txt
    describing the delivery layout.  Returns True.
    """
    # 用于通用型处理 — generic processing via the Access COM object.
    from comtypes.client import CreateObject
    access = CreateObject('Access.Application')
    from comtypes.gen import Access

    cur_path = BaseDir.get_file_dir_absolute(__file__)
    top_path = BaseDir.get_upper_dir(cur_path, -4)
    sPath = BaseFile.get_new_path(top_path, "download", "istic")
    BaseDir.create_dir(sPath)
    detail = BaseFile.get_new_path(sPath, now_date)
    BaseDir.create_dir(detail)
    mdb_addr = BaseFile.get_new_path(detail, "mdb")
    BaseDir.create_dir(mdb_addr)
    dele_addr = BaseFile.get_new_path(detail, "delete")
    BaseDir.create_dir(dele_addr)
    dele_file_path = BaseFile.get_new_path(dele_addr, "lngid.txt")

    sql_create = "CREATE TABLE tj (GCH5 TEXT, Years TEXT, Num TEXT, bookid TEXT, nums integer, CurTime TEXT);"

    # Drop mdb files that have not been touched for 10 minutes (stale runs).
    for file in BaseDir.get_dir_all_files(mdb_addr):
        if time.time() - BaseFile.get_update_time(file) > 60 * 10:  # 60 * 10
            if BaseFile.is_file_exists(file):
                BaseFile.remove_file(file)
    filepath = '{}\\tj{}.mdb'.format(mdb_addr, now_date)
    if os.path.exists(filepath):
        os.remove(filepath)
    DBEngine = access.DBEngine
    db = DBEngine.CreateDatabase(filepath, Access.DB_LANG_GENERAL)
    db.BeginTrans()
    db.Execute(sql_create)
    for key, cnt in data_dic.items():
        gch5, pub_year, num, bookid = key.split("★")
        # DAO Execute has no parameter binding; double any single quotes so
        # a quote inside a value cannot break (or inject into) the statement.
        gch5, pub_year, num, bookid = [v.replace("'", "''") for v in (gch5, pub_year, num, bookid)]
        db.Execute(
            f"insert into tj(GCH5, Years, Num, bookid, nums, CurTime) values('{gch5}', '{pub_year}', '{num}', '{bookid}', {cnt}, '{now_date}add')")
    db.CommitTrans()
    db.Close()

    zip_name = '{}\\统计.zip'.format(detail)
    with zipfile.ZipFile(zip_name, 'w', zipfile.ZIP_DEFLATED) as zp:
        zp.write(filepath, arcname=Path(filepath).relative_to(mdb_addr))

    # Also zip the delete list here, when one was produced.
    if os.path.exists(dele_file_path):
        zip_name1 = '{}\\删除.zip'.format(detail)
        with zipfile.ZipFile(zip_name1, 'w', zipfile.ZIP_DEFLATED) as zp1:
            zp1.write(dele_file_path, arcname=Path(dele_file_path).relative_to(dele_addr))
        texts = f'本次数据的详细信息如下：\n（1）{now_date}的更新数据\n\t在{now_date}\\add\\*.xml；\n（2）数据统计表\n\t包含期刊的馆藏号(gch5)、年(years)、期(num)、期号(bookid)、每期文章数量(nums)信息；在{now_date}\\统计\\tj{now_date}.mdb；\n（3）删除文献表\n\t包含要删除文献的ID号，在{now_date}\\delete\\lngid.txt；'
    else:
        texts = f'本次数据的详细信息如下：\n（1）{now_date}的更新数据\n\t在{now_date}\\add\\*.xml；\n（2）数据统计表\n\t包含期刊的馆藏号(gch5)、年(years)、期(num)、期号(bookid)、每期文章数量(nums)信息；在{now_date}\\统计\\tj{now_date}.mdb；'
    readme_file_path = BaseFile.get_new_path(detail, "readme.txt")
    with open(readme_file_path, mode="w", encoding="utf-8") as f:
        f.write(texts)
    zip_name2 = '{}\\readme.zip'.format(detail)
    with zipfile.ZipFile(zip_name2, 'w', zipfile.ZIP_DEFLATED) as zp2:
        zp2.write(readme_file_path, arcname=Path(readme_file_path).relative_to(detail))
    return True


# 上传文件
class SendCollect(object):
    """Uploads the generated delivery archives to the CQVIP file server.

    Logs into the web file manager at ftp.cqvip.com:9001, creates a remote
    directory named after the batch date and uploads the zips produced
    earlier in the pipeline.
    """

    def __init__(self, now_date):
        self.headers = {
            "Accept-Encoding": "gzip, deflate",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
            "Host": "ftp.cqvip.com:9001",
            "Referer": "http://ftp.cqvip.com:9001/login?redirect=%2Ffiles%2F",
        }
        self.bs = BaseRequest()
        self.now_date = now_date
        self.cur_path = BaseDir.get_file_dir_absolute(__file__)
        self.top_path = BaseDir.get_upper_dir(self.cur_path, -4)
        self.sPath = BaseFile.get_new_path(self.top_path, "download", "istic")
        BaseDir.create_dir(self.sPath)
        self.detail = BaseFile.get_new_path(self.sPath, self.now_date)
        BaseDir.create_dir(self.detail)

    def test_mkdir(self):
        """Create the remote directory named after the batch date; return the
        request's success flag."""
        url = f"http://ftp.cqvip.com:9001/api/resource/{self.now_date}/"
        b, e, r = self.bs.base_request_post(url, headers=self.headers)
        return b

    def upload_file(self, file_name):
        """Upload one local file into the remote date directory.

        Fix: the file handle is now closed even when the request raises,
        and the request's success flag is returned (previously this always
        returned True, so run() could never detect an upload failure).
        """
        last_name = file_name.split("\\")[-1]
        url = f"http://ftp.cqvip.com:9001/api/resource/{self.now_date}/{last_name}"
        with open(file_name, "rb") as file_obj:
            files = {"file": (file_name, file_obj, "gzip")}
            b, e, r = self.bs.base_request_post(url, headers=self.headers, files=files)
        return b

    def login_web(self):
        """Log in to the file manager and store the auth cookie on
        self.headers; return the request's success flag."""
        url = "http://ftp.cqvip.com:9001/api/auth/get"
        # SECURITY: credentials are hard-coded in source; move them to
        # configuration or environment variables.
        user_name = "zhongxinsuo_upload"
        pass_word = "9Zhc3tFke"
        data = {
            "password": pass_word,
            "username": user_name,
            "recaptcha": "",
        }
        json_data = json.dumps(data)
        b, e, r = self.bs.base_request_post(url, data=json_data, headers=self.headers)
        if b:
            self.headers["Cookie"] = f"auth={r.text}"
        return b

    def run(self):
        """Upload all delivery archives for the batch.

        Aborts (False, "存在errorxml") when a previous conversion left an
        errorxml.txt behind; otherwise logs in, creates the remote
        directory and uploads each archive, returning (True, "") on full
        success and (False, "") on any failure.
        """
        err_addr = BaseFile.get_new_path(self.detail, "errors")
        BaseDir.create_dir(err_addr)
        errorxml = BaseFile.get_new_path(err_addr, "errorxml.txt")
        if os.path.exists(errorxml):
            return False, "存在errorxml"
        doc_file = BaseFile.get_new_path(self.detail, "验收单.zip")
        add_zip_file = BaseFile.get_new_path(self.detail, "add.zip")
        tongji_zip_file = BaseFile.get_new_path(self.detail, "统计.zip")
        dele_zip_file = BaseFile.get_new_path(self.detail, "删除.zip")
        readme_file = BaseFile.get_new_path(self.detail, "readme.zip")
        # The delete archive only exists when this batch produced deletions.
        if os.path.exists(dele_zip_file):
            all_file_list = [doc_file, add_zip_file, tongji_zip_file, dele_zip_file, readme_file]
        else:
            all_file_list = [doc_file, add_zip_file, tongji_zip_file, readme_file]
        if not self.login_web():
            return False, ""
        if not self.test_mkdir():
            return False, ""
        for one_file in all_file_list:
            if not self.upload_file(one_file):
                return False, ""
        return True, ""


def dele_files(now_date):
    """Wipe the intermediate json/add artifacts for the given batch date.

    Rebuilds the download/istic/<now_date> directory layout (creating any
    missing directories) and removes every file under its ``jsons`` and
    ``add`` sub-directories.

    Args:
        now_date: Batch date string (e.g. YYYYMMDD) naming the working dir.

    Returns:
        bool: Always True.
    """
    cur_path = BaseDir.get_file_dir_absolute(__file__)
    top_path = BaseDir.get_upper_dir(cur_path, -4)
    istic_path = BaseFile.get_new_path(top_path, "download", "istic")
    BaseDir.create_dir(istic_path)
    detail = BaseFile.get_new_path(istic_path, now_date)
    BaseDir.create_dir(detail)
    # Same treatment for both intermediate directories: ensure it exists,
    # then delete every file inside it.
    for sub_name in ("jsons", "add"):
        sub_dir = BaseFile.get_new_path(detail, sub_name)
        BaseDir.create_dir(sub_dir)
        for stale_file in BaseDir.get_dir_all_files(sub_dir):
            os.remove(stale_file)
    return True


def upload_error_file(now_date):
    """Push any error artifacts for the batch to the S3-compatible store.

    Looks under download/istic/<now_date>/errors and uploads every file found
    there to the ``dc.cqvip.com`` bucket under ``project/中信所/<now_date>/``.

    Args:
        now_date: Batch date string (e.g. YYYYMMDD) naming the working dir.

    Returns:
        tuple[bool, str]: (True, "存在") when error files were uploaded,
        (True, "不存在") when the errors directory was empty.
    """
    # NOTE(review): credentials are hard-coded — move to configuration.
    bb = BaseBoto3(aws_access_key_id='sc5L9Prb8toPoIPD',
                   aws_secret_access_key='JjHxakAB3X4joRhEFSURtdd5Pks6iXl5',
                   endpoint_url="http://192.168.30.21:9000"
                   )
    bb.set_is_low_level(True)
    bb.conn_session()
    bb.get_client()
    cur_path = BaseDir.get_file_dir_absolute(__file__)
    top_path = BaseDir.get_upper_dir(cur_path, -4)
    sPath = BaseFile.get_new_path(top_path, "download", "istic")
    BaseDir.create_dir(sPath)
    detail = BaseFile.get_new_path(sPath, now_date)
    BaseDir.create_dir(detail)
    err_addr = BaseFile.get_new_path(detail, "errors")
    BaseDir.create_dir(err_addr)
    file_list = list(BaseDir.get_dir_all_files(err_addr))
    if not file_list:
        return True, "不存在"
    for one_file in file_list:
        # Fix: os.path.basename is separator-portable; the original
        # split("\\")[-1] returned the whole path on POSIX systems.
        last_name = os.path.basename(one_file)
        new_key = f"project/中信所/{now_date}/{last_name}"
        bb.upload_file(one_file, "dc.cqvip.com", new_key)
    return True, "存在"


#######################################################################################

@router.post("/solve_datas/istic_data")
async def get_one_file1(input: InputPlatformModel[IsticData]):
    return_info = ReturnInfo()
    datas = input.data.datas
    dele_batch = input.data.dele_batch
    data_list = datas["data"]
    bools = get_datas(data_list, dele_batch)
    if bools:
        return_info.status = SUCCESS
        return_info.msg_code = 200
        return_info.msg = ""
        return_info.data = {}
        return return_info.todict()
    else:
        return_info.status = FAILED
        return_info.msg_code = 400
        return_info.msg = ""
        return_info.data = {}
        return return_info.todict()


@router.post("/solve_datas/solve_istic_data")
async def get_one_file2(input: InputPlatformModel[IsticTasks]):
    return_info = ReturnInfo()
    # 将就用这个model
    now_date = input.data.now_date
    batch = input.data.batch
    ddd = Json2Xml(now_date)
    mdb_dic, total_cnt, dele_cnt = ddd.run()
    br1 = read_and_modify_docx(now_date, total_cnt, dele_cnt, batch)
    if br1:
        br = make_mdb(mdb_dic, now_date)
        if br:
            return_info.status = SUCCESS
            return_info.msg_code = 200
            return_info.msg = ""
            return_info.data = {}
            return return_info.todict()
        else:
            return_info.status = FAILED
            return_info.msg_code = 400
            return_info.msg = ""
            return_info.data = {}
            return return_info.todict()
    else:
        return_info.status = FAILED
        return_info.msg_code = 400
        return_info.msg = ""
        return_info.data = {}
        return return_info.todict()


@router.post("/solve_datas/send_files")
async def get_one_file2(input: InputPlatformModel[IsticTasks]):
    return_info = ReturnInfo()
    # 将就用这个model
    now_date = input.data.now_date
    ddd = SendCollect(now_date)
    br, texts = ddd.run()
    if br:
        return_info.status = SUCCESS
        return_info.msg_code = 200
        return_info.msg = texts
        return_info.data = {}
        return return_info.todict()
    else:
        return_info.status = FAILED
        return_info.msg_code = 400
        return_info.msg = texts
        return_info.data = {}
        return return_info.todict()


@router.post("/solve_datas/dele_files")
async def get_one_file2(input: InputPlatformModel[IsticTasks]):
    return_info = ReturnInfo()
    # 将就用这个model
    now_date = input.data.now_date

    br = dele_files(now_date)
    if br:
        return_info.status = SUCCESS
        return_info.msg_code = 200
        return_info.msg = ""
        return_info.data = {}
        return return_info.todict()
    else:
        return_info.status = FAILED
        return_info.msg_code = 400
        return_info.msg = ""
        return_info.data = {}
        return return_info.todict()


@router.post("/solve_datas/post_error")
async def get_one_file2(input: InputPlatformModel[IsticTasks]):
    return_info = ReturnInfo()
    # 将就用这个model
    now_date = input.data.now_date

    br, exists = upload_error_file(now_date)
    if br:
        return_info.status = SUCCESS
        return_info.msg_code = 200
        return_info.msg = exists
        return_info.data = {}
        return return_info.todict()
    else:
        return_info.status = FAILED
        return_info.msg_code = 400
        return_info.msg = ""
        return_info.data = {}
        return return_info.todict()
