import gzip
import os
import time

import facade
from xjlibrary.our_file_dir import BaseDir

# Directory containing this script; used to resolve the DB config that sits next to it.
curPath = BaseDir.get_file_dir_absolute(__file__)
# Path to the MySQL connection settings ("db" section) consumed by facade.MysqlUtiles.
configfile = BaseDir.get_new_path(curPath, "db.ini")


class GZipTool:
    """Stream-based gzip compressor/decompressor.

    Copies data between a plain file and a gzip file in chunks of
    ``bufSize`` bytes, so arbitrarily large files can be handled without
    loading them fully into memory.
    """

    def __init__(self, bufSize):
        # Number of bytes read per chunk while copying.
        self.bufSize = bufSize
        # Kept for backward compatibility with the original attribute layout.
        self.fin = None
        self.fout = None

    def compress(self, src, dst):
        """Gzip-compress the file at *src* into *dst*."""
        # `with` guarantees both handles are closed even if the copy raises
        # (the original leaked them on error, closing only on success).
        with open(src, 'rb') as fin, gzip.open(dst, 'wb') as fout:
            self.fin = fin
            self.fout = fout
            self.__in2out()

    def decompress(self, gzFile, dst):
        """Decompress the gzip file *gzFile* into the plain file *dst*."""
        with gzip.open(gzFile, 'rb') as fin, open(dst, 'wb') as fout:
            self.fin = fin
            self.fout = fout
            self.__in2out()

    def __in2out(self):
        """Copy self.fin to self.fout in bufSize-byte chunks."""
        while True:
            buf = self.fin.read(self.bufSize)
            if not buf:  # empty read => EOF
                break
            self.fout.write(buf)


class Utils(object):
    """One-off maintenance helpers for the `articlenew` MySQL table and for
    re-packing large dump files into ~1 GB ".big_json" chunk files.

    All input/output paths are hard-coded; the methods are meant to be run
    manually, one at a time, from ``__main__``.
    """

    def __init__(self):
        self.logger = facade.get_streamlogger()
        self.mysqlutils = facade.MysqlUtiles(configfile, "db", logger=self.logger)
        # Scratch sets used by compare()/read_sql_stat_is_5(). NOTE(review):
        # they are never cleared between calls, so run each method on a
        # fresh instance to avoid stale entries.
        self.accref = set()
        self.accref2 = set()

    def get_count(self):
        """Read the Hadoop-produced ref.txt (note: its encoding is GB2312,
        not utf-8) and, for every line, count the ';'-separated references
        in the second tab field, writing "acc,count" lines to ./count.txt.
        """
        count = 0
        with open("./count.txt", "w", encoding="utf-8") as outf:
            with open(r"C:\Users\xuzhu\Desktop\ref.txt", "r", encoding="GB2312") as f:
                for line in f:
                    fields = line.split("\t")
                    acc = fields[0].strip()
                    num = fields[1].count(";")
                    outf.write(acc + "," + str(num) + "\n")
                    count += 1
                    # Progress heartbeat roughly every 10k lines.
                    if count % 10000 == 1:
                        print(count)

    def _load_lines(self, path, encoding, dest):
        """Add every stripped line of *path* (opened with *encoding*) to set *dest*."""
        with open(path, "r", encoding=encoding) as f:
            for line in f:
                dest.add(line.strip())

    def _write_lines(self, path, values):
        """Write each value to *path* (utf-8), one per line."""
        with open(path, "w", encoding="utf-8") as f:
            for value in values:
                f.write(value + "\n")

    def compare(self):
        """Intersect ./count.txt (from get_count) with ./sqlcount.txt (a DB
        export of accnum/ref_cnt) and write the common lines to
        ./countjiao.txt — i.e. the ids whose reference count matches.
        """
        self._load_lines("./count.txt", "utf-8", self.accref)
        self._load_lines("./sqlcount.txt", "GB2312", self.accref2)
        self._write_lines("./countjiao.txt", self.accref & self.accref2)

    def read_sql_stat_is_5(self):
        """Given ./sqlcount5.txt (ids whose DB stat is already 5) and
        ./countjiao.txt (ids verified by compare()), work out which rows
        must flip 5 -> 0 (./stat5to0.txt) and which 0 -> 5 (./stat0to5.txt).
        """
        self._load_lines("./sqlcount5.txt", "GB2312", self.accref2)
        self._load_lines("./countjiao.txt", "utf-8", self.accref)
        # Marked 5 in the DB but not verified -> must go back to 0.
        self._write_lines("./stat5to0.txt", self.accref2 - self.accref)
        # Verified but not yet marked -> must be set to 5.
        self._write_lines("./stat0to5.txt", self.accref - self.accref2)

    @staticmethod
    def _in_clause(values):
        """Render *values* as a SQL ``IN (...)`` list.

        Unlike ``str(tuple(values))`` (the original approach), this never
        emits the trailing comma of a 1-element tuple repr — ``('x',)`` —
        which MySQL rejects as a syntax error.
        """
        return "({})".format(", ".join("'{}'".format(v) for v in values))

    def _update_ref_stat(self, stat, accs):
        """Flush one ref_stat UPDATE for the given accession numbers."""
        # SECURITY: string-built SQL; tolerable only because the accession
        # numbers come from our own export files — never reuse for user input.
        sql = "UPDATE `articlenew` set ref_stat={} WHERE AccessionNumber in {}".format(
            stat, self._in_clause(accs))
        self.mysqlutils.ExeSqlToDB(sql)

    def set_ref_stat_5(self):
        """Mark the rows listed in ./stat0to5.txt with ref_stat=5 (parsed
        and reference count verified), flushing an UPDATE every 100k ids.
        """
        stat = 5
        batch = set()
        with open("./stat0to5.txt", "r", encoding="utf-8") as f:
            for line in f:
                batch.add(line.split(",")[0].strip())
                if len(batch) > 100000:
                    self._update_ref_stat(stat, batch)
                    batch.clear()
        # Flush the remainder; skip when empty — "IN ()" is invalid SQL
        # (the original executed this unconditionally).
        if batch:
            self._update_ref_stat(stat, batch)
            batch.clear()

    def _update_stat5(self, accs):
        """Flush one stat=5 UPDATE for the given accession numbers."""
        sql = "update articlenew set stat=5 where AccessionNumber in {}".format(
            self._in_clause(accs))
        self.mysqlutils.ExeSqlToDB(sql)

    def set_stat_5(self):
        """Confirm uploaded data via a Hadoop export: set stat=5 for every
        accession number listed in EI.txt, flushing every 10k input lines.
        """
        num = 0
        batch = set()
        with open(r"C:\Users\xuzhu\Desktop\EI.txt", mode="r", encoding="utf-8") as f:
            for fLine in f:
                batch.add(fLine.strip())
                num += 1
                # Trigger on raw line count (not set size) to preserve the
                # original flush cadence even when the file has duplicates.
                if num > 10000:
                    self._update_stat5(batch)
                    num = 0
                    batch.clear()
        if batch:  # guard: empty batch would produce invalid "IN ()" SQL
            self._update_stat5(batch)

    def _split_into_chunks(self, src_path, big_dir, big_path,
                           batch_lines=200, max_bytes=1100000000):
        """Append *src_path*'s lines to *big_path* in batches of
        *batch_lines*; once *big_path* grows past *max_bytes*, roll over to
        a new timestamped ".big_json" file in *big_dir*.

        Returns the chunk path in use after *src_path* is fully consumed.
        """
        pending = []  # list + join avoids the quadratic "s += line" pattern
        count = 0
        with open(src_path, "r", encoding="utf-8") as inf:
            for line in inf:
                pending.append(line.strip() + "\n")
                count += 1
                if count > batch_lines:
                    with open(big_path, "a", encoding="utf-8") as outf:
                        outf.write("".join(pending))
                    pending = []
                    count = 0
                    print(BaseDir.get_file_size(big_path))
                    if BaseDir.get_file_size(big_path) > max_bytes:
                        # Chunk is full: start a fresh timestamped file.
                        big_path = BaseDir.get_new_path(
                            big_dir, str(int(time.time())) + ".big_json")
            # Flush the remainder unconditionally so big_path is created
            # even for an empty source file (original behavior).
            with open(big_path, "a", encoding="utf-8") as outf:
                outf.write("".join(pending))
        return big_path

    def transformationgz(self):
        """Decompress every gz under F:\\biggz\\gz into a temp file, then
        re-split its lines into ~1 GB ".big_json" chunk files grouped by
        the source file's parent directory name.
        """
        gzTool = GZipTool(200 * 1024)
        filename = str(int(time.time())) + ".big_json"
        for filepath in BaseDir.get_dir_all_files(r"F:\biggz\gz"):
            print(filepath)
            dirname = filepath.split("\\")[-2]
            gz1gpath = BaseDir.get_new_path(r"F:\biggz\gz1G", dirname)
            if not BaseDir.is_dir_exists(gz1gpath):
                os.makedirs(gz1gpath)
                # New source directory: start a fresh chunk file.
                filename = str(int(time.time())) + ".big_json"
            dst = BaseDir.get_new_path(r'F:\biggz\big_json', "temp.big_json")
            gzTool.decompress(filepath, dst)
            big_dir = BaseDir.get_new_path(r"F:\biggz\big_json1G", dirname)
            BaseDir.create_dir(big_dir)
            big_path = BaseDir.get_new_path(big_dir, filename)
            big_path = self._split_into_chunks(dst, big_dir, big_path)
            # Carry the (possibly rolled-over) chunk name to the next file.
            filename = os.path.basename(big_path)

    def transformation(self):
        """Split already-uncompressed big files into ~1 GB chunk files
        (same as transformationgz, but without the gunzip step).
        """
        filename = str(int(time.time())) + ".big_json"
        big_dir = r"D:\xujiang\download\EI\download\json\refs2"
        BaseDir.create_dir(big_dir)
        big_path = BaseDir.get_new_path(big_dir, filename)
        for filepath in BaseDir.get_dir_all_files(r"D:\xujiang\download\EI\download\json\refs"):
            print(filepath)
            # big_path persists across source files so chunks fill up fully.
            big_path = self._split_into_chunks(filepath, big_dir, big_path)


if __name__ == "__main__":
    utils = Utils()
    # utils.get_count()
    # utils.compare()
    # utils.read_sql_stat_is_5()
    # utils.set_stat_5()
    utils.transformation()
