import binascii
import copy
import json
import shutil
import sys
import warnings

import facade
import mmh3 as mmh3

import pymysql
from logicaltool.BaseTools.file_tools import FileTools
from pymongo import MongoClient
from pymongo.errors import DuplicateKeyError
from xjlibrary.our_file_dir import BaseDir

# Directory containing this script; the MySQL credentials file "db.ini"
# is expected to live next to it.
curPath = BaseDir.get_file_dir_absolute(__file__)
configfile = BaseDir.get_new_path(curPath, "db.ini")


class Check(object):
    """Consistency checker for crawled CNIPA search-result pages.

    Cross-checks the MongoDB ``pagehtml`` collection against the MySQL
    ``page`` table: pages whose stored HTML carries an empty result list
    get their MySQL ``stat`` reset to 0 and their Mongo snapshot removed,
    unless the whole day is already recorded as having zero results.
    """

    def __init__(self):
        # Stream logger supplied by the project facade.
        self.logger = facade.get_streamlogger()
        self.connection = MongoClient(
            "mongodb://xujiangrw:vipdatacenter@192.168.30.171:27017",
            # If True and the server is running without journaling, block
            # until the server has synced all data files to disk.
            fsync=False
        )
        self.db = self.connection['cnipa']
        self.mysqlutils = facade.MysqlUtiles(configfile,
                                             "db",
                                             logger=self.logger)
        # Running total of Mongo documents inspected across select() calls.
        self.count = 0

    def select(self, date):
        """Validate the page snapshots stored in Mongo for *date*.

        For every ``pagehtml`` document of the given date whose embedded
        JSON has an empty ``searchResultRecord`` list, reset the matching
        MySQL ``page`` row's ``stat`` to 0 and delete the Mongo document —
        unless a page-0 row exists for that day (meaning "no results" is
        expected). Exits the whole process if page "1" itself is empty.

        :param date: date key as stored in ``pagehtml.date`` and
            ``page.pub_date`` (presumably a string — TODO confirm caller).
        """
        # NOTE(review): Cursor.count()/collection.count() are deprecated in
        # pymongo 3.7+; this file relies on the legacy driver throughout.
        count = self.db.pagehtml.count({"date": date})
        print(count)
        results = self.db.pagehtml.find({"date": date})
        for result in results:
            self.count += 1
            # "html" holds the raw JSON payload of one result page.
            dicts = json.loads(result["html"])
            objid = result["_id"]
            page = result["page"]
            listsipo = dicts["searchResultDTO"]["searchResultRecord"]
            if not listsipo:
                # NOTE(review): date is interpolated straight into SQL —
                # injection-prone; safe only for trusted caller input.
                sql = "select allcount from page where `pub_date`='{}' and page='0'".format(date)
                row = self.mysqlutils.SelectFromDBFetchOne(sql)
                if row:
                    # A page-0 row exists: an empty result set is expected
                    # for this day, nothing to repair.
                    print(date)
                    print(self.count)
                    continue
                print("更新状态为0")
                sql = "update `page` set `stat`=0 where `pub_date`='{}' and `page`='{}'".format(date, page)
                self.mysqlutils.ExeSqlToDB(sql)
                print("删除Mongodb中的数据")
                self.db.pagehtml.delete_one({"_id": objid})
                if page == "1":
                    # First page already empty: the whole day is broken,
                    # abort the process so it can be re-crawled.
                    print(date)
                    print(page)
                    sys.exit(-1)
            else:
                print(date)
                print(self.count)

    def compara(self):
        """Dump the ``requestsid`` projection of every ``absfull`` document.

        (Despite the name, this copy only prints one collection; the real
        two-collection comparison lives in :class:`Compara`.)
        """
        results = self.db.absfull.find({}, {"requestsid": 1})
        for result in results:
            print(result)


class Compara(FileTools):
    """Maintenance toolbox for the CNIPA patent crawl.

    Mixes three stores: the MongoDB ``cnipa`` database, a MySQL database
    (credentials from ``db.ini``) and an SMB file share on 192.168.30.177.
    Provides collection comparison, cover-image re-sharding by murmur3
    hash, id→cover mapping exports, and assorted one-off repairs.

    ``self.logger``, ``self.inputobj1``/``self.inputobj2`` (sets) and the
    sqlite helpers (``conn_sqlite3``/``set_sql``/``sqlite_select``) are
    inherited from :class:`FileTools` — assumed, confirm against that class.
    """

    def __init__(self):
        super().__init__()
        self.connection = MongoClient(
            "mongodb://xujiangrw:vipdatacenter@192.168.30.171:27017",
            # If True and the server is running without journaling, block
            # until the server has synced all data files to disk.
            fsync=False
        )
        self.db = self.connection['cnipa']
        self.mysqlutils = facade.MysqlUtiles(configfile,
                                             "db",
                                             logger=self.logger)

    def compara(self):
        """Compare the id sets of two MongoDB collections.

        Loads every ``requestsid`` of ``absfull`` into ``inputobj1`` and
        of ``pub_all_id_school`` into ``inputobj2``, prints the ids found
        only in the latter, then dumps the rows currently flagged
        ``stat: 0``.
        """
        results = self.db.absfull.find({}, {"requestsid": 1})
        for result in results:
            self.inputobj1.add(result["requestsid"])
        print(len(self.inputobj1))

        results = self.db.pub_all_id_school.find({}, {"requestsid": 1})
        for result in results:
            self.inputobj2.add(result["requestsid"])

        # Ids present in pub_all_id_school but missing from absfull.
        print(self.inputobj2 - self.inputobj1)

        rows = self.db.pub_all_id_school.find({"stat": 0})
        for row in rows:
            print(row)

    def select(self):
        """Hash every ``absfull`` requestsid and bulk-insert the
        (id, hex-hash) pairs into the MySQL ``idhash`` table."""
        results = self.db.absfull.find({}, {"requestsid": 1})
        for result in results:
            requestsid = result["requestsid"]
            # 128-bit murmur3 digest, hex-encoded (bytes). Renamed from
            # `ascii`, which shadowed the builtin.
            hexhash = binascii.b2a_hex(mmh3.hash_bytes(requestsid))
            self.inputobj1.add((requestsid, hexhash))
        sql = "insert into `idhash` (rid,hash) values (%s,%s)"
        self.mysqlutils.ExeSqlMany(sql, self.inputobj1)

    def filedeal(self):
        """Process and upload cover images.

        Re-shards each raw image into a two-level directory layout named
        after the first two hex byte pairs of its murmur3 hash, storing a
        lowercased ``<id>.jpg``.
        """
        for filepath in BaseDir.get_dir_all_files(r"\\192.168.30.177\样例数据\cnipapatent\raw"):
            print(filepath)
            filename = BaseDir.get_filename_not_extsep(filepath)
            # First 4 hex chars of the hash drive the two directory levels.
            hexhash = binascii.b2a_hex(mmh3.hash_bytes(filename))[:4]
            path = r"\\192.168.30.177\样例数据\cnipapatent\cnipapatent_cover_raw_20190929\00027"
            newpath = BaseDir.get_new_path(path, hexhash[:2].decode(), hexhash[2:4].decode())
            BaseDir.create_dir(newpath)
            newfilepath = BaseDir.get_new_path(newpath, filename.lower() + ".jpg")
            print(newfilepath)
            BaseDir.copy_file_to_file(filepath, newfilepath)

    def fileandid(self):
        """Write one JSON line per raw cover image mapping
        ``requestid`` -> sharded ``/smartlib`` cover path."""
        with open(BaseDir.get_new_path("allcover.big_json"), 'w', encoding="utf-8") as f:
            for filepath in BaseDir.get_dir_all_files(r"\\192.168.30.177\样例数据\cnipapatent\raw"):
                filename = BaseDir.get_filename_not_extsep(filepath)
                # Same sharding scheme as filedeal(): first 4 hex chars.
                hexhash = binascii.b2a_hex(mmh3.hash_bytes(filename))[:4]
                path = "/smartlib/00027/{}/{}/{}".format(hexhash[:2].decode(), hexhash[2:4].decode(),
                                                         filename.lower() + ".jpg")
                print(path)
                dicts = {"requestid": filename, "cover": path}
                jsonmsg = json.dumps(dicts, ensure_ascii=False)
                f.write(jsonmsg + "\n")

    def fileandid2(self):
        """Write one JSON line per already-sharded cover image, mapping the
        uppercased ``requestid`` to its ``/smartlib`` path."""
        with open(BaseDir.get_new_path("allcover.big_json"), 'w', encoding="utf-8") as f:
            for filepath in BaseDir.get_dir_all_files(
                    r"\\192.168.30.177\样例数据\cnipapatent\cnipapatent_cover_raw_all"):
                filename = BaseDir.get_filename_not_extsep(filepath)
                # Rebase the share path onto /smartlib with forward slashes.
                filepath = filepath.replace(r"\\192.168.30.177\样例数据\cnipapatent\cnipapatent_cover_raw_all",
                                            "")
                filepath = "/smartlib" + filepath.replace("\\", "/")
                dicts = {"requestid": filename.upper(), "cover": filepath}
                print(dicts)
                jsonmsg = json.dumps(dicts, ensure_ascii=False)
                f.write(jsonmsg + "\n")

    def fileandid3(self):
        """Write one JSON line per cover image of the 20190929 cnipr batch,
        mapping ``pub_no`` to its ``/smartlib/cniprpatent`` path."""
        with open(BaseDir.get_new_path("20190929_jsoncover.txt"), 'w', encoding="utf-8") as f:
            for filepath in BaseDir.get_dir_all_files(
                    r"\\192.168.30.177\样例数据\cniprpatent\cniprpatent_cover_raw_20190929"):
                filename = BaseDir.get_filename_not_extsep(filepath)
                filepath = filepath.replace(r"\\192.168.30.177\样例数据\cniprpatent\cniprpatent_cover_raw_20190929",
                                            "")
                filepath = "/smartlib/cniprpatent" + filepath.replace("\\", "/")
                dicts = {"pub_no": filename, "cover": filepath}
                print(dicts)
                jsonmsg = json.dumps(dicts, ensure_ascii=False)
                f.write(jsonmsg + "\n")

    def set_pagehtmltoallid(self):
        """Copy every search-result record out of ``pagehtml_school`` into
        ``pub_all_id_school`` as its ``msg`` field, keyed by requestsid.

        NOTE(review): ``count()``/``update()`` are legacy pymongo APIs;
        this script is pinned to the old driver.
        """
        count = self.db.pagehtml_school.count({})
        results = self.db.pagehtml_school.find({}, {"html": 1})
        nowcount = 0
        for result in results:
            nowcount += 1
            jsonmsg = json.loads(result["html"])
            arraysearch = jsonmsg["searchResultDTO"]["searchResultRecord"]
            for onedata in arraysearch:
                requestsid = onedata["fieldMap"]["ID"]
                msg = json.dumps(onedata)
                self.db.pub_all_id_school.update({'requestsid': requestsid}, {"$set": {'msg': msg}})

            print("{}/{}".format(nowcount, count))

    def set_new_table(self):
        """Explode ``pagehtml`` search results into one ``pageidjson``
        document per record, skipping ids that already exist (relies on a
        unique index on ``requestsid`` raising DuplicateKeyError)."""
        count = self.db.pagehtml.count({})
        results = self.db.pagehtml.find({}, {"html": 1})
        nowcount = 0
        for result in results:
            nowcount += 1
            jsonmsg = json.loads(result["html"])
            arraysearch = jsonmsg["searchResultDTO"]["searchResultRecord"]
            for onedata in arraysearch:
                requestsid = onedata["fieldMap"]["ID"]
                msg = json.dumps(onedata)
                try:
                    self.db.pageidjson.insert({'requestsid': requestsid, "pageidinfo": msg})
                except DuplicateKeyError as e:
                    # Duplicate id: already migrated, note it and move on.
                    warnings.warn(str(e))

            print("{}/{}".format(nowcount, count))

    def get_num_id(self):
        """Flag patents whose citation / legal-state detail pages are
        incomplete relative to their advertised counts.

        For each ``patentinfo`` document: if the citation count or the
        legal-state total exceeds the 5 rows shown on the overview page,
        the extra pages stored in ``pmsg``/``lawmsg`` are checked; any
        page with a null list marks the document ``stat: 0`` (needs
        re-download) and is dropped from the stored JSON.
        """
        results = self.db.patentinfo.find({})
        count = 0
        for result in results:
            requestid = result["requestsid"]
            patentinfo = result["patentinfo"]
            jsondicts = json.loads(patentinfo)
            # Patent family count.
            cognation_count = jsondicts["cognation_count"]
            # Citation count.
            patcit_count = jsondicts["patcit_count"]
            # Legal-state record count.
            totalCount = jsondicts["lawStatePagination"]["totalCount"]
            app_no = jsondicts["literaInfo"]["nrdAn"]
            pub_no = jsondicts["literaInfo"]["nrdPn"]
            stat = 1
            jsonmsg = ""
            jsonmsg2 = ""
            if int(patcit_count) > 5:

                if result["pmsg"] == "":
                    stat = 0
                else:
                    dicts = json.loads(result["pmsg"])
                    # Mutate a copy while iterating the original keys.
                    dicts2 = copy.deepcopy(dicts)
                    for key in dicts.keys():
                        strings = dicts[key]["citingpatList"]
                        if strings is None:
                            stat = 0
                            del dicts2[key]

                    if dicts2:
                        jsonmsg = json.dumps(dicts2, ensure_ascii=False)

            if int(totalCount) > 5:
                if result["lawmsg"] == "":
                    stat = 0
                else:
                    dicts = json.loads(result["lawmsg"])
                    dicts2 = copy.deepcopy(dicts)
                    for key in dicts.keys():
                        strings = dicts[key]["lawStateList"]
                        if strings is None:
                            stat = 0
                            del dicts2[key]
                    if dicts2:
                        jsonmsg2 = json.dumps(dicts2, ensure_ascii=False)
            self.db.patentinfo.update({"_id": result["_id"]}, {"$set": {"stat": stat,
                                                                        "pmsg": jsonmsg,
                                                                        "lawmsg": jsonmsg2}})
            count += 1
            print(count)

    def mongodb_table_merge(self):
        """Move pub_all_id / pub_all_id_school data into pageidjson.

        pub_all_id is kept only to preserve the date-page-to-data mapping.
        Processes in batches of 1000, marking migrated rows tempstat=1.
        """
        while True:
            # Bugfix: a pymongo Cursor has no __bool__/__len__, so the old
            # `if not results` was always False and the loop never ended.
            # Materialize the batch so emptiness can be tested.
            results = list(self.db.pub_all_id_school.find({"tempstat": 0}).limit(1000))
            if not results:
                break
            for row in results:
                # Strip bookkeeping fields; what remains is $set onto the
                # pageidjson document with the same requestsid.
                del row["_id"]
                del row["tempstat"]
                del row["page"]
                del row["date"]
                del row["msg"]
                requestsid = row["requestsid"]
                print(requestsid)
                del row["requestsid"]
                self.db.pageidjson.update({"requestsid": requestsid}, {"$set": row})
                self.db.pub_all_id_school.update({"requestsid": requestsid}, {"$set": {"tempstat": 1}})

    def add_cp_f(self):
        """Backfill the cited-by count (``cpnum``) field.

        Extracts ``fieldMap.CPNUM`` from the stored page JSON of every
        pageidjson document that lacks ``cpnum`` yet, in batches of 1000.
        """
        while True:
            # Bugfix: the original loop had no exit condition and spun
            # forever once every document carried a cpnum.
            rows = list(self.db.pageidjson.find({"cpnum": {"$exists": False}}).limit(1000))
            if not rows:
                break
            for row in rows:
                dicts = json.loads(row["pageidinfo"])
                cpnum = dicts["fieldMap"]["CPNUM"]
                requestsid = row["requestsid"]
                self.db.pageidjson.update({"requestsid": requestsid}, {"$set": {"cpnum": int(cpnum)}})
                print(requestsid)

    def get_cover(self):
        """Fetch cover images referenced by the sqlite dump and copy them
        to a local ``cover`` directory next to this script.

        Bugfix: the original passed ``filepath`` (the share root, a
        directory) to shutil.copy instead of the assembled ``coverpath``.
        """
        self.conn_sqlite3(r"C:\Users\xuzhu\Desktop\CnipaPatent_20190128.db3")
        sql = "select cover_path from base_obj_meta_a"
        self.set_sql(sql)
        rows = self.sqlite_select()
        for row in rows:
            coverpath = row[0]
            if coverpath != "":
                # Rebase the /smartlib URL onto the Windows share path.
                coverpath = coverpath.replace("/smartlib", "").replace("/", "\\")
                filepath = r"\\192.168.30.177\样例数据\cnipapatent\cnipapatent_cover_new_raw_20190110\00027"
                print(filepath)
                coverpath = filepath + coverpath
                print(coverpath)

                shutil.copy(coverpath, BaseDir.get_new_path(BaseDir.get_file_dir_absolute(__file__), "cover"))

    def set_stat_0(self):
        """Reset needdownpage.stat to 0 for every date whose page rows all
        report allcount 0 (dates stored with a stray trailing colon).

        Bugfix: the IN clause was built with ``tuple(lists)``, which emits
        invalid SQL for zero dates (``()``) or one date (``('x',)``); it is
        now assembled explicitly and the update is skipped when empty.
        """
        sql = "SELECT TRIM(TRAILING ':' FROM pub_date),count(*) FROM `page` WHERE allcount ='0' and pub_date LIKE '%:%' GROUP BY pub_date ORDER BY pub_date DESC"
        rows = self.mysqlutils.SelectFromDB(sql)
        lists = []
        for row in rows:
            print(row)
            lists.append(row[0])
        print(lists)
        if lists:
            in_clause = ", ".join("'{}'".format(d) for d in lists)
            sql = "update needdownpage set stat=0 WHERE startdate in ({})".format(in_clause)
            print(sql)
            self.mysqlutils.ExeSqlToDB(sql)
        self.mysqlutils.close()

    def create_mapping(self):
        """Build the image mapping table from the 177 share into MySQL.

        Looking a file up through the two-level share layout is too slow,
        so a ``coverpath`` lookup table (rawid -> relative path) is bulk
        inserted in batches of 5000.
        """
        Listsql = []
        count = 0
        for filepath in BaseDir.get_dir_all_files(r"\\192.168.30.177\样例数据\cnipapatent\cnipapatent_cover_raw_20190604"):
            filename = filepath.split("\\")[-1]
            filename = filename.replace(".jpg", "")
            filename = filename.upper()
            filepath = filepath.replace("\\\\192.168.30.177\\样例数据\\cnipapatent\\", "")
            # NOTE(review): pymysql.escape_string was removed in PyMySQL
            # 1.0 (use pymysql.converters.escape_string) — this relies on
            # the pinned legacy version.
            sql = "insert ignore into coverpath(rawid,filepath) value('{}','{}')".format(filename,
                                                                                         pymysql.escape_string(
                                                                                             filepath))
            Listsql.append(sql)
            count += 1
            if count % 5000 == 1:
                print(count)
                self.mysqlutils.ExeSqlListToDB(Listsql)
                Listsql.clear()
        # Flush the final partial batch.
        if len(Listsql) > 0:
            print(count)
            self.mysqlutils.ExeSqlListToDB(Listsql)
            Listsql.clear()


if __name__ == "__main__":
    # Entry point: regenerate the pub_no -> cover mapping file for the
    # 20190929 cnipr batch.
    worker = Compara()
    worker.fileandid3()
