# -*- coding: utf-8 -*-
# @Time    : 2020/9/3 15:14
# @Author  : suhong
# @File    : paper_mg_2_db3.py
# @Software: PyCharm
import re
import sqlite3
import time

from re_common.baselibrary.utils.basedir import BaseDir

from re_common.baselibrary.utils.basefile import BaseFile
from re_common.baselibrary.utils.basepymongo import BasePyMongo


class Paper2Db3(object):
    """Export newspaper articles from the MongoDB collection
    ``htmljsonlatest.a_paper_latest`` into one SQLite ``.db3`` file per
    journal, normalizing the author field along the way."""

    # Fixed noise tokens stripped from the raw author field, kept in the
    # same order the original replace chain applied them.
    _AUTHOR_NOISE = (
        "海外网", "特约记者", "文/图", "/绘", "/文", "/图",
        "本报驻联合国记者", "本报记者", "新华社记者", "□", "■",
        "本报评论员", "采访组：", "\n", "字数:", "通讯员",
        "农村金融时报记者", "经济日报·中国经济网记者",
    )

    def __init__(self):
        # NOTE(review): credentials are hard-coded in the URI; consider
        # moving them to configuration.
        self.basemongo = BasePyMongo(
            "mongodb://cjrw:vipdatacenter@192.168.31.243:32920,192.168.31.206:32920,192.168.31.208:32920/?authSource=htmljsonlatest")
        self.basemongo.use_db("htmljsonlatest")
        self.basemongo.create_col("a_paper_latest")
        # Today's date (YYYYMMDD); used as the output directory name.
        self.now_date = time.strftime('%Y%m%d', time.localtime())

    def clean_author(self, author):
        """Normalize a raw author string into a ``;``-separated name list.

        Removes source attributions and reporter-title noise, turns spaces
        and full-width spaces into separators, and re-joins lone characters
        into names of at least two characters.

        :param author: raw author string scraped from the article page.
        :return: cleaned ``;``-joined author list (possibly empty).
        """
        # Drop trailing source attributions such as 《...中国青年报...》 /
        # 来源：中国青年报...
        author = re.sub(r"《.*中国青年报.*》.*", "", author)
        author = re.sub(r"来源：中国青年报.*", "", author)
        for noise in self._AUTHOR_NOISE:
            author = author.replace(noise, "")
        # A full-width space separates author names.
        author = author.replace("　", ";")
        # Remove any remaining "本...记者" job-title prefixes
        # (e.g. 本报驻美国记者).
        for job_title in re.findall(r"本.*?记者", author):
            author = author.replace(job_title, "")
        author = author.replace(" ", ";").strip(";")
        # Collapse runs of separators in one pass; the original single
        # ";;" -> ";" replacement left longer runs partially collapsed.
        author = re.sub(r";{2,}", ";", author).lstrip(";")
        cleaned = ""
        pending = ""
        for part in author.split(";"):
            if len(part) > 1:
                cleaned += part + ";"
            else:
                # Accumulate lone characters until they form a name of at
                # least two characters; a trailing unpaired char is dropped,
                # matching the original behavior.
                pending += part
                if len(pending) >= 2:
                    cleaned += pending + ";"
                    pending = ""
        return cleaned.strip(";")

    def get_db3(self, journal_name):
        """Dump all post-cutoff articles of *journal_name* into a ``.db3``.

        Copies the schema template on first use, then bulk-inserts Mongo
        documents in batches of ~30k rows.

        :param journal_name: newspaper name; used both as the Mongo filter
                             value and as the output file name.
        """
        file_path = r'E:\download\paper\{}'.format(self.now_date)
        BaseDir.create_dir(file_path)
        db3_path = file_path + '/' + journal_name + '.db3'
        # Start from the empty template so the target schema always exists.
        if not BaseFile.is_file_exists(db3_path):
            BaseFile.copy_file_to_file(r"E:\db3\papers/temp.db3", db3_path)

        sql = "replace INTO paper_info(lngid,sub_db_id,batch,rawid,source_type,journal_name,provider_url,title,title_catalyst,title_alt,abstract,author,pub_year,pub_date,meeting_counts,meeting_counts_as,`index`,sub_db,zt_provider,provider,product,country,language,bzcolumn,web_site,down_date) VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)"
        conn = sqlite3.connect(db3_path, check_same_thread=False)
        try:
            cur = conn.cursor()
            slist = []
            for item in self.basemongo.find({"journal_name": journal_name}):
                # Incremental cutoff: rows downloaded on or before
                # 2021-03-29 are assumed to be in the previous full dump.
                if item["down_date"] <= '20210329':
                    continue
                title = item["title"]
                if len(title) == 0:
                    print("title为空不插入")
                    continue
                abstract = item["abstract"]
                # Column/section name, e.g. "【要闻】..." -> "要闻".
                bzcolumn = ""
                if abstract.startswith("【"):
                    match = re.search(r'【(.*?)】', abstract)
                    if match:
                        bzcolumn = match.group(1)
                slist.append((
                    item['lngid'], item["sub_db_id"], item["batch"],
                    item["rawid"], item["source_type"], item["journal_name"],
                    item["provider_url"], title, item["title_catalyst"],
                    item["title_alt"], abstract,
                    self.clean_author(item['author']),
                    item["pub_year"], item["pub_date"],
                    # Drop leading zeros, then re-pad to two digits.
                    item["meeting_counts"].lstrip("0").zfill(2),
                    '',  # meeting_counts_as is always empty
                    item["index"], item["sub_db"], item["zt_provider"],
                    item["provider"], item["product"], item["country"],
                    item["language"], bzcolumn, item["web_site"],
                    item["down_date"],
                ))
                # Flush in batches to bound memory usage.
                if len(slist) > 30000:
                    cur.executemany(sql, slist)
                    conn.commit()
                    print("插入{}条成功".format(len(slist)))
                    slist.clear()
            # Flush the final partial batch.
            cur.executemany(sql, slist)
            conn.commit()
            print("插入{}条成功".format(len(slist)))
        finally:
            # The original leaked the connection; always close it.
            conn.close()


if __name__ == '__main__':
    # Build one shared exporter: the Mongo connection is loop-invariant, so
    # the original per-journal `Paper2Db3()` reconnected needlessly.
    exporter = Paper2Db3()
    for journal_name in ['北京日报', '解放军报', '经济日报', '人民日报', '人民日报海外版', '学习时报', '光明日报', '法制日报', '文汇报', '新华每日电讯', '新华日报',
                         '中国国防报', '中国青年报', '中国教育报', '中国能源报', '农民日报', '科技日报', '工人日报']:
        print(journal_name)
        exporter.get_db3(journal_name)
