"""
书生之家--浙江万里学院采集任务
http://lib.zwu.edu.cn/256/list.htm

url：http://10.60.154.22:81/book_listforward.action?kindId=0
站点资源数量：244651
采集要求：全站图书244651余本，封面跟详情页
开动时间：20180518
负责人：xujiang
"""
# Add the shared `utils` package directory to PYTHONPATH
import os
import re
import sys
import threading
import time

import bs4
import chardet
from bs4 import BeautifulSoup

# Locate the grandparent of the current working directory and put it on
# sys.path so the shared ``utils`` package resolves regardless of where
# the script is launched from.
pwd = os.path.dirname(os.getcwd())
print(os.getcwd())
print(pwd)
# abspath(dirname(pwd) + "/.") simply normalises to the parent of ``pwd``
utils_path = os.path.abspath(os.path.dirname(pwd) + os.path.sep + ".")
print(utils_path)
sys.path.insert(0, utils_path)

import utils


# Hyphenated ISBN as embedded in the detail pages' HTML comments,
# e.g. "7-04-012345-6".
# NOTE(review): matches exactly four hyphen-separated groups; a five-group
# 13-digit ISBN would not match — confirm against the source pages.
isbn_reg = re.compile(r'\w+-\w+-\w+-\w')
# Publication date in YYYY-MM-DD form.
year_reg = re.compile(r"\d{4}-\d{2}-\d{2}")
# A run of CJK characters ending in "出版社" (publisher name).
publisher_reg = re.compile(r'[\u4e00-\u9fa5]+出版社')

class ShuShengDownload(utils.Download):
    """Crawler for the ShuSheng book site (http://10.60.154.22:81).

    Downloads the paginated book-list pages, the cover images and the
    per-book detail pages, tracking progress in the ``zjwl.shusheng``
    MySQL table maintained by :class:`ShuShengParse`.
    """

    def __init__(self):
        # Attributes are set before super().__init__() because the base
        # class presumably derives paths (list_path, cover_path, ...)
        # from them — TODO confirm against utils.Download.
        self.provider = "zjwlshushengbook"
        self.proxy = {"http": "http://192.168.0.72:8119"}
        self.base_url = "http://10.60.154.22:81/book_list.action?zhongtuQuery=false&kindId=&secondQuery=false&pageNo={page}"
        self.feature = '<td align="CENTER" valign="TOP">'  # marker a valid list page must contain
        self.total_pages = 20389  # number of list pages on the site
        super().__init__()
        # NOTE(review): intentionally invokes the *parent* down_list as a
        # construction side effect — preserved as-is.
        super().down_list()

    def down_list(self):
        """Download every list page sequentially."""
        for page in range(1, self.total_pages):
            self.down_list_run(page)

    def down_list_run(self, page):
        """Download one list page and save it as ``{page}.html``.

        Skips pages already on disk and rejects responses that do not
        contain ``self.feature``.
        """
        thread = threading.current_thread()
        filename = self.list_path + '/{page}.html'.format(page=page)
        # BUG FIX: the original format string contained the literal
        # "(unknown)" and never used its `filename` argument.
        print("{threading}:{filename}".format(threading=thread.name, filename=filename))
        if os.path.exists(filename):
            return
        resp = utils.get_html(self.base_url.format(page=page), proxies=self.proxy)
        if not resp:
            time.sleep(2)
            return

        # Decode once (the original decoded the payload twice).
        html = resp.content.decode("GB18030")
        if html.find(self.feature) == -1:
            time.sleep(2)
            utils.printf("{threading}不包含页面特征值：{feature}".format(
                threading=thread.name, feature=self.feature))
            return
        with open(filename, mode='w', encoding='GB18030') as f:
            f.write(html)
        utils.printf("{threading}下载第{page}页完成,总共{pages}。".format(
            threading=thread.name, page=page, pages=self.total_pages))
        time.sleep(2)

    def down_cover(self):
        """Download cover images for every row with ``cover_stat=0``."""
        super().down_cover()
        conn = utils.init_db('mysql', 'zjwl')
        cur = conn.cursor()
        cur.execute("select bookid,cover from shusheng where cover_stat=0")
        rows = cur.fetchall()
        for bookid, cover in rows:
            # Covers are sharded into directories by the first two digits
            # of the book id.
            cover_path = self.cover_path + '/' + str(bookid)[:2]
            if not os.path.exists(cover_path):
                os.makedirs(cover_path)
            cover_name = cover_path + '/' + str(bookid) + '.jpg'
            if os.path.exists(cover_name):
                # BUG FIX: the original updated "where id={}" with a
                # bookid value, but every other query in this file keys
                # the table on `bookid` — confirm schema has no separate
                # `id` column.
                cur.execute("update shusheng set cover_stat=1 where bookid=%s", (bookid,))
                conn.commit()
                continue
            resp = utils.get_html(cover, proxies=self.proxy)
            if not resp:
                time.sleep(1)
                continue
            with open(cover_name, mode='wb') as f:
                f.write(resp.content)
            cur.execute("update shusheng set cover_stat=1 where bookid=%s", (bookid,))
            conn.commit()
            print(cover_name)
            time.sleep(1)
        conn.close()

    def down_detail(self, stat):
        """Download detail pages for every row whose ``stat`` equals *stat*."""
        super().down_detail()

        conn = utils.init_db('mysql', 'zjwl')
        cur = conn.cursor()
        cur.execute("select bookid,url from shusheng where stat=%s", (stat,))
        rows = cur.fetchall()
        count = len(rows)
        for bookid, baseurl in rows:
            self.down_detail_run(bookid, baseurl, conn, cur, count)
            count -= 1
        conn.close()

    def down_detail_run(self, bookid, baseurl, conn, cur, i):
        """Download one detail page and mark the row ``stat=1``.

        Also marks the row done when the file already exists on disk.
        *i* is the number of remaining tasks, used only for logging.
        """
        feature = "图书详细信息"  # marker a valid detail page must contain
        thread = threading.current_thread()
        path = os.path.join(self.detail_path, str(bookid)[:2])
        if not os.path.exists(path):
            os.makedirs(path)
        filename = path + "/" + str(bookid) + ".html"
        if os.path.exists(filename):
            try:
                cur.execute("update shusheng set stat=1 where bookid=%s", (bookid,))
                conn.commit()
            except Exception:
                # Best-effort: log and move on to the next book.
                print("{threading}bytearray index out of range".format(threading=thread.name))

            return
        url = "http://10.60.154.22:81/" + baseurl
        resp = utils.get_html(url, proxies=self.proxy)
        if not resp:
            time.sleep(2)
            return
        # BUG FIX: the original checked the feature on an errors="ignore"
        # decode but then wrote a *strict* decode, which could raise
        # UnicodeDecodeError after the check had already passed.
        html = resp.content.decode("GB18030", "ignore")
        if html.find(feature) == -1:
            time.sleep(2)
            utils.printf("不包含页面特征值：{}".format(feature))
            return
        with open(filename, mode='w', encoding='GB18030') as f:
            f.write(html)
        sql = 'update shusheng set stat=1 where bookid={}'.format(bookid)
        utils.printf("{threading},{sql}".format(threading=thread.name, sql=sql))
        try:
            cur.execute("update shusheng set stat=1 where bookid=%s", (bookid,))
            conn.commit()
        except Exception:
            print("{threading}bytearray index out of range".format(threading=thread.name))
            return
        # BUG FIX: "下砸" was a typo for "下载" (download).
        utils.printf("{threading}下载{bookid}成功,".format(threading=thread.name, bookid=bookid),
                     "任务还剩{count}".format(count=i))
        time.sleep(1)



class ShuShengParse(utils.Parse):
    """Parser for the downloaded ShuSheng pages.

    ``parse_list`` extracts detail-page URLs from the saved list pages
    into MySQL; ``parse_detail`` turns each saved detail page into a row
    of the sqlite template table.
    """

    def __init__(self):
        self.provider = "zjwlshushengbook"
        super().__init__()

    def parse_list(self):
        """Scan every saved list page and insert (url, bookid, code, page)
        rows into the ``shusheng`` MySQL table (duplicates ignored)."""
        super().parse_list()
        conn = utils.init_db('mysql', 'zjwl')
        cur = conn.cursor()
        url_regex = re.compile(r"(book_detail.action\?bookId=(\d+)&kindId=&kindCode=(\d+))")
        count = 0
        for file, filename in utils.file_list(self.list_path):
            # `filename` includes the path, `file` is the bare name.
            with open(filename, encoding='GB18030') as f:
                txt = f.read()
            pagenumber = file.split(os.extsep)[0]
            # Each match is (relative url, bookid, kind code); append the
            # originating page number.
            # BUG FIX: removed the dead per-page `sql = (..., lines)` tuple
            # that was built and printed but never used.
            lines = [match + (pagenumber,) for match in url_regex.findall(txt)]
            cur.executemany(
                "insert IGNORE into shusheng(url,bookid,code,page)Values(%s,%s,%s,%s)",
                lines)
            count += 1
            utils.printf("{page}页完成,总共20388".format(page=count))
        conn.commit()
        conn.close()

    def parse_detail(self):
        """Parse every saved detail page into the sqlite template table."""
        super().parse_detail()
        language = "ZH"
        doc_type = "1"  # renamed from `type` to avoid shadowing the builtin
        medium = "2"
        # program name
        provider = "zjwlshushengbook"
        # country code
        country = "CN"
        # batch stamp, e.g. 2018051700
        batch = time.strftime('%Y%m%d') + "00"
        stmt = (
            '''insert or ignore into modify_title_info_zt(lngid,rawid,title,creator,description,date,date_created,
            language,country,provider,provider_url,provider_id,type,cover,medium,batch,publisher,identifier_pisbn,title_series)  VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?);'''
        )
        conn = utils.init_db('sqlite3', self.template_file)
        results = []
        cnt = 0
        # utils.file_list yields (bare name, full path).
        for file, fullpath in utils.file_list(self.detail_path):
            with open(fullpath, encoding='GB18030') as fp:
                txt = fp.read()
            # PERF FIX: the original called _parse_detail_one twice per
            # file (once for the record, once just to print it).
            record = self._parse_detail_one(txt)
            (title, description, date_created, publisher, isbn, title_series, creator) = record
            print(record)
            # The raw id is the file name without its extension.
            rawid, _, _ = file.partition('.')
            # Publication year (empty when the date is unknown).
            date = date_created[:4] if date_created else ''
            lngid = "ZJWL_SHUSHENG_TS_" + rawid
            provider_url = provider + "@http://10.60.154.22:81/book_detail.action?bookId=" + rawid
            provider_id = provider + "@" + rawid
            # Covers are sharded by the first two characters of the id.
            cover_file = rawid[:2] + "/" + rawid + '.jpg'
            if os.path.exists(os.path.join(self.cover_path, cover_file)):
                cover = '/smartlib/' + provider + '/' + cover_file
            else:
                cover = ''
            results.append((
                lngid, rawid, title, creator, description, date, date_created, language, country, provider,
                provider_url, provider_id, doc_type, cover, medium, batch, publisher, isbn, title_series))
            if utils.parse_results_to_sql(conn, stmt, results, 1000):
                # NOTE(review): running total is approximate by design
                # ("大约" = "approximately" in the log message).
                cnt += 1001
                utils.printf("已经解析大约 ", cnt, ' 条数据...')
                results.clear()

        utils.parse_results_to_sql(conn, stmt, results)
        cnt += len(results)
        utils.printf("已经解析 ", cnt, ' 条数据...')
        conn.close()

    def _parse_detail_one(self, txt):
        """Extract one book record from a detail-page HTML string.

        Returns a 7-tuple ``(title, description, date_created as
        YYYYMMDD, publisher, isbn without hyphens, title_series,
        creator)``; fields missing from the page come back as ``""``.
        """
        title = ""
        description = ""
        date_created = ""
        publisher = ""
        isbn = ""
        # series title
        title_series = ""
        # author
        creator = ""
        soup = BeautifulSoup(txt, 'lxml')
        # Abstract cell.
        intro_tag = soup.select_one('table[height="130"] td[class="12Black"]')
        # Cell holding the title / author / series labels.
        title_creator_tag = soup.select_one('tr[valign="MIDDLE"] > td[class="12Black"]')
        if intro_tag is not None and intro_tag.string:
            description = intro_tag.string.strip()
        if title_creator_tag is None:
            # ROBUSTNESS FIX: the original raised AttributeError on pages
            # without this cell; return an all-empty record instead.
            return title, description, date_created, publisher, isbn, title_series, creator
        # Labels live in <strong> tags; the value is the text node that
        # follows the label.
        for strong in title_creator_tag.find_all("strong"):
            # BUG FIX: the original crashed when strong.string was None or
            # when a label had no plain-text sibling after it.
            label = (strong.string or "").strip()
            sibling = strong.next_sibling
            value = sibling.strip() if isinstance(sibling, str) else ""
            if "丛书" in label:
                title_series = value
            elif "书名" in label:
                title = value.replace("《", "").replace("》", "")
            elif "作者" in label:
                creator = value
        # ISBN, publication date and publisher are hidden inside HTML
        # comments within the same cell.
        for item in title_creator_tag.contents:
            # bs4.element.Comment marks an HTML comment node.
            if not isinstance(item, bs4.element.Comment):
                continue
            isbn_tmp = isbn_reg.findall(item.string)
            year_tmp = year_reg.findall(item.string)
            publisher_tmp = publisher_reg.findall(item.string)
            if isbn_tmp:
                isbn = isbn_tmp[0]
            if year_tmp:
                date_created = year_tmp[0].replace("-", "")
            if publisher_tmp:
                publisher = publisher_tmp[0]

        return title, description, date_created, publisher, isbn.replace('-', ''), title_series, creator


if __name__ == "__main__":
    down = ShuShengDownload()
    # down.down_list()
    parse = ShuShengParse()
    parse.parse_list()
    # number = sys.argv[1]
    down.down_detail(number)
