"""
书生之家--宁波大学采集任务
url：http://10.22.0.25:81/book_list.action?kindId=&secondQuery=false&pageNo=300
站点资源数量：280000
采集要求：全站图书250000余本，封面跟详情页
开动时间：20171128
负责人：venter.zhu
"""
import os
import sys
import time
import re
from bs4 import BeautifulSoup
import bs4

# Add the grandparent directory to PYTHONPATH so the project-local `utils`
# package resolves regardless of where the script is launched from.
pwd = os.path.dirname(os.getcwd())
utils_path = os.path.abspath(os.path.dirname(pwd) + os.path.sep + ".")
sys.path.insert(0, utils_path)
import utils

# Matches hyphen-separated ISBN fragments hidden in page comments, e.g. "7-5617-1234-5".
isbn_reg = re.compile(r'\w+-\w+-\w+-\w')
# Matches ISO-style dates such as "2001-05-17" (publication date in page comments).
year_reg = re.compile(r"\d{4}-\d{2}-\d{2}")
# Matches a run of CJK characters ending in "出版社" ("publishing house").
publisher_reg = re.compile(r'[\u4e00-\u9fa5]+出版社')


class ShuShengDownload(utils.Download):
    """Downloader for the ShuSheng (书生之家) book site at Ningbo University.

    Fetches paginated list pages, per-book detail pages, and cover images,
    writing the GB18030-encoded HTML / JPEG bytes to disk and tracking
    progress in the `nbu` MySQL database.
    """

    # All requests go through the campus HTTP proxy.
    proxy = {"http": "http://192.168.0.71:8135"}

    def down_list(self):
        """Download every paginated list page not already saved on disk."""
        super().down_list()
        base_url = "http://10.22.0.25:81/book_list.action?kindId=&secondQuery=false&pageNo={page}"
        feature = '<td align="CENTER" valign="TOP">'  # marker proving the page rendered correctly
        total_pages = 23539
        for page in range(1, total_pages):
            filename = self.list_path + '/{page}.html'.format(page=page)
            if os.path.exists(filename):
                continue
            resp = utils.get_html(base_url.format(page=page), proxies=self.proxy)
            if not resp:
                time.sleep(2)
                continue
            # Decode once and reuse; the site serves GB18030-encoded HTML.
            html = resp.content.decode("GB18030")
            if feature not in html:
                time.sleep(2)
                utils.printf("不包含页面特征值：{}".format(feature))
                continue
            with open(filename, mode='w', encoding='GB18030') as f:
                f.write(html)
            utils.printf("下载第{page}页完成,总共{pages}。".format(page=page, pages=total_pages))
            time.sleep(2)

    def down_detail(self):
        """Download the detail page for every `shusheng_copy` row with stat=0."""
        super().down_detail()
        feature = "图书详细信息"  # marker proving this is a real detail page
        conn = utils.init_db('mysql', 'nbu')
        cur = conn.cursor()
        cur.execute("select id,bookid,url from shusheng_copy where stat=0")
        rows = cur.fetchall()
        count = len(rows)
        for row_id, bookid, baseurl in rows:
            # Shard files into sub-directories keyed by the first two digits of bookid.
            path = os.path.join(self.detail_path, str(bookid)[:2])
            if not os.path.exists(path):
                os.makedirs(path)
            filename = path + "/" + str(bookid) + ".html"
            if os.path.exists(filename):
                continue
            url = "http://10.22.0.25:81/" + baseurl
            resp = utils.get_html(url, proxies=self.proxy)
            if not resp:
                time.sleep(2)
                continue
            html = resp.content.decode("GB18030")
            if feature not in html:
                time.sleep(2)
                utils.printf("不包含页面特征值：{}".format(feature))
                continue
            with open(filename, mode='w', encoding='GB18030') as f:
                f.write(html)
            # BUG FIX: the row is identified by the primary-key `id` column the
            # SELECT fetched; the original formatted `bookid` into `where id=…`
            # and so flagged the wrong row (or none).  Also parameterized.
            cur.execute('update shusheng_copy set stat=1 where id=%s', (row_id,))
            conn.commit()
            count -= 1
            utils.printf("下砸{bookid}成功,".format(bookid=bookid), "任务还剩{count}".format(count=count))
            time.sleep(2)
        conn.close()

    def down_cover(self):
        """Download the cover image for every `shusheng` row with cover_stat=0."""
        super().down_cover()
        conn = utils.init_db('mysql', 'nbu')
        cur = conn.cursor()
        cur.execute("select bookid,cover from shusheng where cover_stat=0")
        rows = cur.fetchall()
        for bookid, cover in rows:
            cover_path = self.cover_path + '/' + str(bookid)[:2]
            if not os.path.exists(cover_path):
                os.makedirs(cover_path)
            cover_name = cover_path + '/' + str(bookid) + '.jpg'
            if os.path.exists(cover_name):
                # Already on disk: just mark it done.
                # NOTE(review): this filters on `id` with a bookid value —
                # presumably id == bookid in `shusheng`; verify against schema.
                cur.execute("update shusheng set cover_stat=1 where id=%s", (bookid,))
                conn.commit()
                continue
            resp = utils.get_html(cover, proxies=self.proxy)
            if not resp:
                time.sleep(1)
                continue
            with open(cover_name, mode='wb') as f:
                f.write(resp.content)
            cur.execute("update shusheng set cover_stat=1 where id=%s", (bookid,))
            conn.commit()
            print(cover_name)
            time.sleep(1)
        conn.close()


def combine(tuple_):
    """Join the first three items of *tuple_* into one comma-separated line.

    Items must be strings; a trailing newline is appended.
    """
    return ",".join([tuple_[0], tuple_[1], tuple_[2]]) + "\n"

class ShuShengParse(utils.Parse):
    """Parser that turns the downloaded ShuSheng HTML pages into DB records."""

    def parse_list(self):
        """Extract (url, bookid, code) triples from each saved list page and
        insert them into the `shusheng_copy` MySQL table (duplicates ignored)."""
        super().parse_list()
        conn = utils.init_db('mysql', 'nbu')
        cur = conn.cursor()
        # Group order matches the insert column order: (url, bookid, code).
        url_regex = re.compile(r"(book_detail.action\?bookId=(\d+)&kindId=&kindCode=(\d+))")
        for _, filename in utils.file_list(self.list_path):
            with open(filename, encoding='GB18030') as f:
                txt = f.read()
            lines = url_regex.findall(txt)
            cur.executemany("insert IGNORE into shusheng_copy(url,bookid,code)Values(%s,%s,%s)", lines)
        conn.commit()
        conn.close()

    def parse_detail(self):
        """Parse every downloaded detail page and write one metadata record per
        book into the sqlite template table `modify_title_info_zt`."""
        super().parse_detail()
        language = "ZH"
        doc_type = "1"   # resource-type code (renamed: `type` shadowed the builtin)
        medium = "2"     # medium code
        # Program / provider name.
        provider = "nbushushengbook"
        # Country code.
        country = "CN"
        # Batch stamp, e.g. "2018051700".
        batch = time.strftime('%Y%m%d') + "00"
        stmt = (
            '''insert or ignore into modify_title_info_zt(lngid,rawid,title,creator,description,date,date_created,
            language,country,provider,provider_url,provider_id,type,cover,medium,batch,publisher,identifier_pisbn,title_series)  VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?);'''
        )
        conn = utils.init_db('sqlite3', self.template_file)
        results = []
        cnt = 0
        # utils.file_list yields (basename, full path) pairs.
        for file, fullpath in utils.file_list(self.detail_path):
            with open(fullpath, encoding='GB18030') as fp:
                txt = fp.read()
            # Parse once (the original parsed a second time just for a debug print).
            (title, description, date_created, publisher, isbn,
             title_series, creator) = self._parse_detail_one(txt)
            # Filename minus extension is the raw book id.
            rawid, _, _ = file.partition('.')
            # Publication year, when a full date was found.
            date = date_created[:4] if date_created else ''
            lngid = "NBU_SHUSHENG_TS_" + rawid
            provider_url = provider + "@http://10.22.0.25:81/book_detail.action?bookId=" + rawid
            provider_id = provider + "@" + rawid
            # Cover path mirrors the two-digit sharding used by down_cover().
            cover_file = rawid[:2] + "/" + rawid + '.jpg'
            if os.path.exists(os.path.join(self.cover_path, cover_file)):
                cover = '/smartlib/' + provider + '/' + cover_file
            else:
                cover = ''
            results.append((
                lngid, rawid, title, creator, description, date, date_created, language, country, provider,
                provider_url, provider_id, doc_type, cover, medium, batch, publisher, isbn, title_series))
            if utils.parse_results_to_sql(conn, stmt, results, 1000):
                # Count what was actually buffered instead of the original's
                # hard-coded (off-by-one) `cnt += 1001`.
                cnt += len(results)
                utils.printf("已经解析大约 ", cnt, ' 条数据...')
                results.clear()

        utils.parse_results_to_sql(conn, stmt, results)
        cnt += len(results)
        utils.printf("已经解析 ", cnt, ' 条数据...')
        conn.close()

    def _parse_detail_one(self, txt):
        """Extract metadata from one detail page's HTML.

        Returns a 7-tuple:
        (title, description, date_created as "YYYYMMDD" or "", publisher,
         isbn with hyphens stripped, title_series, creator) — all "" when absent.
        """
        title = ""
        description = ""
        date_created = ""
        publisher = ""
        isbn = ""
        # Series (丛书) the book belongs to.
        title_series = ""
        # Author (作者).
        creator = ""
        soup = BeautifulSoup(txt, 'lxml')
        intro_tag = soup.select_one('table[height="130"] td[class="12Black"]')
        title_creator_tag = soup.select_one('tr[valign="MIDDLE"] > td[class="12Black"]')
        # Robustness: missing intro cell (or one with nested markup) yields "".
        if intro_tag is not None and intro_tag.string is not None:
            description = intro_tag.string.strip()
        for tag in title_creator_tag.find_all("strong"):
            # Guard against empty <strong> tags (tag.string is None there),
            # which crashed the original with AttributeError.
            label = (tag.string or "").strip()
            if "丛书" in label:
                title_series = tag.next_sibling.strip()
            elif "书名" in label:
                title = tag.next_sibling.strip().replace("《", "").replace("》", "")
            elif "作者" in label:
                creator = tag.next_sibling.strip()
            # (The original also captured "开本"/folio size but never used it.)
        # ISBN, publication date and publisher are hidden inside HTML comments.
        for item in title_creator_tag.contents:
            if not isinstance(item, bs4.element.Comment):
                continue
            isbn_tmp = isbn_reg.findall(item.string)
            year_tmp = year_reg.findall(item.string)
            publisher_tmp = publisher_reg.findall(item.string)
            if isbn_tmp:
                isbn = isbn_tmp[0]
            if year_tmp:
                date_created = year_tmp[0].replace("-", "")
            if publisher_tmp:
                publisher = publisher_tmp[0]

        return title, description, date_created, publisher, isbn.replace('-', ''), title_series, creator


if __name__ == "__main__":
    downloader = ShuShengDownload()
    extractor = ShuShengParse()
    # Full pipeline: list pages -> url table -> detail pages -> metadata -> covers.
    downloader.down_list()
    extractor.parse_list()
    downloader.down_detail()
    extractor.parse_detail()
    downloader.down_cover()