"""
书生之家--劳动关系学院采集任务
url：http://202.204.18.15/book_list.action?kindId=&secondQuery=false&pageNo=300
站点资源数量：40677
采集要求：全站图书23000余本，目前只采集ID
开动时间：20171026
负责人：venter.zhu
"""
import os
import sys
import time
import re
from bs4 import BeautifulSoup

# 将 utils 加入到 PYTHONPATH 中
pwd = os.path.dirname(os.getcwd())
utils_path = os.path.abspath(os.path.dirname(pwd) + os.path.sep + ".")
sys.path.insert(0, utils_path)
import utils


class ShuShengDownload(utils.Download):
    """Downloader for the ShuSheng book site (http://202.204.18.15/).

    Fetches listing pages, per-book detail pages and cover images.  Every
    step is resumable: files already on disk are skipped, and detail/cover
    progress is tracked in the ``shusheng`` MySQL table (``stat`` /
    ``cover_stat`` flags).
    """

    # HTTP proxy used for every request against the site.
    proxy = {"http": "http://192.168.0.71:8046"}

    # Number of listing pages reported in progress messages.
    # NOTE(review): the original loop was range(1, 3391), i.e. pages 1..3390,
    # while the message claims 3391 pages in total — confirm whether the last
    # page is intentionally skipped.  Behavior is preserved here.
    TOTAL_PAGES = 3391

    def down_list(self):
        """Download every book-listing page into ``self.list_path``.

        A page is accepted only if it contains the expected HTML marker;
        otherwise it is retried on the next run.
        """
        super().down_list()
        base_url = "http://202.204.18.15/book_list.action?kindId=&secondQuery=false&pageNo={page}"
        feature = '<td align="CENTER" valign="TOP">'  # marker proving a real listing page
        for page in range(1, self.TOTAL_PAGES):
            filename = self.list_path + '/{page}.html'.format(page=page)
            if os.path.exists(filename):
                continue  # already downloaded — resumable crawl
            resp = utils.get_html(base_url.format(page=page), proxies=self.proxy)
            if not resp:
                time.sleep(2)
                continue
            # The site serves GB-encoded pages; decode once and reuse.
            html = resp.content.decode("GB18030")
            if html.find(feature) == -1:
                time.sleep(2)
                utils.printf("不包含页面特征值：{}".format(feature))
                continue
            with open(filename, mode='w', encoding='GB18030') as f:
                f.write(html)
            utils.printf("下载第{page}页完成,总共{pages}。".format(page=page, pages=self.TOTAL_PAGES))
            time.sleep(2)

    def down_detail(self):
        """Download the detail page for every book with ``stat=0``.

        Pages are sharded into subdirectories keyed by the first digit of
        the book id.  Each successful download flips ``stat`` to 1.
        """
        super().down_detail()
        feature = "图书详细信息"  # marker proving a real detail page
        conn = utils.init_db('mysql', 'cirr')
        cur = conn.cursor()
        cur.execute("select id,url from shusheng where stat=0")
        rows = cur.fetchall()
        count = len(rows)
        for bookid, baseurl in rows:
            # Shard by the first digit of the id to keep directories small.
            path = os.path.join(self.detail_path, str(bookid)[0])
            if not os.path.exists(path):
                os.makedirs(path)
            filename = path + "/" + str(bookid) + ".html"
            if os.path.exists(filename):
                continue
            url = "http://202.204.18.15/" + baseurl
            resp = utils.get_html(url, proxies=self.proxy)
            if not resp:
                time.sleep(2)
                continue
            html = resp.content.decode("GB18030")
            if html.find(feature) == -1:
                time.sleep(2)
                utils.printf("不包含页面特征值：{}".format(feature))
                continue
            with open(filename, mode='w', encoding='GB18030') as f:
                f.write(html)
            # ids come from our own table, so string formatting is not an
            # injection risk here; parameterize if ids ever come from outside.
            cur.execute('update shusheng set stat=1 where id={}'.format(bookid))
            conn.commit()
            count -= 1
            # Fixed typo: the message previously read "下砸" instead of "下载".
            utils.printf("下载{bookid}成功,".format(bookid=bookid), "任务还剩{count}".format(count=count))
            time.sleep(2)
        conn.close()

    def down_cover(self):
        """Download the cover image for every book with ``cover_stat=0``.

        Covers are sharded like detail pages; each success (or an already
        present file) flips ``cover_stat`` to 1.
        """
        super().down_cover()
        conn = utils.init_db('mysql', 'cirr')
        cur = conn.cursor()
        cur.execute("select id,cover from shusheng where cover_stat=0")
        rows = cur.fetchall()
        for id_, cover in rows:
            cover_path = self.cover_path + '/' + str(id_)[0]
            # Bug fix: the shard directory was never created, so writing the
            # cover below failed on a fresh run.
            if not os.path.exists(cover_path):
                os.makedirs(cover_path)
            cover_name = cover_path + '/' + str(id_) + '.jpg'
            if os.path.exists(cover_name):
                cur.execute("update shusheng set cover_stat=1 where id={}".format(id_))
                conn.commit()
                continue
            resp = utils.get_html(cover, proxies=self.proxy)
            if not resp:
                time.sleep(1)
                continue
            with open(cover_name, mode='wb') as f:
                f.write(resp.content)
            cur.execute("update shusheng set cover_stat=1 where id={}".format(id_))
            conn.commit()
            print(cover_name)
            time.sleep(1)
        conn.close()


def combine(tuple_):
    """Format the first three fields of *tuple_* as a comma-separated line.

    Used to turn a regex ``findall`` match (url, bookId, kindCode) into one
    CSV-style output line terminated by a newline.
    """
    url, book_id, kind_code = tuple_[:3]
    return "{},{},{}\n".format(url, book_id, kind_code)


class ShuShengParse(utils.Parse):
    """Parser for downloaded ShuSheng pages.

    ``parse_list`` extracts book ids/urls from the saved listing pages;
    ``parse_detail`` extracts metadata from each detail page and inserts it
    into the SQLite template database.
    """

    def parse_list(self):
        """Extract every ``book_detail.action`` link from the listing pages.

        Appends one ``url,bookId,kindCode`` line per match to
        ``./list_shusheng.txt`` (append mode, so re-running adds duplicates).
        """
        super().parse_list()
        base_url = "http://202.204.18.15/"
        url_regex = re.compile(r"(book_detail.action\?bookId=(\d+)&kindId=&kindCode=(\d+))")
        # Open the output once instead of re-opening it for every input file.
        with open("./list_shusheng.txt", mode='a', encoding='utf8') as fp:
            for _, filename in utils.file_list(self.list_path):
                with open(filename, encoding='GB18030') as f:
                    txt = f.read()
                fp.writelines(map(combine, url_regex.findall(txt)))

    def parse_detail(self):
        """Parse every saved detail page and insert rows into the template DB.

        Rows are buffered and flushed to SQLite in batches of 1000 via
        ``utils.parse_results_to_sql``; a final flush handles the remainder.
        """
        super().parse_detail()
        language = "ZH"
        doc_type = "1"  # renamed from ``type`` — don't shadow the builtin
        medium = "2"
        provider = "ciirshushengbook"
        country = "CN"
        batch = time.strftime('%Y%m%d') + "00"
        stmt = (
            '''insert into modify_title_info_zt(lngid,rawid,title,creator,description,date,date_created,
            language,country,provider,provider_url,provider_id,type,cover,medium,batch) 
                VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?);'''
        )
        conn = utils.init_db('sqlite3', self.template_file)
        results = []
        cnt = 0
        for file, fullpath in utils.file_list(self.detail_path):
            with open(fullpath, encoding='GB18030') as fp:
                txt = fp.read()
            title, creator, description = self._parse_detail_one(txt)
            basename, _, ext = file.partition('.')
            rawid = basename
            # The site exposes no publication date; use sentinel values.
            date = "1900"
            date_created = "19000000"
            lngid = "CIIR_SHUSHENG_TS_" + rawid
            provider_url = provider + "@http://202.204.18.15/book_detail.action?bookId=" + rawid
            provider_id = provider + "@" + rawid
            cover_file = rawid[0] + "/" + rawid + '.jpg'
            if os.path.exists(os.path.join(self.cover_path, cover_file)):
                cover = '/smartlib/' + provider + '/' + cover_file
            else:
                cover = ''
            results.append(
                (
                    lngid, rawid, title, creator, description, date, date_created, language, country, provider,
                    provider_url, provider_id, doc_type, cover, medium, batch
                )
            )
            if utils.parse_results_to_sql(conn, stmt, results, 1000):
                # Bug fix: the counter previously did ``cnt += 1001`` although
                # exactly len(results) rows were flushed, and the modulo check
                # only fired because of that overcount.  Count accurately and
                # report progress after every flush.
                cnt += len(results)
                results.clear()
                utils.printf("已经解析大约 ", cnt, ' 条数据...')
        utils.parse_results_to_sql(conn, stmt, results)
        cnt += len(results)
        utils.printf("已经解析 ", cnt, ' 条数据...')
        conn.close()

    def _parse_detail_one(self, txt):
        """Extract (title, creator, description) from one detail-page HTML.

        Relies on the site's fixed layout: the description cell is inside a
        ``table[height="130"]`` and the title/creator cell holds exactly three
        stripped strings shaped like ``书名：《...》`` / ``作者：...``.
        NOTE(review): raises if the layout differs (missing tags, unexpected
        string count) — such pages surface as exceptions rather than bad rows.
        """
        soup = BeautifulSoup(txt, 'lxml')
        intro_tag = soup.select_one('table[height="130"] td[class="12Black"]')
        title_creator_tag = soup.select_one('tr[valign="MIDDLE"] > td[class="12Black"]')
        description = intro_tag.string.strip() if intro_tag.string else ""
        title_tmp, creator_tmp, _ = title_creator_tag.stripped_strings
        title = title_tmp.split('书名：《')[1].strip("》").strip() if title_tmp else ""
        creator = creator_tmp.split("作者：")[1].strip() if creator_tmp else ""
        return title, creator, description


if __name__ == "__main__":
    down = ShuShengDownload()
    parse = ShuShengParse()
    down.down_list()
    parse.parse_list()

    down.down_detail()
    parse.parse_detail()
    # with open("")
    # down.down_cover()