"""
书生之家-贵州理工
url: http://117.187.11.15/book_list.action
代理: 192.168.30.176:8031

"""
import os
import sys
import time
import re
from bs4 import BeautifulSoup
import bs4

# Add the parent of the working directory's parent to PYTHONPATH so that
# the shared ``utils`` package resolves when this script is run directly.
pwd = os.path.dirname(os.getcwd())
utils_path = os.path.abspath(os.path.dirname(pwd) + os.path.sep + ".")
print(utils_path)
sys.path.insert(0, utils_path)
import utils

# Patterns used to mine metadata out of HTML comments on book detail pages.
isbn_reg = re.compile(r'\w+-\w+-\w+-\w')  # hyphenated ISBN fragment
year_reg = re.compile(r"\d{4}-\d{2}-\d{2}")  # publication date, YYYY-MM-DD
publisher_reg = re.compile(r'[\u4e00-\u9fa5]+出版社')  # Chinese publisher name ending in 出版社

BASE_URL = r'http://117.187.11.15'


class ShuShengDownload(utils.Download):
    """Downloader for the ShuSheng book mirror.

    Fetches result-list pages, per-book detail pages and cover images,
    tracking per-book progress in the ``shusheng`` MySQL table so every
    stage can be safely re-run to resume an interrupted crawl.
    """

    def __init__(self):
        self.provider = 'gitshushengbook'
        # HTTP proxy used for list-page requests (see module docstring).
        self.proxy = {"http": "http://192.168.30.176:8031"}
        super().__init__()

    def down_list(self):
        """Download every result-list page into ``self.list_path``.

        Pages already present on disk are skipped, making the method
        idempotent across restarts.
        """
        super().down_list()
        url = BASE_URL + "/book_list.action?kindId=&secondQuery=false&pageNo={page}"
        # Marker that must appear in a correctly rendered list page.
        feature = '<td align="CENTER" valign="TOP">'
        for page in range(1, 61602):
            filename = self.list_path + '/{page}.html'.format(page=page)
            if os.path.exists(filename):
                print("文件存在->{}".format(filename))
                continue
            print(url.format(page=page))
            resp = utils.get_html(url.format(page=page), proxies=self.proxy)
            if not resp:
                print("没有返回resp 跳出")
                time.sleep(2)
                continue
            # NOTE: pages are stored as UTF-8. GB18030 was abandoned because
            # some responses contain bytes it cannot decode
            # ("'gb18030' codec can't decode byte 0xb7 in position 45").
            if resp.text.find(feature) == -1:
                utils.printf("不包含页面特征值：{}".format(feature))
                time.sleep(2)
                continue
            with open(filename, mode='w', encoding="utf-8") as f:
                f.write(resp.text)
            utils.printf("下载第{page}页完成,总共{pages}。".format(page=page, pages=61602))

    def down_detail(self):
        """Download the detail page for every book with ``stat=0``.

        A successful save flips ``stat`` to 1 so only the remaining rows
        are fetched on the next run.
        """
        super().down_detail()
        feature = "图书详细信息"  # marker proving we received a real detail page
        conn = utils.init_db('mysql', 'gitshusheng')
        cur = conn.cursor()
        cur.execute("select bookid,url from shusheng where stat=0")
        rows = cur.fetchall()
        print("************")
        count = len(rows)
        for bookid, baseurl in rows:
            # Shard files into sub-directories named after the first two
            # digits of the book id to keep directory sizes manageable.
            path = os.path.join(self.detail_path, str(bookid)[:2])
            if not os.path.exists(path):
                os.makedirs(path)
            filename = path + "/" + str(bookid) + ".html"
            if os.path.exists(filename):
                count -= 1
                continue
            url = BASE_URL + "/" + baseurl
            print(url)
            resp = utils.get_html(url)  # proxies=self.proxy
            if not resp:
                print("没有返回值,请注意请求速度")
                continue
            if resp.text.find(feature) == -1:
                utils.printf("不包含页面特征值：{}".format(feature))
                continue
            with open(filename, mode='w', encoding="utf-8") as f:
                f.write(resp.text)
            # Parameterized query instead of str.format: avoids SQL-quoting
            # issues and matches parse_list's use of %s placeholders.
            cur.execute('update shusheng set stat=1 where bookid=%s', (bookid,))
            conn.commit()
            count -= 1
            # Fixed typo in progress message: "下砸" -> "下载" (download).
            utils.printf("下载{bookid}成功,".format(bookid=bookid), "任务还剩{count}".format(count=count))
        conn.close()

    def down_cover(self):
        """Download cover images for books with ``cover_stat=0`` and ``stat>0``.

        ``cover_stat`` becomes 1 on success (or when the file already
        exists) and 404 when the server reports the image missing.
        """
        super().down_cover()
        conn = utils.init_db('mysql', 'gitshusheng')
        cur = conn.cursor()
        cur.execute("select bookid,cover from shusheng where cover_stat=0 and stat>0")
        rows = cur.fetchall()
        for bookid, cover in rows:
            cover_path = self.cover_path + '/' + str(bookid)[:2]
            if not os.path.exists(cover_path):
                os.makedirs(cover_path)
            cover_name = cover_path + '/' + str(bookid) + '.jpg'
            if os.path.exists(cover_name):
                cur.execute("update shusheng set cover_stat=1 where bookid=%s", (bookid,))
                conn.commit()
                continue
            if not cover:
                # Row has no cover URL recorded; leave it for a later pass.
                continue
            coverfull = BASE_URL + cover
            print(coverfull)
            print(bookid)
            resp = utils.get_html(coverfull)  # proxies=self.proxy
            if not resp:
                time.sleep(1)
                continue
            elif resp == "404":
                # utils.get_html apparently signals a missing image with the
                # literal string "404" — TODO confirm against utils.
                cur.execute("update shusheng set cover_stat=404 where bookid=%s", (bookid,))
                conn.commit()
                continue
            with open(cover_name, mode='wb') as f:
                f.write(resp.content)
            cur.execute("update shusheng set cover_stat=1 where bookid=%s", (bookid,))
            conn.commit()
        conn.close()


def combine(tuple_):
    """Join the first three fields of *tuple_* into one comma-separated,
    newline-terminated record line."""
    first, second, third = tuple_[0], tuple_[1], tuple_[2]
    return "{},{},{}\n".format(first, second, third)


class ShuShengParse(utils.Parse):
    """Parser side of the pipeline: mines book ids, bibliographic metadata
    and cover URLs out of the HTML pages saved by ``ShuShengDownload``.
    """

    def __init__(self):
        self.provider = 'gitshushengbook'
        self.proxy = {"http": "http://192.168.30.176:8031"}
        self.sqlLists = []  # pending UPDATE statements, flushed in batches
        self.count = 0  # progress counter used by parse_cover
        super().__init__()

    def parse_list(self):
        """Extract (detail-url, bookid) pairs from every saved list page and
        insert them into the MySQL ``shusheng`` table."""
        super().parse_list()
        conn = utils.init_db('mysql', 'gitshusheng')
        cur = conn.cursor()
        # Group 1 = relative detail-page URL, group 2 = numeric book id.
        url_regex = re.compile(r"(book_detail.action\?bookId=(\d+)&kindId=&kindCode=\d+)")
        num = 0
        count = 0
        for _, filename in utils.file_list(self.list_path):
            with open(filename, encoding="utf-8") as f:
                txt = f.read()
            lines = url_regex.findall(txt)
            num += 1
            count += len(lines)
            cur.executemany("insert into shusheng(url,bookid)Values(%s,%s)", lines)
            # Commit roughly every 1000 files to bound transaction size.
            if num % 1000 == 1:
                conn.commit()
                print("num is {},count is {}".format(str(num), str(count)))
        conn.commit()
        conn.close()
        print("num is {},count is {}".format(str(num), str(count)))

    def parse_detail(self):
        """Parse every saved detail page into a metadata row and batch-insert
        the rows into the SQLite template database.

        Relies on ``utils.parse_results_to_sql``, which presumably flushes
        ``results`` once it reaches the given batch size — TODO confirm.
        """
        super().parse_detail()
        language = "ZH"
        type = "1"
        medium = "2"
        provider = "mirrorgitshushengbook"
        country = "CN"
        batch = time.strftime('%Y%m%d') + "00"
        stmt = (
            '''insert or ignore into modify_title_info_zt(lngid,rawid,title,creator,description,date,date_created,
            language,country,provider,provider_url,provider_id,type,cover,medium,batch,publisher,identifier_pisbn,title_series,folio_size)  VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?);'''
        )
        conn = utils.init_db('sqlite3', self.template_file)
        results = []
        cnt = 0
        for file, fullpath in utils.file_list(self.detail_path):
            print(fullpath)
            with open(fullpath, encoding="utf-8") as fp:
                txt = fp.read()
            (title, description, date_created, publisher, isbn, title_series, creator,
             folio_size) = self._parse_detail_one(txt)
            # Strip the "开" (folio) suffix, e.g. "16开" -> "16".
            folio_size = folio_size.replace("开", "").strip()
            # The book id is the file name without its extension.
            basename, _, _ = file.partition('.')
            rawid = basename
            if date_created:
                date = date_created[:4]  # publication year
            else:
                date = ''
            lngid = "MIRROR_GIT_SHUSHENG_TS_" + rawid
            provider_url = provider + "@" + BASE_URL + "/book_detail.action?bookId=" + rawid
            provider_id = provider + "@" + rawid
            cover_file = rawid[:2] + "/" + rawid + '.jpg'
            # Only reference a cover path if the image was actually downloaded.
            if os.path.exists(os.path.join(self.cover_path, cover_file)):
                cover = '/smartlib/' + provider + '/' + cover_file
            else:
                cover = ''
            results.append(
                (
                    lngid, rawid, title, creator, description, date, date_created, language, country, provider,
                    provider_url, provider_id, type, cover, medium, batch, publisher, isbn, title_series, folio_size
                )
            )
            if utils.parse_results_to_sql(conn, stmt, results, 1000):
                cnt += 1001
                utils.printf("已经解析大约 ", cnt, ' 条数据...')
                results.clear()

        # Flush whatever is left in the final partial batch.
        utils.parse_results_to_sql(conn, stmt, results)
        cnt += len(results)
        utils.printf("已经解析 ", cnt, ' 条数据...')
        conn.close()

    def _parse_detail_one(self, txt):
        """Parse one detail page's HTML into its metadata fields.

        Returns a tuple ``(title, description, date_created, publisher,
        isbn-without-hyphens, title_series, creator, folio_size)``; fields
        that cannot be found come back as empty strings.
        """
        title = ""
        description = ""
        date_created = ""
        publisher = ""
        isbn = ""
        title_series = ""
        creator = ""
        folio_size = ""
        soup = BeautifulSoup(txt, 'lxml')
        intro_tag = soup.select_one('table[height="130"] td[class="12Black"]')
        title_creator_tag = soup.select_one('tr[valign="MIDDLE"] > td[class="12Black"]')
        try:
            description = intro_tag.string.strip()
        except AttributeError as e:
            # Missing or non-text intro cell: leave description empty.
            pass
        # Each <strong> is a field label; its value is the text node that
        # immediately follows the label in the markup.
        for xx in title_creator_tag.find_all("strong"):
            if xx.string.strip().find("丛书") >= 0:  # series
                title_series = xx.next_sibling.strip()
            elif xx.string.strip().find("书名") >= 0:  # title
                title = xx.next_sibling.strip()
                title = title.replace("《", "").replace("》", "")
            elif xx.string.strip().find("作者") >= 0:  # author
                creator = xx.next_sibling.strip()
            elif xx.string.strip().find("开本") >= 0:  # folio size
                folio_size = xx.next_sibling.strip()
        # ISBN, publication date and publisher live inside HTML comments in
        # this cell; scan only Comment nodes with the module-level regexes.
        for item in title_creator_tag.contents:
            if not isinstance(item, bs4.element.Comment):
                continue
            isbn_tmp = isbn_reg.findall(item.string)
            year_tmp = year_reg.findall(item.string)
            publisher_tmp = publisher_reg.findall(item.string)
            if isbn_tmp:
                isbn = isbn_tmp[0]
            if year_tmp:
                date_created = year_tmp[0].replace("-", "")  # YYYYMMDD
            if publisher_tmp:
                publisher = publisher_tmp[0]

        return title, description, date_created, publisher, isbn.replace('-', ''), title_series, creator, folio_size

    def update_cover_address(self):
        """Execute and commit all queued cover-URL UPDATE statements, then
        reset the queue."""
        conn = utils.init_db('mysql', 'gitshusheng')
        cur = conn.cursor()
        for sql in self.sqlLists:
            print(sql)
            cur.execute(sql)
        self.sqlLists = list()
        conn.commit()
        conn.close()

    def _parse_cover_one(self, txt):
        """Return the cover image URL from one detail page, or abort the
        whole run if the expected markup is missing."""
        soup = BeautifulSoup(txt, 'lxml')
        tr_tag = soup.find("tr", valign="top")
        if tr_tag:
            imag_tag = tr_tag.find("img")
            if imag_tag:
                url = imag_tag['src']
                print(url)
                return url
            else:
                # Treat missing markup as fatal so bad pages are noticed.
                print("没有发现这个tr标签")
                sys.exit(-1)
        else:
            print("没有发现这个tr标签")
            sys.exit(-1)

    def parse_cover(self):
        # Walk every saved detail page, extract its cover URL and queue an
        # UPDATE per book; flush the queue in batches of ~1000.
        print(self.detail_path)
        for file, fullpath in utils.file_list(self.detail_path):
            self.count = self.count + 1
            utils.printf("{count}完成,总共244651".format(count=self.count))
            print(fullpath)
            with open(fullpath, encoding="utf-8") as fp:
                txt = fp.read()
            url = self._parse_cover_one(txt)
            bookid = file.split(os.extsep)[0]
            if url:
                # Placeholder images mean the book has no real cover.
                if url == "/images/bookdefault.gif" or url == "/images/musicdefault.gif":
                    continue
                # NOTE(review): url is interpolated straight into the SQL
                # string — a quote in the scraped src would break (or inject
                # into) the statement; consider %s parameter binding.
                sql = "update shusheng set cover='{}' where bookid='{}'".format(url, bookid)
                self.sqlLists.append(sql)
            if len(self.sqlLists) > 1000:
                self.update_cover_address()
        # Flush any remaining statements from the final partial batch.
        if len(self.sqlLists) > 0:
            self.update_cover_address()


if __name__ == "__main__":
    down = ShuShengDownload()
    parse = ShuShengParse()
    # Pipeline order: list pages -> parse list -> detail pages -> cover URLs
    # -> cover images -> metadata. The first two stages are commented out —
    # presumably already completed for this crawl; re-enable to start fresh.
    # down.down_list()
    # parse.parse_list()
    down.down_detail()
    parse.parse_cover()
    down.down_cover()
    parse.parse_detail()
