"""
翻页依赖于上一页的参数  如果某个字母的页数不对
删除该字母的所有html网页 重新下载
"""
import os
import sqlite3
import time

from bs4 import BeautifulSoup
from xjlibrary.mysqlmoudel.simple.mysqlclient import MySqlDbConnect, SelctSqlFromDB
from xjlibrary.tools.BaseFile import BaseFile
from xjlibrary.tools.BaseUrl import BaseUrl
from xjlibrary.tools.filename import getDirAllFiles, get_filename_not_extsep

# Absolute path of this script and the project root two levels up.
curPath = BaseFile.get_path_absolute(__file__)
TopPath = BaseFile.get_top_path(curPath, -2)
# Directory of the downloaded per-letter HTML listing pages (input).
sPath = BaseFile.getNewPath(TopPath, "download", "tianfang", "download", "book")
# Directory of the downloaded cover images, used to decide the `cover` field.
cover_path = BaseFile.getNewPath(TopPath, "download", "tianfang", "download", "cover")

# Accumulator of parsed rows; flushed to SQLite by InsertSqlToDB3().
results = []
# Database connection
def MajorDbConnect():
    """Return a MySQL connection configured by the db.ini beside this script."""
    config_file = "db.ini"
    return MySqlDbConnect(curPath, config_file)

# Bulk-insert helper for the local SQLite results database.


def InsertSqlToDB3():
    """Flush the accumulated module-level ``results`` rows into SQLite.

    Inserts every pending tuple into ``modify_title_info_zt`` of
    ``mirrordqlibtingbookvideo.db3`` using ``INSERT OR IGNORE`` so
    re-running the script does not create duplicates.  ``results`` is
    cleared only after a successful commit, so on failure the pending
    rows stay queued and the exception propagates to the caller.
    """
    global results
    stmt = (
        '''insert or ignore into modify_title_info_zt(lngid,rawid,title,description,
        language,country,provider,provider_url,provider_id,type,cover,medium,batch,publisher,provider_subject,date,date_created)  VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?);'''
    )
    conn = sqlite3.connect('mirrordqlibtingbookvideo.db3')
    try:
        # Connection.executemany creates and disposes its own cursor.
        conn.executemany(stmt, results)
        conn.commit()
    finally:
        # The original leaked the connection when executemany/commit raised
        # (and its `except Exception as e: raise e` was a no-op); always close.
        conn.close()
    results = []


def parahtml(filePath):
    """Parse one downloaded listing page and queue its book rows for insert.

    Extracts every book entry (cover image, detail link, title, abstract)
    from the page's result table, builds the 17-field metadata tuple
    expected by ``InsertSqlToDB3`` and appends it to the module-level
    ``results`` list, flushing to SQLite once more than 100 rows are
    pending.

    :param filePath: path of a ``<letter>_<page>.html`` file under ``sPath``.
    """
    print(filePath)
    # File stem looks like "<letter>_<page>"; the letter keys the MySQL
    # `book` lookup for the provider subject.
    # Fixed: the original read the *global* `filepath` (set by the __main__
    # loop) instead of the `filePath` parameter, and declared an undefined,
    # unused `global ListSql`.
    charName = get_filename_not_extsep(filePath)
    charname = charName.split("_")[0]
    # NOTE(review): string-built SQL -- acceptable while charname comes from
    # our own file names, but switch to a parameterized query if the input
    # source ever changes.
    sql = "select `filename` from `book` where bookname='{}'".format(charname)
    rows = SelctSqlFromDB(sql, MajorDbConnect())
    provider_subject = rows[0][0]
    language = "ZH"
    # Code 10 = multimedia (renamed from `type` to avoid shadowing the builtin).
    doc_type = "10"
    # 1 = physical, 2 = digital
    medium = "2"
    provider = "mirrordqlibtingbookvideo"
    # Country code
    country = "CN"
    date = "1900"
    date_created = '19000000'
    # Batch stamp, e.g. 2018051700
    batch = time.strftime('%Y%m%d') + "00"
    # BeautifulSoup parses eagerly, so the handle can be closed right away;
    # the original leaked the open file object.
    with open(filePath, 'r', encoding='utf-8') as fp:
        soup = BeautifulSoup(fp, "lxml")
    # Unused value, but kept: the .find(...) acts as a sanity check that this
    # is a category page (raises AttributeError otherwise).
    span_strings = "".join(
        soup.find("span", attrs={"style": "font-size:14px; padding-right:200px;  "}).stripped_strings).replace(
        "您当前选择的分类：", "")
    table_tag = soup.find("table")
    tr_all_tag = table_tag.find_all("tr")
    # Drop the header and footer rows, then group every 4 <tr> into one entry.
    tr_all_tag = tr_all_tag[1:-1]
    tr_all_tag = [tr_all_tag[i:i + 4] for i in range(0, len(tr_all_tag), 4)]
    for tr in tr_all_tag:
        imag_src = tr[0].img["src"]
        a_tag = tr[0].find("a", class_="red")
        href = a_tag["href"]
        title = "".join(a_tag.stripped_strings)
        # Abstract / description text.
        content = "".join(tr[1].stripped_strings)

        rawid = BaseUrl.urlQuery2Dict(href)["id"]
        lngid = "MIRROR_DQLIB_TINGBOOK_DMT_" + rawid
        description = content
        title = charname + "_" + title
        provider_url = provider + "@http://10.38.48.163/tingbook/play.aspx?id=" + rawid
        provider_id = provider + "@" + rawid
        # Cover file name derived from the last two image-URL path segments.
        listurl = BaseUrl.urlPath2List(imag_src)[-2:]
        image_file_name = "_".join(listurl).replace("_picture", "").replace(".gif", ".jpg")
        if os.path.exists(os.path.join(cover_path, image_file_name)):
            cover = '/smartlib/' + provider + '/' + image_file_name
        else:
            cover = ''
        publisher = "盛大天方科技有限公司"
        results.append((
            lngid, rawid, title, description, language, country, provider, provider_url, provider_id, doc_type,
            cover, medium, batch, publisher, provider_subject, date, date_created))
    # Flush in batches so memory stays bounded on large crawls.
    if len(results) > 100:
        InsertSqlToDB3()


if __name__ == "__main__":
    # Parse every downloaded listing page, then flush any leftover rows
    # (< 100) that parahtml's batching did not insert.
    # NOTE(review): the loop variable MUST stay named `filepath` -- parahtml
    # accidentally reads the global `filepath` instead of its `filePath`
    # parameter, so renaming it here would break that function.
    for filepath in getDirAllFiles(sPath):
        parahtml(filepath)
    InsertSqlToDB3()
