import requests
from bs4 import BeautifulSoup
import re
import pymssql as ms
import 笔趣阁 as bqg
import 搜搜小说网 as ss
import 第二课堂 as xbqg

# pymssql connection settings for the local SQL Server instance.
# NOTE(review): credentials are hard-coded; consider moving them to
# environment variables or a config file.
sqlparms = {
    "host": "127.0.0.1",
    "user": "sa",
    "password": "zxczxczxc1",
    "database": "SNDB",
    "charset": "utf8",
    "autocommit": True
}
# URL-prefix regular expressions used to pick the scraper for a link.
# Dots are escaped so '.' matches only a literal dot (the originals used
# a bare '.', which matched any character); trailing \S requires at
# least one non-space character after the domain.
rebqg = r'https://www\.boquge\.com\S'    # boquge (笔趣阁)
rexbqg = r'http://www\.xbiquge\.la\S'    # xbiquge (新笔趣阁)
ress = r'https://www\.soxs\.cc\S'        # soxs (搜搜小说网)


def AppendBook(BcList, bkName):
    """Search the scraping sources for *bkName* and append each result to *BcList*.

    NOTE: searching via 搜搜小说网 (ss.GetSearchPageResultByName) is
    currently disabled.
    """
    results = [
        bqg.GetSearchResult(bkName),
        xbqg.GetSearchResult(bkName),
    ]
    BcList.extend(results)


# Return the rows matched by a SELECT statement.
def selectmssql(sql, val, **dbinfo):
    """Execute *sql* with parameter tuple *val* and return all rows as dicts.

    *dbinfo* holds the pymssql connection keyword arguments. On a
    database error the transaction is rolled back, the error is printed,
    and an empty list is returned (preserving the original best-effort
    contract). The cursor and connection are always closed.
    """
    connect = ms.connect(**dbinfo)
    rows = []
    try:
        cursor = connect.cursor(as_dict=True)
        try:
            cursor.execute(sql, val)
            # Fetch only on success. The original fetched inside
            # `finally`, which raised a second, unrelated error after a
            # failed execute, and its `return` silently swallowed any
            # in-flight exception.
            rows = cursor.fetchall()
        finally:
            cursor.close()
    except Exception as err:
        connect.rollback()
        print(err)
    finally:
        connect.close()
    return rows


def myssqlUpdate(sql, val, **dbinfo):
    """Run one data-modifying statement with parameters *val*.

    *dbinfo* supplies the pymssql connection arguments (the module-level
    sqlparms enables autocommit). On error the transaction is rolled
    back and the error printed; resources are always released.
    """
    conn = ms.connect(**dbinfo)
    cur = conn.cursor()
    try:
        cur.execute(sql, val)
    except Exception as exc:
        conn.rollback()
        print(exc)
    finally:
        cur.close()
        conn.close()


def ExcuteOneSQLAndOnePrams(sql, val):
    """Execute one parameterized statement using the module-level sqlparms.

    Fix: the original called ms.connect(sqlparms), passing the settings
    dict as pymssql's first positional argument (the server name), which
    can never connect. Every sibling helper unpacks it with **, so do
    the same here. Errors are rolled back and printed, matching the
    module's best-effort style.
    """
    connect = ms.connect(**sqlparms)
    cursor = connect.cursor()
    try:
        cursor.execute(sql, val)
    except Exception as err:
        connect.rollback()
        print(err)
    finally:
        cursor.close()
        connect.close()


def save2mysql(sql, val, **dbinfo):
    """Bulk-execute *sql* once per parameter tuple in *val*.

    *dbinfo* supplies the pymssql connection arguments. On error the
    transaction is rolled back and the error printed; the cursor and
    connection are always closed.
    """
    conn = ms.connect(**dbinfo)
    cur = conn.cursor()
    try:
        cur.executemany(sql, val)
    except Exception as exc:
        conn.rollback()
        print(exc)
    finally:
        cur.close()
        conn.close()


def SaveBookInfo(b):
    """Insert book *b* into BooksInfo, then read back its assigned BID onto b.ID."""
    record = (b.name, b.type1, b.author, b.source, b.href, b.utime, 1)
    save2mysql("Insert into BooksInfo Values(%s,%s,%s,%s,%s,%s,%s)",
               [record], **sqlparms)
    found = selectmssql("Select BID From BooksInfo Where Name=%s and Author=%s",
                        (b.name, b.author), **sqlparms)
    b.ID = found[0]['BID']


# Persist a book's chapters.
def Save(zjc, bid):
    """Bulk-insert every chapter object in *zjc* for book *bid*."""
    sql = "INSERT [dbo].[ChapterInfo] ([Num], [Name], [href], [IsFinish], [MainBody], [BID], [IsAlive]) VALUES(%s,%s," \
          "%s,%s,%s,%s,%s) "
    # IsFinish=0 and IsAlive=1 for every freshly saved chapter.
    rows = [(c.num, c.name, c.href, 0, c.MainBody, bid, 1) for c in zjc]
    save2mysql(sql, rows, **sqlparms)


# Default HTTP headers: a desktop Chrome User-Agent so scraped sites
# serve the normal (non-bot) page.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
                  'Chrome/83.0.4103.61 Safari/537.36'}


# Fetch a page's raw HTML.
def GetHtmlCode(link):
    """GET *link* with the module's browser headers and return the Response."""
    response = requests.get(link, headers=headers)
    return response


# Locate the "next chapter" link.
def GetNext(soup1, parentslink):
    """Return the absolute URL of the next chapter found in *soup1*.

    The anchor text '下一章' means "next chapter"; its href is assumed to
    be relative to *parentslink*.
    """
    next_href = soup1.find('a', text='下一章')['href']
    return parentslink + next_href


def Getsoup(code):
    """Parse a requests Response into a BeautifulSoup document (html.parser)."""
    return BeautifulSoup(code.text, "html.parser")


def GetBSoup(link):
    """Download *link* and return its parsed soup."""
    page = GetHtmlCode(link)
    return Getsoup(page)


def Update(zjc, bid, href):
    """Fill in chapter bodies for *zjc* using the scraper whose URL pattern
    matches *href*, then persist the chapters for book *bid*.

    An unmatched *href* saves the chapters unchanged, as before.
    """
    # Checked in the same order as the original elif chain.
    handlers = (
        (ress, ss.GetBody),
        (rexbqg, xbqg.GetBody),
        (rebqg, bqg.GetBody),
    )
    for pattern, fetch_bodies in handlers:
        if re.match(pattern, href, re.I):
            zjc = fetch_bodies(zjc)
            break
    Save(zjc, bid)


# Get a book's latest chapters (legacy version keyed on book['Source'];
# superseded by check(href) below, kept for reference)
# def check(book):
#     Chapters = []
#     if book['Source'] == '笔趣阁':
#         Chapters = sorted(list(bqg.ZJIndex(book['href'])), key=lambda c: c.num)
#     elif book['Source'] == '搜书网':
#         Chapters = sorted(list(ss.GetChapterList(book['href'])), key=lambda c: c.num)
#         source = 1
#     elif book['Source'] == '新笔趣阁':
#         Chapters = sorted(list(xbqg.GetChapterList(book['href'])), key=lambda c: c.num)
#         source = 2
#     return Chapters


# Fetch a book's chapter list from its link.
def check(href):
    """Return the chapters scraped from *href*, sorted by chapter number.

    An unrecognized *href* yields an empty list.
    """
    # Checked in the same order as the original elif chain.
    listers = (
        (rebqg, bqg.ZJIndex),
        (ress, ss.GetChapterList),
        (rexbqg, xbqg.GetChapterList),
    )
    for pattern, list_chapters in listers:
        if re.match(pattern, href, re.I):
            return sorted(list_chapters(href), key=lambda c: c.num)
    return []
