import re
from multiprocessing import Pool

import lxml.html
import pymysql
import requests
from bs4 import BeautifulSoup  # getsoup() already calls BeautifulSoup; the import was missing

# Download a web page
def pagedown(url, timeout=30):
    """Fetch *url* and return the response body decoded as GBK.

    The site serves GBK-encoded pages, hence the explicit decode.
    *timeout* (seconds) bounds the request; the original call had no
    timeout and could hang forever on a stalled connection.
    """
    return requests.get(url, timeout=timeout).content.decode('gbk')

# Parse a page into a soup object
def getsoup(url):
    """Download *url* and return it parsed as a BeautifulSoup document (lxml parser)."""
    html = pagedown(url)
    return BeautifulSoup(html, "lxml")

# Extract the title
def gettitle(soup):
    """Pull "book chapter" out of the page's <title> tag.

    The site's titles look like "<title> Book  Chapter_site..."; the two
    captured groups are rejoined with a single space.
    """
    raw_title = str(soup.select("title")[0])
    matches = re.findall("<title> (.*?)  (.*?)_.*", raw_title)
    book, chapter = matches[0]
    return book + " " + chapter

# Extract the body text
def getcontext(soup):
    """Return the first <p> block's text with whitespace and <p>/<br/> tags stripped."""
    contextList = soup.select("p")
    # Raw string: the old "\s" was an invalid escape sequence
    # (SyntaxWarning on Python 3.12+). The pattern's value is unchanged.
    context = re.sub(r"\s|<br/>|<p>|</p>", "", str(contextList[0]))
    return context

# Assemble one article
def getessay(soup):
    """Extract (title, context) from a parsed page, or None if parsing fails."""
    try:
        title = gettitle(soup)
        context = getcontext(soup)
    except Exception as exc:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate; report what actually went wrong.
        print("error:acquire title or context:", exc)
        return None
    return title, context

# Get every chapter URL of the same novel in the range [url, lastUrl]
def geturls(url, lastUrl):
    """List every chapter URL from *url* up to and including *lastUrl*.

    Chapter URLs end in a two-digit page number before ".html"; the
    common prefix is taken from *url* and page numbers below 10 are
    zero-padded to two digits.
    """
    first = re.findall(r'(.*?)(\d\d).html', url)
    last = re.findall(r'.*?(\d\d).html', lastUrl)
    prefix = first[0][0]
    start_page = int(first[0][1])
    end_page = int(last[0])
    pages = []
    for page in range(start_page, end_page + 1):
        suffix = str(page) if page >= 10 else '0' + str(page)
        pages.append(prefix + suffix + '.html')
    return pages

# Open a connection
def getcon():
    """Open a MySQL connection to the local `web` database (utf8).

    The file imports ``pymysql`` (not ``MySQLdb``, which was referenced
    here but never imported) -- use the driver that is actually in scope.
    """
    return pymysql.connect(host="localhost", user="java", password="java",
                           database="web", charset="utf8")
# Close a connection
def closecon(con):
    """Release a database connection obtained from getcon()."""
    con.close()
# TODO: create a connection pool
# TODO: close the connection pool

# Create the table
def maketable():
    """Drop and recreate the `globularLighting` table."""
    con = getcon()
    try:
        cursor = con.cursor()
        cursor.execute("DROP TABLE IF EXISTS globularLighting;")
        sql = """CREATE TABLE globularLighting (
                 title  VARCHAR(30) NOT NULL,
                 context  TEXT );"""
        cursor.execute(sql)
        con.commit()
    finally:
        # The original leaked the connection when execute() raised.
        closecon(con)

# Insert data
def store(title, context):
    """Insert one (title, context) row; rolls back and re-raises on failure."""
    con = getcon()
    try:
        cursor = con.cursor()
        # Parameterized query: the old %-formatting was vulnerable to SQL
        # injection and broke whenever the scraped text contained a quote.
        sql = "INSERT INTO globularLighting(`title`, `context`) VALUES (%s, %s)"
        cursor.execute(sql, (title, context))
        con.commit()
    except Exception:
        con.rollback()
        raise
    finally:
        closecon(con)

#
def novel_crawling(url):
    """Crawl one chapter page and persist it: download, parse, store.

    Top-level function (not a lambda/closure) so multiprocessing can
    pickle it for the worker pool. Reconstructs the missing
    `Novel_crawling` the original main() referenced.
    """
    essay = getessay(getsoup(url))
    if essay is not None:
        title, context = essay
        store(title, context)


def main():
    """Build the chapter URL list and crawl the pages with a worker pool."""
    url_list = []
    for i in range(7, 40):
        if i == 35:
            # Page 116035 is missing on the site -- skip it.
            continue
        if i < 10:
            url_list.append("https://www.kanunu8.com/book3/6633/11600{}.html".format(i))
        else:
            url_list.append("https://www.kanunu8.com/book3/6633/1160{}.html".format(i))

    # The original mapped over [url_list] (a single-element list), handing
    # the whole list to one worker; map over the URLs themselves instead.
    with Pool(5) as pool:
        pool.map(novel_crawling, url_list)


if __name__ == "__main__":
    main()
