#coding:utf-8

import threading
import requests
from bs4 import BeautifulSoup
import re
from biquge.util import getSoup
from biquge.util import dataToDictionaries
from biquge.DBUtil import insertNovel
from biquge.DBUtil import insertNovelItem
from biquge.DBUtil import checkIsExist
import random
import time



class myThread (threading.Thread):
    """Worker thread that scrapes a single novel into the database.

    Each thread uses its numeric ``threadID`` as an index into the shared
    ``urlList``/``pathList`` to pick the novel it is responsible for.
    """

    def __init__(self, threadID, name, counter, urlList, pathList, baseUrl):
        super().__init__()
        self.threadID = threadID
        self.name = name
        self.counter = counter
        self.baseUrl = baseUrl
        self.urlList = urlList
        self.pathList = pathList

    def run(self):
        # Invoked automatically once start() is called: scrape the novel
        # assigned to this thread and persist it.
        print("Starting " + self.name)
        insertContentToDB(
            self.urlList[self.threadID],
            self.pathList[self.threadID],
            self.baseUrl,
        )
        print("Exiting " + self.name)


# Generate the novel as per-chapter .txt files on disk
def getContentTxt(url, path, baseUrl):
    """Download every chapter of a novel and save each one as a .txt file.

    Parameters:
        url: index page of the novel (contains the chapter list).
        path: path prefix the chapter files are written under.
        baseUrl: site root, prepended to the relative chapter hrefs.
    """
    index_resp = requests.get(url)
    # These pages rarely declare their charset correctly; trust the sniffer.
    index_resp.encoding = index_resp.apparent_encoding
    index_soup = BeautifulSoup(index_resp.text, features='html.parser')

    chapter_list = index_soup.find(id="list")
    if chapter_list is None:
        # Unexpected page layout (e.g. block page): nothing to download.
        # Previously this crashed with AttributeError on .find("dl").
        print(index_soup)
        return

    for dd_item in chapter_list.find("dl").find_all('dd'):
        href = dd_item.find('a').attrs.get('href')
        chapter_resp = requests.get(baseUrl + href)
        chapter_resp.encoding = chapter_resp.apparent_encoding
        chapter_soup = BeautifulSoup(chapter_resp.text, features='html.parser')

        bookname = chapter_soup.find(class_="bookname").find('h1').get_text()
        # .text has already stripped the tags; this only removes literal
        # '<br/>' sequences that survive as plain text.
        content = re.sub('<br/>', '', chapter_soup.find(id='content').text)

        # Text-mode UTF-8 write (newline='' keeps the bytes identical to the
        # old manual encode()+binary-write round-trip).
        with open(path + bookname + '.txt', 'w', encoding='utf-8', newline='') as f:
            f.write(content)


# Import the scraped novel data into the database
def insertContentToDB(url, path, baseUrl):
    """Scrape one novel's metadata and chapters and insert them into the DB.

    Parameters:
        url: index page of the novel.
        path: kept for interface compatibility with callers (unused here).
        baseUrl: site root, prepended to relative image/chapter hrefs.
    """
    soup = getSoup(url)
    div_info = soup.find(id="info")
    if div_info is None:
        # Unexpected page layout: dump it for diagnosis and bail out.
        # (Previously execution fell through and crashed with AttributeError
        # on div_info.find('h1').)
        print(soup)
        return

    # Novel title
    novelName = div_info.find('h1').text

    # Skip as early as possible if this novel was already scraped, before
    # doing any further parsing or network work.
    if checkIsExist(novelName):
        print("有声小说" + novelName + " 已经存在,不再继续爬取数据")
        return

    # Author, with the site's label prefix stripped.
    author = div_info.find('p').text.replace("作    者：", "")
    # Cover image: the page carries a relative src.
    img = baseUrl + soup.find(id="fmimg").find('img').get('src')
    # Synopsis
    intro = soup.find(id="intro").find('p').text

    # Novel category: the text between '>' markers in the breadcrumb bar.
    breadcrumb = str(soup.find(class_="con_top").text)
    matches = re.compile(r'>(.*)>').findall(breadcrumb)
    novel_type = str(matches[0]).strip() if matches else ""
    novel_type = dataToDictionaries(novel_type)

    # Pseudo-unique id: a shuffled permutation of the digits 0-8 followed by
    # the current timestamp. (Renamed from `id` to avoid shadowing the builtin.)
    t = time.time()
    novel_id = ''.join(str(d) for d in random.sample(range(0, 9), 9)) + str(t)

    # Insert the novel row into b_novel.
    insertNovel(novel_id, novelName, img, author, novel_type, intro)

    # Insert every chapter, numbered from 1 in page order.
    dd_list = soup.find(id="list").find("dl").find_all('dd')
    for i, dd_item in enumerate(dd_list, start=1):
        href = dd_item.find('a').attrs.get('href')
        chapter_soup = getSoup(baseUrl + href)
        if chapter_soup.find(class_="bookname") is None:
            # Broken/blocked chapter page: dump it and skip this chapter
            # rather than crash the whole novel (previously: print + crash).
            print(chapter_soup)
            continue
        bookname = chapter_soup.find(class_="bookname").find('h1').get_text()
        content = re.sub('<br/>', '', chapter_soup.find(id='content').text)
        insertNovelItem(bookname, novel_id, i, content)



