# Import required modules
import requests
from bs4 import BeautifulSoup
import random
import time
import re
from elasticsearch import Elasticsearch
# from es import NovelType

# Connect to the local Elasticsearch instance.
# Used by gen_sugg (analyze API) and get_book_infor (document indexing).
es=Elasticsearch("http://localhost:9200")
# Helper: use the index's analyzer to build search-suggestion entries
def gen_sugg(index, info_tuple):
    """Build a completion-suggester payload from (text, weight) pairs.

    Each text is tokenized with the index's ``ik_max_word`` analyzer;
    single-character tokens are dropped.  A token keeps the weight of the
    first (highest-priority) text it appeared in.

    Args:
        index: name of the ES index whose analyzer settings are used.
        info_tuple: iterable of (text, weight) pairs, ordered by priority.

    Returns:
        A list of ``{"input": [tokens...], "weight": weight}`` dicts.
    """
    used_words = set()  # tokens already assigned a weight (first weight wins)
    suggestion = []
    for txt, weight in info_tuple:
        if not txt:
            continue
        # Ask ES to tokenize the string with the ik_max_word analyzer.
        result = es.indices.analyze(index=index,
                                    body={'text': "{0}".format(txt), 'analyzer': "ik_max_word"})
        tokens = {r["token"] for r in result["tokens"] if len(r["token"]) > 1}  # drop single chars
        new_words = tokens - used_words
        if new_words:
            # BUG FIX: the used-word set was never updated before, so tokens
            # repeated in later (lower-weight) texts were not deduplicated.
            used_words |= new_words
            suggestion.append({"input": list(new_words), "weight": weight})
    return suggestion
# 2. Request headers: a pool of User-Agent strings so the crawler does not
# advertise a single default client signature.
user_agents = [
    "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; AcooBrowser; .NET CLR 1.1.4322; .NET CLR 2.0.50727)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0; Acoo Browser; SLCC1; .NET CLR 2.0.50727; Media Center PC 5.0; .NET CLR 3.0.04506)",
    "Mozilla/4.0 (compatible; MSIE 7.0; AOL 9.5; AOLBuild 4337.35; Windows NT 5.1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)",
    "Mozilla/5.0 (Windows; U; MSIE 9.0; Windows NT 9.0; en-US)",
    "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Trident/5.0; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 2.0.50727; Media Center PC 6.0)",
    "Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 1.0.3705; .NET CLR 1.1.4322)",
    "Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 5.2; .NET CLR 1.1.4322; .NET CLR 2.0.50727; InfoPath.2; .NET CLR 3.0.04506.30)",
    "Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN) AppleWebKit/523.15 (KHTML, like Gecko, Safari/419.3) Arora/0.3 (Change: 287 c9dfb30)",
    "Mozilla/5.0 (X11; U; Linux; en-US) AppleWebKit/527+ (KHTML, like Gecko, Safari/419.3) Arora/0.6",
    "Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.2pre) Gecko/20070215 K-Ninja/2.1.1",
    "Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN; rv:1.9) Gecko/20080705 Firefox/3.0 Kapiko/3.0",
    "Mozilla/5.0 (X11; Linux i686; U;) Gecko/20070322 Kazehakase/0.4.5",
    "Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.8) Gecko Fedora/1.9.0.8-1.fc10 Kazehakase/0.5.6",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_3) AppleWebKit/535.20 (KHTML, like Gecko) Chrome/19.0.1036.7 Safari/535.20",
    "Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; fr) Presto/2.9.168 Version/11.52",
]
# Shared headers for every request made by this script.
# NOTE(review): random.choice runs once at import time, so the whole crawl
# uses a single User-Agent; to rotate per request, pick inside each fetch.
request_headers={
    "User-Agent":random.choice(user_agents),
    "Connection":"keep-alive",
    "Referer":"https://www.douban.com"
}

# Fetch one listing page and extract every book's detail-page URL from it
def get_per_page_link(url, timeout=10):
    """Fetch one listing page and scrape every book linked from it.

    For each book anchor found, builds the absolute detail-page URL and
    hands it to get_book_infor, sleeping briefly between requests.

    Args:
        url: absolute URL of a listing page.
        timeout: per-request timeout in seconds (added so a stalled server
            cannot hang the crawl forever; default keeps callers unchanged).
    """
    response = requests.get(url=url, headers=request_headers, timeout=timeout)
    if response.status_code != 200:
        print("网页打不开")
        return
    soup = BeautifulSoup(response.text, 'lxml')
    # Each book link lives in <div class="info"><h4><a href="...">.
    for book_homepage_href in soup.select('div.info>h4>a'):
        book_href = 'https://txt80.cc' + book_homepage_href.get('href').strip()
        get_book_infor(book_href)
        time.sleep(0.5)  # throttle to be polite to the server

def get_book_infor(book_href):
    """Scrape one book's detail page and index it into Elasticsearch.

    Side effects: increments the module-level ``id`` counter, records the
    page in ``page_index`` and its outgoing links in ``web_graph`` (consumed
    later by the PageRank step), and creates a document in ``novelindex``.

    Args:
        book_href: absolute URL of the book's detail page.
    """
    resp = requests.get(url=book_href, headers=request_headers)
    if resp.status_code != 200:
        print("网页打不开")
        return
    # Decode the raw bytes explicitly as UTF-8 before parsing.
    soup_document = BeautifulSoup(resp.content.decode('utf-8'), 'lxml')
    # Book title
    book_name = soup_document.select(' div.nrlist > dl > dd.bt>h2')[0].get_text().strip()
    # Author
    author_name = soup_document.select('div.nrlist > dl > dd:nth-child(3) > a')[0].get_text().strip()
    # Serialization status
    novel_status = soup_document.select(' div.nrlist > dl > dd:nth-child(4) > span')[0].get_text().strip()
    # Genre / category
    novel_diversity = soup_document.select('div.nrlist > dl > dd:nth-child(5) > a')[0].get_text().strip()
    # Licensed media
    novel_authority = soup_document.select('div.nrlist > dl > dd:nth-child(6) > span')[0].get_text().strip()
    # File format, with the "小说格式：" label stripped off
    novel_format = soup_document.select('div.nrlist > dl > dd:nth-child(7)')[0].get_text().strip()
    novel_format = re.sub('小说格式：', '', novel_format)
    # File size, whitespace removed (raw string fixes the '\s' escape warning)
    novel_size = soup_document.select('div.nrlist > dl > dd:nth-child(8) > span')[0].get_text().strip()
    novel_size = re.sub(r'\s', '', novel_size)
    # Publication date
    publish_time = soup_document.select('div.nrlist > dl > dd:nth-child(9) > span')[0].get_text().strip()
    # Synopsis
    content_introduction = soup_document.select('div.softsayxq > div')[0].text.strip()
    # Other books recommended for this author: collect both title and URL in
    # a single pass (the original ran two identical select() loops).
    author_recommend = []
    related_url = []
    for recommend_book in soup_document.select('div.tuijian1_box > ul > li > a'):
        author_recommend.append(recommend_book.get_text().strip())
        related_url.append('https://txt80.cc' + recommend_book.get('href').strip())
    # Download link for the novel file
    download_address = 'https://txt80.cc' + soup_document.select('div.down > div > ul > li > p > b > a')[0].get('href').strip()
    body = {
        "suggestion": gen_sugg("novelindex", ((book_name, 6), (content_introduction, 4))),
        "book_href": book_href,
        "book_name": book_name,
        "author_name": author_name,
        "novel_status": novel_status,
        "novel_diversity": novel_diversity,
        "novel_authority": novel_authority,
        "novel_format": novel_format,
        "novel_size": novel_size,
        "publish_time": publish_time,
        "author_recommend": author_recommend,
        "related_url": related_url,
        "download_address": download_address,
        "pagerank": 0,
        "content_introduction": content_introduction,
    }
    # Update the shared crawl state (ids, PageRank link graph).
    global id
    id = id + 1
    global page_index
    page_index.update({book_href: id})
    global web_graph
    web_graph.update({book_href: related_url})
    # ignore=400 suppresses the error when a document with this id exists.
    result = es.create(index='novelindex', id=id, body=body, ignore=400)
    print(result)



if __name__ == "__main__":
    # Shared crawl state, mutated by get_book_infor via `global`.
    # (The counter must keep the name `id` — get_book_infor refers to it.)
    id = -1          # document id counter; first document gets id 0
    page_index = {}  # url -> document id
    web_graph = {}   # url -> list of outgoing urls (PageRank adjacency)
    # Listing pages: index.html followed by index_2.html .. index_772.html.
    urls = ['https://txt80.cc/yanqing/index_{}.html'.format(i) for i in range(2, 773)]
    urls.insert(0, 'https://txt80.cc/yanqing/index.html')
    for url in urls:
        get_per_page_link(url)
        time.sleep(0.3)
    # Snapshot the dicts so we can add entries while iterating.
    # BUG FIX: the original round-tripped them through eval(str(...)) — a
    # fragile and unsafe way to copy; a plain shallow copy is correct here.
    graph = dict(web_graph)
    index = dict(page_index)
    # Any page that was linked to but never crawled still needs an id and an
    # (empty) adjacency list so the PageRank matrix is complete.
    for page in index:
        for outlink in graph[page]:
            # BUG FIX: check the live page_index, not the stale snapshot;
            # the original re-assigned a fresh id every time the same
            # uncrawled page was linked from another page, leaving gaps.
            if outlink not in page_index:
                print('None')
                id = id + 1
                page_index.update({outlink: id})
                web_graph.update({outlink: []})
    with open('./PageRank/page_index.txt', 'w', encoding='utf-8') as pfile:
        pfile.write(str(page_index))
    with open('./PageRank/web_graph.txt', 'w', encoding='utf-8') as wfile:
        wfile.write(str(web_graph))



