import json
from bs4 import BeautifulSoup
import re
from selenium import webdriver
import time
from queue import Queue
import os
import network
import page_analysis
from urllib.request import quote, unquote

# Load a JSON file and return its parsed contents.
def open_json(path):
    """Read the UTF-8 JSON file at *path* and return the decoded object."""
    with open(path, 'r', encoding='utf-8') as handle:
        return json.load(handle)


# If the network is down (detected from the page content), reconnect (Firefox).
def juge_internet(soup):
    """Check connectivity by inspecting the currently loaded page.

    :param soup: BeautifulSoup of the page the browser just loaded
    :return: False if the page is Firefox's "site not found" error page
             (a reconnect is attempted first); True otherwise
    """
    try:
        inter = soup.findAll('div', {'class': 'title'})[0].get_text(strip=True)
        # Firefox offline error page: drop and re-establish the connection.
        if inter == '呃…找不到这个网站。':
            network.disconnect('liufei')
            network.connect('liufei', '15955124015')
            return False
        else:
            return True
    except Exception:
        # BUG FIX: was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit and made the crawler unkillable here.
        # No error-page title element found — treat the page as loaded.
        return True


# Configure and open a Firefox browser.
def new_browser():
    """Open a Firefox instance and verify connectivity.

    Images are disabled and page loads are capped at 3 seconds. After the
    initial load of baike.baidu.com, connectivity is checked via
    juge_internet; on failure the broken instance is closed and a fresh
    one is opened recursively until the network is up.

    :return: a ready-to-use selenium Firefox webdriver
    """
    profile = webdriver.FirefoxProfile()
    # Disable image loading to speed up crawling.
    profile.set_preference("permissions.default.image", 2)
    browser = webdriver.Firefox(profile)
    # browser.minimize_window()
    # Cap the page-load time.
    browser.set_page_load_timeout(3)
    try:
        browser.get('https://baike.baidu.com/')
    except Exception:
        # Load timed out — stop the page and work with what rendered.
        browser.execute_script('window.stop()')
    html = browser.page_source
    soup = BeautifulSoup(html, 'lxml')
    if not juge_internet(soup):
        browser.close()
        # BUG FIX: the retried browser must be returned; the original
        # discarded the recursive result and handed back the CLOSED instance.
        return new_browser()

    return browser


# Search for a lemma by typing its name into the search box.
def browser_click(browser, name):
    """Type *name* into the wiki search box, submit, and return *browser*."""
    search_box = browser.find_element_by_id("query")
    search_box.clear()
    search_box.send_keys(name)
    submit_button = browser.find_element_by_id("search")
    submit_button.click()
    return browser


# Decide whether a result page is usable.
def exact_useless(html):
    """Check whether *html* is a valid, indexed lemma page.

    :param html: page source string
    :return: True if the page is a usable lemma page; None otherwise
             (missing search form / missing lemma title / lemma not indexed)
    """
    soup = BeautifulSoup(html, 'lxml')
    try:
        inter_1 = soup.findAll('form', {'id': 'searchForm'})[0]  # search form present => page rendered
        inter_3 = soup.findAll('dd', {'class': 'lemmaWgt-lemmaTitle-title'})[0]  # lemma title present => valid lemma
        inter_2 = soup.findAll('div', {'class': 'create-entrance'})  # "create this entry" box => lemma may not be indexed
        if inter_2:
            returnText = inter_2[0].p.get_text()
            # BUG FIX: removed a stray debug `print()` left in this path.
            if re.match(r'.*百度百科尚未收录.*', returnText):
                # Not indexed by the encyclopedia — useless.
                return None
            # NOTE(review): a create-entrance box whose text does NOT say
            # "尚未收录" also fell through to None in the original; made the
            # fall-through explicit — confirm this is the intended behavior.
            return None
        else:
            return True
    except Exception:
        # Missing search form or lemma title — useless page.
        # (Narrowed from a bare `except:`.)
        return None


# Detect whether the anti-crawling page was served.
def find_anti_crawl(html):
    """Return None when *html* is an anti-crawl page, True otherwise."""
    soup = BeautifulSoup(html, 'lxml')
    # The sentinel element only exists on the anti-crawl page; on a normal
    # page the lookup finds nothing.
    markers = soup.findAll('1', {'2': '3'})
    if markers:
        return None
    return True


def open_url_by_name(browser , name_OR_ID , tag):
    '''
    Search for a lemma by name, or open its URL directly, and return the html.

    :param browser: live selenium browser
    :param name_OR_ID: lemma name, or lemma URL when tag == "ID"
    :param tag: "name" to search by lemma name; "ID" to open the URL directly
    :return: "anti" if the anti-crawl page was served (browser is closed);
             "useless" if the page is invalid; otherwise the page html
    '''
    try:
        if tag=="name":
            browser=browser_click(browser,name_OR_ID)   # open via lemma name
        if tag=="ID":
            browser.get(name_OR_ID) # open via lemma URL
    except Exception:
        # Page load timed out — force-stop and use what rendered.
        # (Narrowed from a bare `except:`.)
        browser.execute_script('window.stop()')

    html = browser.page_source

    # Classify the loaded page.
    if not find_anti_crawl(html):   # anti-crawl page served
        browser.close()
        print('error!!!!!!!!!!')
        return 'anti'
    elif not exact_useless(html):   # useless page
        try:
            # Navigate back home so the next search starts from a clean state.
            browser.get('https://baike.baidu.com/')
        except Exception:
            browser.execute_script('window.stop()')
        print('error==========')
        return 'useless'
    else:   # valid
        return html


def crawl_one_page(html, crawler=None):
    '''
    Extract all data from one lemma page.

    :param html: page source of the lemma page
    :param crawler: selenium browser the page was loaded in; when omitted,
                    falls back to the module-global ``browser`` set in
                    ``__main__`` (kept for backward compatibility)
    :return: (nameID, propertyDict, abstract, text, tongyi_list, url)
    '''
    if crawler is None:
        # BUG FIX: the original read the global ``browser`` implicitly (and
        # its docstring documented a parameter that did not exist), which
        # breaks if this function is imported elsewhere. The global fallback
        # preserves the existing call site.
        crawler = browser

    propertyDict=page_analysis.get_property(html)   # property-name dict
    abstract=page_analysis.get_abstract(html)   # abstract string
    text=page_analysis.get_text(html)   # body-text string
    nameID=page_analysis.get_name(html)   # lemma name
    tongyi_list=page_analysis.get_tongyi(html)  # synonym url list
    url=unquote(crawler.current_url)    # percent-decoded lemma URL

    propertyDict['strID']=nameID
    propertyDict['numID']=url

    return nameID,propertyDict,abstract,text,tongyi_list,url


if __name__=='__main__':

    def _dump_json(path, obj):
        # Persist *obj* as pretty-printed UTF-8 JSON at *path*.
        with open(path, 'w', encoding='utf-8') as f:
            f.write(json.dumps(obj, indent=4, ensure_ascii=False))

    def _save_batch(file_num, url_dict, text_dict, property_dict, tongyi_dict, crawled):
        # Write the four per-batch result dicts plus the crawl-progress ledger.
        # (Extracted: this five-file stanza was duplicated verbatim in the
        # original, once mid-loop and once at the end.)
        _dump_json('./result/nameID_url_dict_' + str(file_num) + '.json', url_dict)
        _dump_json('./result/nameID_text_dict_' + str(file_num) + '.json', text_dict)
        _dump_json('./result/num_property_dict_' + str(file_num) + '.json', property_dict)
        _dump_json('./result/nameID_tongyiList_dict_' + str(file_num) + '.json', tongyi_dict)
        _dump_json('have_crawl.json', crawled)

    # Already-crawled lemmas: key is the lemma name; value 1 = person,
    # 0 = not a person, -1 = invalid url, 1024 = unexpected error.
    have_crawl_urlDicts=open_json('have_crawl.json')
    # All person names to crawl.
    allUsefulPeopleDict=open_json('all_name.json')

    nameID_url_dict={}  # name -> url
    nameID_text_dict={} # name -> full text
    num_property_dict={} # name -> property dict
    nameID_tongyiList_dict={}   # name -> synonym list

    num=500000    # person counter
    fileNum=501    # output-file counter
    have_crawl_num = 0   # pages opened so far
    browser=new_browser()

    for name in allUsefulPeopleDict.keys():
        if name in have_crawl_urlDicts.keys(): # already crawled
            continue
        open_result = open_url_by_name(browser, name, 'name')  # open the page
        if open_result == 'anti':   # anti-crawl hit: the browser was closed, start a new one
            have_crawl_urlDicts[name]=-1
            browser = new_browser()
            have_crawl_num+=1
            continue
        if open_result == 'useless':  # invalid page
            have_crawl_urlDicts[name]=1024
            have_crawl_num+=1
            continue
        html = open_result  # valid lemma
        (nameID,propertyDict,abstract,text,tongyi_list,url)=crawl_one_page(html)  # extract the page contents
        # Record the results.
        nameID_url_dict[nameID]=url
        nameID_text_dict[nameID]=abstract+'\n'+text
        num_property_dict[nameID]=propertyDict
        nameID_tongyiList_dict[nameID]=tongyi_list
        have_crawl_urlDicts[name] = 1  # crawled
        print(str(num)+'\t'+nameID)
        num=num+1
        have_crawl_num += 1

        if have_crawl_num%100==0:    # periodically restart the browser to clear caches
            browser.close()
            browser = new_browser()
        if num == fileNum * 1000:    # flush a completed batch of 1000 to disk
            _save_batch(fileNum, nameID_url_dict, nameID_text_dict,
                        num_property_dict, nameID_tongyiList_dict,
                        have_crawl_urlDicts)
            # Reset the per-batch accumulators.
            nameID_url_dict = {}
            nameID_text_dict = {}
            num_property_dict = {}
            nameID_tongyiList_dict = {}
            fileNum = fileNum + 1    # bump the file counter

    # Final flush of whatever remains after the loop.
    _save_batch(fileNum, nameID_url_dict, nameID_text_dict,
                num_property_dict, nameID_tongyiList_dict,
                have_crawl_urlDicts)