import requests
import js2xml
from lxml import etree
import socket
import datetime
from DAO.DAO_research_direction import DAO_research_direction
from DAO.DAO_major_catalog import DAO_major_catalog
class UPDATE_research_direction:
    """Crawler for research-direction (研究方向) data on college.wendu.com.

    Flow: school search pages -> per-school major-list page -> per-major
    detail page, where the research directions are extracted, printed and
    (when the DAO calls are re-enabled) written to the database table
    ``research_direction``.
    """

    # The site expects a browser-like User-Agent on every request.
    _HEADERS = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.87 Safari/537.36"}

    def major_catalog_link(self):
        """Walk the school search result pages (1-69) and follow each
        school's major-list link.

        NOTE(review): the two ``break`` statements below stop after the
        first school of the first page — they look like debugging
        limiters left in on purpose; remove them to crawl everything.
        """
        urls = ['http://college.wendu.com/index.php?m=university&c=search&a=search&page={}'.format(i) for i in range(1, 70)]
        for url in urls:
            print(url)
            # Belt-and-braces: requests' own timeout below is the real guard.
            socket.setdefaulttimeout(600)
            response = requests.get(url, headers=self._HEADERS, timeout=600)
            response_html = etree.HTML(response.content.decode())
            schools = response_html.xpath("//a[@class='school-list-itemLeft']")
            for school in schools:
                school_name = school.xpath(".//h3[@class='school-item-name']/text()")[0]
                # The school id is the second dash-separated token of the href.
                major_link = ("http://college.wendu.com/major_list-"
                              + school.xpath("./@href")[0].split("-")[1] + "-1-a-a")
                # Dedup check via DAO_major_catalog.academic_empty() is
                # currently disabled; every school is (re)crawled.
                print(school_name, major_link)
                self.details_link(major_link)
                break  # debugging limiter: first school only
            break  # debugging limiter: first page only

    def details_link(self, url):
        """Crawl one school's major-list page and visit every major's
        detail page.

        :param url: a ``major_list-...`` URL produced by major_catalog_link().
        """
        socket.setdefaulttimeout(600)
        response = requests.get(url, headers=self._HEADERS, timeout=600)
        response_str = response.content.decode()
        response_html = etree.HTML(response_str)
        script_list = response_html.xpath("//script/text()")
        try:
            # The 12th inline <script> holds a JS ``var datas = [...]``
            # array with one object per major; js2xml converts the JS to
            # XML so the ids can be pulled out with XPath.
            script_xml = js2xml.parse(script_list[11], encoding='utf-8', debug=False)
            script_tree = js2xml.pretty_print(script_xml)
            script_data_html = etree.HTML(script_tree)
            majors = script_data_html.xpath('//var[@name="datas"]/array/object')
            for major in majors:
                unvid = major.xpath('.//property[@name="unvid"]/string/text()')[0]
                acadid = major.xpath('.//property[@name="acadid"]/string/text()')[0]
                majorid = major.xpath('.//property[@name="majorid"]/string/text()')[0]
                details_link = "http://college.wendu.com/major_detail-" + unvid + "-" + acadid + "-" + majorid
                maj_name = major.xpath('.//property[@name="major_name"]/string/text()')[0]
                maj_uni_name = response_html.xpath('//p[@class="where"]/a[2]/text()')[0]
                # Dedup check via DAO_major_catalog.maj_academic_empty()
                # is currently disabled.
                print(details_link, maj_name, maj_uni_name)
                self.research_direction(details_link)
        except Exception as err:
            # Was a bare ``except:`` that swallowed everything silently;
            # at least surface what went wrong.
            print("爬取查看详情链接有错误》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》", err)

    def research_direction(self, url):
        """Scrape one ``major_detail`` page: extract the research
        directions plus enrollment metadata, print them, and (when the
        DAO calls are re-enabled) insert them into ``research_direction``.

        :param url: a ``major_detail-...`` URL.
        """
        socket.setdefaulttimeout(600)
        response = requests.get(url, headers=self._HEADERS, timeout=600)
        response_html = etree.HTML(response.content.decode())
        # Major name (专业名称): third text node of the breadcrumb.
        res_maj_name = response_html.xpath("//p[@class='where']/text()")[2]
        # College/faculty name (院校名称)
        res_college_name = response_html.xpath("//ul[@class='clearfix']/li[2]/span/text()")[0]
        # University name (学校名称)
        res_uni_name = response_html.xpath("//ul[@class='clearfix']/li[1]/span/text()")[0]
        # Academic type (学术类型) — consumed by the (disabled) catalog update.
        maj_academic = response_html.xpath("//ul[@class='clearfix']/li[3]/span/text()")[0]
        # Container <div> holding all research-direction entries.
        research_direction_one = response_html.xpath("//ul[@class='jb_content']/li[2]/div[@class='menu']/div")[0]
        # Enrollment year (招生年份), value after the full-width colon.
        res_year = response_html.xpath("//ul[@class='jb_content']/li[3]/ul/li[1]/text()")[0].split("：")[1]
        # Applicants (报考人数) — typically empty on the site.
        res_register_num = response_html.xpath("//ul[@class='jb_content']/li[3]/ul/li[3]/text()")[0].split("：")[1]
        # Admitted (录取人数) — typically empty.
        res_enroll_num = response_html.xpath("//ul[@class='jb_content']/li[3]/ul/li[2]/text()")[0].split("：")[1]
        # Recommended-exempt (推免人数) — typically empty.
        res_recommend_num = response_html.xpath("//ul[@class='jb_content']/li[3]/ul/li[5]/text()")[0].split("：")[1]
        # Direction names may live in bare text nodes, <span>s or <p>s of
        # the container; collect them all, "<br>"-separated.  Bare text
        # nodes get every run of whitespace squeezed out, and whitespace-
        # only nodes are dropped.  (The previous version reused one
        # accumulator string across loop iterations, so each node leaked
        # into the next — fixed by compacting each node independently.)
        parts = []
        for raw_text in research_direction_one.xpath("./text()"):
            compact = "".join(raw_text.split())
            if compact:
                parts.append(compact)
        parts.extend(research_direction_one.xpath("./span/text()"))
        parts.extend(research_direction_one.xpath("./p/text()"))
        res_name = "".join(part + "<br>" for part in parts)
        if res_name != "":  # was `is not ""` — identity check on a literal
            try:
                # Presumably the study mode, taken from the first
                # parenthesised chunk of the name — verify against pages.
                res_way = res_name.split(")")[0].split("(")[1]
            except IndexError:
                res_way = ""  # no parenthesised part present
            print(res_year, res_name, res_maj_name, res_college_name, res_uni_name, res_way, res_register_num, res_enroll_num, res_recommend_num)
            # DAO_research_direction().add(...) is currently disabled.
        # DAO_major_catalog().update_maj_academic(maj_academic, ...) — the
        # write-back of the academic type — is also currently disabled.


if __name__ == '__main__':
    # Simple menu; the interactive prompt is disabled and the choice is
    # hard-coded below.
    print("1、爬取所有院校 研究方向 数据")
    print("2、爬取一个专业 研究方向 数据")
    print("3、清空数据库")
    print("4、输出所有数据")
    # res = int(input())  # re-enable for interactive use
    res = 1
    if res == 1:
        # Crawl research-direction data for every school, timing the run.
        starttime = datetime.datetime.now()
        research_direction = UPDATE_research_direction()
        research_direction.major_catalog_link()
        endtime = datetime.datetime.now()
        processtime = endtime - starttime
        print("开始时间: ", starttime, "   结束时间: ", endtime)
        # total_seconds() so the printed value matches the "秒" (seconds)
        # label; previously the raw timedelta was printed.
        print("运行时间: ", processtime.total_seconds(), "秒")
    elif res == 2:
        # Crawl a single school's major list.
        research_direction = UPDATE_research_direction()
        research_direction.details_link("http://college.wendu.com/major_list-3-1-a-a")
    elif res == 3:
        # Truncate the research_direction table (call currently disabled).
        dao = DAO_research_direction()
        # dao.truncate()
    elif res == 4:
        # Dump all stored rows (call currently disabled).
        dao = DAO_research_direction()
        # dao.sel_all()
