import requests
import datetime
import socket
from lxml import etree
from DAO.Second_DAO_major_catalog import Second_DAO_major_catalog
# This class scrapes the discipline-evaluation grades and subject categories for a subset of universities
class UPDATE_major_catalog_level_subject:
    """Scrape discipline-evaluation grades (A/B/C...) and subject categories
    for universities from cdgdc.edu.cn, persisting them via the DAO layer.

    The scrape is a three-level drill-down:
      column1 (subject categories) -> column2 (majors) -> column3 (grades).
    """

    def column1_maj_subject(self):
        """Column 1: fetch the ranking landing page listing subject categories.

        The link-parsing loop that fans out into column2_maj_name() is
        currently disabled (kept below as commented-out code); as written,
        this method only downloads and prints the landing page, which is
        useful for verifying that the anti-bot session cookie still works.
        """
        # http://www.cdgdc.edu.cn/webrms/pages/Ranking/xkpmGXZJ2016.jsp
        url_pre = "http://www.cdgdc.edu.cn"
        url = "http://www.cdgdc.edu.cn/webrms/pages/Ranking/xkpmGXZJ2016.jsp"
        # NOTE(review): the Cookie below is a captured anti-bot session token;
        # it expires, so refresh it from a live browser session when the site
        # starts rejecting requests.
        headers = {"User-Agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36",
                   "Referrer Policy": "strict-origin-when-cross-origin",
                   "Cookie":"rcTiSYzKWnSvS=5KChcyCC8JI6fQspjE_owLqem6ee9ZnK6pea3VmnGz6gsH6ubkxN7rg7ASIVT3L_ccz7tivUUSS8szKA_s0GXzA; UM_distinctid=178c0c0c224a1f-0de972f165a95c-5771031-144000-178c0c0c2256f0; Hm_lvt_eaa57ca47dacb4ad4f5a257001a3457c=1618661549; Hm_lpvt_eaa57ca47dacb4ad4f5a257001a3457c=1618661687; rcTiSYzKWnSvT=53o.pJKr_0s3qqqmg6ZViJGj__D9hlShOIg40IkgSQBYhlLZ7XrFESztkt4TWv8af6jIV94aYJhYtjgj5QJnQ3CPys0R9dK_JjfjION_JNU38jhDiDSViZUjZG5bDR1GyaagHyEegEbi6LeHPYAEaldU7QXq_5mF.mWD14ypPKVh9N.ro3Zqlo4sBboiTkpoBgAbNZ85mW88Yry6jgtAayQ45N6d4mpB7KvB3e5fq.7g4wY2qHbyxyCpSdJWbjT5HZldDSluw6PoFTPIqv5MRBTQ1ayNhdpqIStlnTJvAHXH2o3NEYUuLN7W0PikMXp12E"}
        # Redundant with the explicit timeout= below, but kept as a safety net
        # for any socket requests might open without its own timeout.
        socket.setdefaulttimeout(600)
        response_pre = requests.get(url_pre, headers=headers, timeout=600)
        # response = requests.get(url, headers=headers, timeout=600)
        # The landing page decodes with the default UTF-8 here; the detail
        # pages fetched in column2/column3 are GBK-encoded instead.
        response_pre_json_str = response_pre.content.decode()
        # response_json_str = response.content.decode("GBK")

        print(response_pre_json_str)
        # Disabled parsing of the seven subject-category links; re-enable once
        # the session cookie above is known to be valid:
        # print(response_json_str)
        # response_html = etree.HTML(response_json_str)   # type(response_html)   <class 'lxml.etree._Element'>
        # print(response_html)
        # try:
        #     for i in range(7):
        #         xpath_maj_subject = "//table[@align='left']/tr/td/p["+str(i+1)+"]/a/text()"
        #         maj_subject = response_html.xpath(xpath_maj_subject)[0]
        #         xpath_maj_subject_link = "//table[@align='left']/tr/td/p["+str(i+1)+"]/a/@href"
        #         maj_subject_link = "http://www.cdgdc.edu.cn/webrms/pages/Ranking/"+response_html.xpath(xpath_maj_subject_link)[0]
        #         print(i+1, maj_subject, maj_subject_link)
        #         self.column2_maj_name(maj_subject, maj_subject_link)
        # except Exception as e:
        #     print("爬取第1列有错误》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》")
        #     print(e)

    def column2_maj_name(self, maj_subject, url):
        """Column 2: list the majors under one subject category.

        For every major link found on *url*, print the major and recurse into
        column3_maj_level() to scrape the per-university grades.

        :param maj_subject: subject-category name, carried through for output.
        :param url: absolute URL of the subject-category page.
        """
        headers = {"User-Agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.87 Safari/537.36"}
        socket.setdefaulttimeout(600)
        response = requests.get(url, headers=headers, timeout=600)
        # This page is served as GBK, so decode explicitly.
        response_json_str = response.content.decode("GBK")
        response_html = etree.HTML(response_json_str)   # type(response_html)   <class 'lxml.etree._Element'>
        try:
            for i in range(len(response_html.xpath("//table//table//p/a"))):
                # XPath positions are 1-based, hence i + 1.
                xpath_maj_name = '//table//table//p['+str(i+1)+']/a/text()'
                # Link text looks like "<code> <name>"; keep the name part.
                maj_name = response_html.xpath(xpath_maj_name)[0].split()[1]
                xpath_maj_name_link = '//table//table//p['+str(i+1)+']/a/@href'
                maj_name_link = 'http://www.cdgdc.edu.cn/webrms/pages/Ranking/'+response_html.xpath(xpath_maj_name_link)[0]
                print('\t', i+1, maj_subject, maj_name, maj_name_link)
                self.column3_maj_level(maj_subject, maj_name_link)
        except Exception as e:
            # Best-effort scraping: log and move on rather than abort the run.
            print("爬取第2列有错误》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》")
            print(e)

    def column3_maj_level(self, maj_subject, url):
        """Column 3: scrape per-university evaluation grades for one major
        and write each (level, subject, university, major) tuple to the DB.

        :param maj_subject: subject-category name, carried through for output.
        :param url: absolute URL of the major's ranking page.
        """
        headers = {"User-Agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.87 Safari/537.36"}
        socket.setdefaulttimeout(600)
        response = requests.get(url, headers=headers, timeout=600)
        # This page is served as GBK, so decode explicitly.
        response_json_str = response.content.decode("GBK")
        response_html = etree.HTML(response_json_str)   # type(response_html)   <class 'lxml.etree._Element'>
        # Rows come in two shapes:
        #   * two <td>s: td[1] holds the grade (A/B/C...), td[2] the university
        #   * one <td>:  no grade; the row inherits the grade of the previous
        #     graded row above it
        div_arr = response_html.xpath("//table//table//table//tr//td[1]")
        # Carries the most recent grade across grade-less rows.
        level = ''
        second_DAO_major_catalog = Second_DAO_major_catalog()
        try:
            # The major name appears once in the page header and is identical
            # for every row, so compute it once instead of per iteration.
            # (Guarded so an empty table performs no extra XPath work.)
            if div_arr:
                maj_name = response_html.xpath("//table//table//tr[1]//strong/text()")[0].split()[1]
            for i in range(len(div_arr)):
                # If td[2] exists for this row it is a graded row (grade +
                # university); otherwise the row has only the university.
                # NOTE(review): this selector uses /tr[...] while the ones
                # below use //tr[...] — confirm both address the same rows.
                xpath_length_td2 = '//table//table//table/tr['+str(i+1)+']//td[2]'
                if len(response_html.xpath(xpath_length_td2)) == 1:
                    xpath_level = '//table//table//table//tr['+str(i+1)+']//td[1]/text()'
                    xpath_uni_name = '//table//table//table//tr['+str(i+1)+']//td[2]/div/text()'
                    level = response_html.xpath(xpath_level)[0]
                    maj_uni_name = response_html.xpath(xpath_uni_name)[0].split()[1]
                else:
                    xpath_uni_name = '//table//table//table//tr[' + str(i + 1) + ']//td[1]/div/text()'
                    maj_uni_name = response_html.xpath(xpath_uni_name)[0].split()[1]
                print('\t\t', i + 1, maj_subject, level, maj_uni_name, maj_name)
                # Unconditionally update the row; the "skip if already
                # populated" check is disabled:
                # if second_DAO_major_catalog.maj_level_empty(maj_uni_name, maj_name):
                second_DAO_major_catalog.update_maj_level_maj_subject(level, maj_subject, maj_uni_name, maj_name)
                # else:
                #     print("\t\t\t", maj_uni_name, maj_name, "数据已存在-----------------------------------------")
        except Exception as e:
            # Best-effort scraping: log and move on rather than abort the run.
            print("爬取第3列有错误》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》")
            print(e)
if __name__ == '__main__':
    # Entry point: kick off the drill-down scrape from column 1.
    scraper = UPDATE_major_catalog_level_subject()
    # Timing instrumentation, currently disabled:
    # started = datetime.datetime.now()
    scraper.column1_maj_subject()
    # finished = datetime.datetime.now()
    # print("开始时间: ", started, "   结束时间: ", finished)
    # print("运行时间: ", finished - started, "秒")

