from selenium import webdriver
from selenium.webdriver.chrome.options import Options
import time
import json
from lxml import etree
from requests.cookies import RequestsCookieJar
import requests
import os
import logging
import re


class Spider():
    """Scraper for a JWGLXT (正方教务) educational-administration site.

    A real Chrome instance (Selenium) is driven only to perform the
    password login and capture session cookies; every data endpoint is
    then queried directly with ``requests`` using those cookies.
    Cookies are cached on disk per account under ``self.cookie_path``.
    """

    # Browser-like headers shared by every requests session.
    _HEADERS = {
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
        'Accept-Encoding': 'gzip, deflate',
        'Accept-Language': 'zh-CN,zh;q=0.8',
        'Cache-Control': 'max-age=0',
        'Connection': 'keep-alive',
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.104 Safari/537.36"
    }

    # Text that only appears on the login page; finding it in a response
    # means the server no longer accepts our cookies / credentials.
    _LOGIN_MARK = "学生账号为学号，教职工账号为职工号，初始密码均为证件号码后六位"

    def __init__(self, headless=False):
        """Start Chrome (optionally headless) and open the login page.

        :param headless: run Chrome without a visible window.
        """
        self.login_url = 'http://222.205.160.107/jwglxt/xtgl/login_slogin.html?language=zh_CN'
        self.user_informations_url = 'http://222.205.160.107/jwglxt/xsxxxggl/xsgrxxwh_cxXsgrxx.html?gnmkdm=N100801&layout=default&su=%s'
        self.user_grade_url = "http://222.205.160.107/jwglxt/cjcx/cjcx_cxDgXscj.html?doType=query&gnmkdm=N305005&su=%s"

        self.user_class_url = ''
        # Raw strings: avoid the invalid "\c" escape in ".\chromedriver.exe".
        # (attribute name keeps its historical spelling for compatibility)
        self.chromedirver_path = r".\chromedriver.exe"
        self.headless = headless
        self.cookie_path = r'.\cookies'
        self.create_dir(self.cookie_path)

        self.chrome_options = Options()
        if self.headless is True:
            self.chrome_options.add_argument('--headless')
            self.chrome_options.add_argument('--disable-gpu')
        # Hide the "Chrome is being controlled by automated software" banner.
        self.chrome_options.add_experimental_option('excludeSwitches', ['enable-automation'])
        self.driver = webdriver.Chrome(executable_path=self.chromedirver_path,
                                       options=self.chrome_options)
        self.driver.get(self.login_url)
        print('初始化完成')

    def _new_session(self):
        """Return a requests session configured like a desktop Chrome browser."""
        s = requests.session()
        s.verify = False  # site is plain HTTP / self-signed; skip TLS checks
        s.headers = dict(self._HEADERS)
        return s

    def _cookie_file(self, _account):
        """Path of the on-disk cookie cache for *_account*."""
        return os.path.join(self.cookie_path, _account + '_cookies.txt')

    def simulation_get(self, _account, _password):
        """Log in through the real browser and persist the session cookies.

        :param _account: student/staff number used as the login name.
        :param _password: account password.
        :return: True on success, False when the credentials are rejected.
        """
        lt = time.time()
        self.driver.get(self.login_url)
        time.sleep(0.3)  # give the login form a moment to render
        self.driver.find_element_by_id('yhm').send_keys(_account)
        self.driver.find_element_by_id('mm').send_keys(_password)
        self.driver.find_element_by_id('dl').click()
        self.driver.get(self.user_informations_url % (_account))
        # Still on the login page => the credentials were rejected.
        if re.findall(self._LOGIN_MARK, self.driver.page_source, re.S):
            print("login error")
            return False
        print('登录完成')

        # Persist the authenticated cookies for later requests-based calls.
        cookies = self.driver.get_cookies()
        with open(self._cookie_file(_account), "w") as fp:
            json.dump(cookies, fp)
        print('cookies update complete')

        # Drop browser cookies so the next login starts from a clean slate.
        self.driver.delete_all_cookies()
        print("本次登录用时%fs" % (time.time() - lt))
        return True

    def create_dir(self, path):
        """Create *path* (and parents) if it does not exist yet."""
        if not os.path.exists(path):
            os.makedirs(path)
            print('create folder :' + path)

    def check_login(self, _account, _password):
        """Verify the cached cookies still work; re-login when they don't.

        :return: True when a usable session exists afterwards, else False.
        """
        cookie_jar = self.read_cookie(_account, _password)
        if cookie_jar is False:
            return False
        s = self._new_session()
        # Cheap authenticated endpoint used purely as a liveness probe.
        post_data = {
            'jg_id': '08',
            'njdm_id': '2017',
            'dlbs': '',
            'zyh_id': '0817',
            'currentPage_cx': '',
            '_search': 'false',
            'nd': '1614932995681',
            'queryModel.showCount': '15',
            'queryModel.currentPage': '1',
            'queryModel.sortName': '',
            'queryModel.sortOrder': 'asc',
            'time': '0'
        }
        r = s.post(
            'http://222.205.160.107/jwglxt/jxzxjhgl/jxzxjhck_cxJxzxjhckIndex.html?doType=query&gnmkdm=N153540&su=%s' % _account,
            data=post_data,
            cookies=cookie_jar)
        if re.findall(self._LOGIN_MARK, r.text, re.S):
            # Server bounced us back to the login page: session expired.
            print('检测到服务端已下线，正在重新登录，请稍后')
            if self.simulation_get(_account, _password) is False:
                return False
        return True

    def read_cookie(self, _account, _password):
        """Load cached cookies for *_account*, logging in first if absent.

        :return: a ``RequestsCookieJar`` on success, False on login failure.
        """
        jar = RequestsCookieJar()
        cookiename = _account + '_cookies.txt'

        if cookiename in os.listdir(self.cookie_path):
            print(cookiename)
        else:
            # No cache yet: perform a real login to create it.
            if self.simulation_get(_account, _password) is False:
                return False
        with open(self._cookie_file(_account), "r") as fp:
            for cookie in json.load(fp):
                jar.set(cookie['name'], cookie['value'])
        return jar

    # Display-name shorteners for the grade report.
    _KCLB_MAP = {'通识课程': '通识', '专业或专业方向': '专业', '一般课程': '一般'}
    _KCXZ_MAP = {
        '公共必修课': '公必',
        '专业必修课': '专必',
        '公共限定选修课': '公限',
        '公共选修课': '公选',
        '专业选修课': '专选',
        '公共限定性选修课(创业)': '公创',
    }

    def get_grades(self, _account, cookie_jar, xnm, xqm_num):
        """Fetch one term's grades and return them as a JSON string.

        :param xnm: academic year code, e.g. '2019'.
        :param xqm_num: term index into ['', '3', '12', '16']
                        (all / term 1 / term 2 / term 3).
        :return: JSON array of {name, grade, gpa, point, bukao, chongxiu,
                 kcxz, kclb} objects.
        """
        lt_t = time.time()
        s = self._new_session()
        # 学年码 xnm / 学期码 xqm: term1=3, term2=12, term3=16, all=''.
        xqm = ['', '3', '12', '16']
        post_data = {
            'xnm': xnm,
            'xqm': xqm[xqm_num],
            '_search': 'false',
            'queryModel.showCount': '30',
            'queryModel.currentPage': '1',
            'queryModel.sortName': '',
            'queryModel.sortOrder': 'asc',
            'time': '0',
        }
        r = s.post(
            'http://222.205.160.107/jwglxt/cjcx/cjcx_cxDgXscj.html?doType=query&gnmkdm=N305005&su=%s' % _account,
            data=post_data,
            cookies=cookie_jar)
        grade_data = r.json()
        grade_all = []
        for i in grade_data['items']:
            # 'kclbmc' is sometimes absent -> '未知'; unexpected value -> '你猜'.
            kclb = self._KCLB_MAP.get(i['kclbmc'], '你猜') if 'kclbmc' in i else '未知'
            grade_all.append({
                "name": i['kcmc'],
                "grade": i['cj'],
                "gpa": i['jd'],
                "point": i['xf'],
                "bukao": "补考" in i['ksxz'],
                "chongxiu": "重修" in i['ksxz'],
                "kcxz": self._KCXZ_MAP.get(i['kcxzmc'], '未知'),
                "kclb": kclb
            })
        print("访问成绩共耗时%fs" % (time.time() - lt_t))
        return json.dumps(grade_all)

    @staticmethod
    def _clean(text):
        """Strip all tab/newline/carriage-return characters from *text*."""
        return text.replace('\t', '').replace('\n', '').replace('\r', '')

    def get_user_infomation(self, _account, cookie_jar):
        """Scrape the personal-information page.

        :return: JSON array [student id, name, sex, grade, college, major,
                 class, study length, phone].
        """
        s = self._new_session()
        r = s.get(
            'http://222.205.160.107/jwglxt/xsxxxggl/xsgrxxwh_cxXsgrxx.html?gnmkdm=N100801&layout=default&su=%s' % _account,
            cookies=cookie_jar)
        xml = etree.HTML(r.text)

        def field(xpath):
            # Each value is the first text node of its cell, padded with
            # layout whitespace that we remove.
            return self._clean(xml.xpath(xpath)[0])

        name = field('//*[@id="ajaxForm"]/div/div[1]/div/div[2]/div/div/p/text()')
        xuehao = field('//*[@id="ajaxForm"]/div/div[1]/div/div[1]/div/div/p/text()')
        xingbie = field('//*[@id="col_xbm"]/p/text()')
        nianji = field('//*[@id="col_njdm_id"]/p/text()')
        xueyuan = field('//*[@id="col_jg_id"]/p/text()')
        zhuanye = field('//*[@id="col_zyh_id"]/p/text()')
        banji = field('//*[@id="col_bh_id"]/p/text()')
        xuezhi = field('//*[@id="col_xz"]/p/text()')
        tel = field('//*[@id="col_sjhm"]/p/text()')

        print(xuehao, name, xingbie, nianji, xueyuan, zhuanye, banji, xuezhi, tel)
        return json.dumps([xuehao, name, xingbie, nianji, xueyuan, zhuanye, banji, xuezhi, tel])

    def get_user_student_status(self, _account, cookie_jar):
        """Build the academic-progress report (credits, averages, CET).

        Parses credit requirements out of JavaScript embedded in the page
        (regex — the data is not reachable via XPath), then queries the
        per-category course lists to compute averages.

        :return: JSON array with a single report object.
        """
        s = self._new_session()
        lt_t = time.time()
        r = s.get(
            'http://222.205.160.107/jwglxt/xsxy/xsxyqk_cxXsxyqkIndex.html?gnmkdm=N105515&layout=default&su=%s' % _account,
            cookies=cookie_jar)
        print("访问用户信息网页耗时%fs" % (time.time() - lt_t))

        all_key = ['公共必修课', '公共限定性选修课', '公共限定性选修课(创业)', '公共选修课', '专业必修课', '专业选修课', '实践环节']

        def cat_pattern(title):
            # One uniform pattern per category title; re.escape keeps the
            # literal parentheses in '公共限定性选修课(创业)' from being groups.
            # (Generalizes the old 公共必修课-only '[0-9][0-9]..' credit format.)
            return (r"<p class='title1' id='p.{1,32}?' yxxf='.{1,4}?'"
                    r" yqzdxf='.{1,4}?'>" + re.escape(title) + r"\"\+")

        matches = [re.findall(cat_pattern(k), r.text, re.S) for k in all_key]
        print("查找用户信息共耗时%fs" % (time.time() - lt_t))

        all_result = []
        xfyqjd_id = []
        num_ex = r"\'[0-9\.]{1,4}?\'"
        id_ex = r"id=\'p.{1,32}?\'"
        for key, found in zip(all_key, matches):
            # found[0] raises IndexError if a category is missing from the
            # page — same hard assumption as before; verify against server.
            nums = re.findall(num_ex, found[0], re.S)
            ids = re.findall(id_ex, found[0], re.S)
            xfyqjd_id.append(ids[0][5:].strip('\''))  # drop leading "id='p"
            all_result.append({
                "type": key,
                "yxxf": nums[0].strip('\''),   # credits earned
                "yqzdxf": nums[1].strip('\''),  # credits required
            })

        # Overall GPA, also embedded in the page markup.
        jd_ex = (r'<font size="2px">当前所有课程<a class="clj" name="showGpa">平均学分绩点</a>'
                 r'（GPA）：<font size="2px" style="color: red;">.{1,4}?</font>')
        jd_str = re.findall(jd_ex, r.text, re.S)
        jd1_str = re.findall(r'">.{1,4}?<', jd_str[0], re.S)
        pjjd = jd1_str[0][2:].strip('<')

        # Total credits already earned across all categories.
        credits_obtained = sum(float(i['yxxf']) for i in all_result)
        outer_point = self.get_outer_ponit(_account, cookie_jar)
        print(credits_obtained)

        base_url = 'http://222.205.160.107/jwglxt/xsxy/xsxyqk_cxJxzxjhxfyqFKcxx.html?gnmkdm=N105515&su=%s' % _account
        post_data = {
            'xfyqjd_id': '',
            'cjlrxn': '2019',
            'cjlrxq': '12',
            'bkcjlrxn': '2020',
            'bkcjlrxq': '3',
            'xscjcxkz': '0',
            'cjcxkzzt': '0',
            'cjztkz': '0',
            'cjzt': ''
        }

        # Textual grades are mapped onto the band's midpoint score.
        text_cj = {'优秀': 95, '良好': 85, '中等': 75, '及格': 65}
        class_sum = 0
        cj_add = 0
        fxcj_add = 0
        for idx, d in enumerate(xfyqjd_id):
            post_data['xfyqjd_id'] = d
            r = s.post(base_url, data=post_data, cookies=cookie_jar)
            courses = r.json()
            all_result[idx]['yx'] = len(courses)  # courses completed in category

            for j in courses:
                # Electives are excluded from the averages.
                if j['KCXZMC'] in ('公共限定性选修课(创业)', '公共限定性选修课', '公共选修课'):
                    continue
                if j['CJ'] == '不及格':
                    continue  # failed courses are excluded as well
                cj = text_cj.get(j['CJ'])
                if cj is None:
                    cj = float(j['CJ'])
                class_sum = class_sum + 1
                cj_add = cj_add + cj
                fxcj_add = fxcj_add + float(j['XF']) * cj

        # NOTE(review): raises ZeroDivisionError when no counted courses /
        # credits exist — pre-existing behavior, kept as-is.
        pjcj = cj_add / class_sum               # plain average score
        pjcfcj = fxcj_add / credits_obtained    # credit-weighted average

        cetcj = self.get_cet_grade(_account, cookie_jar)

        r_data = {
            "yhxf": str(outer_point),
            'zdxf': str(outer_point - credits_obtained),
            'pjcj': str(round(pjcj, 1)),
            'pjxfcj': str(round(pjcfcj, 1)),
            'pjxfjd': pjjd,
            'cetcj': cetcj,
            'cjxx': all_result
        }
        return json.dumps([r_data])

    def get_cet_grade(self, _account, cookie_jar):
        """Collect best CET-4/CET-6 scores and Putonghua-test registration.

        :return: list of {'xmmc', 'xmcj'} dicts (empty when nothing found).
        """
        lt_t = time.time()
        s = self._new_session()
        post_data = {
            '_search': 'false',
            'nd': '1614931000808',
            'queryModel.showCount': '15',
            'queryModel.currentPage': '1',
            'queryModel.sortName': 'bmsj',
            'queryModel.sortOrder': 'desc',
            'time': '0'
        }
        r = s.post(
            'http://222.205.160.107/jwglxt/kjgl/kjbm_cxXskjbm.html?doType=query&pkey=&xmlbfl=1001&gnmkdm=N2510&su=%s' % _account,
            data=post_data,
            cookies=cookie_jar)
        result = r.json()
        CET4_l = []
        CET6_l = []
        PC_l = []
        for i in result['items']:
            if i['xmmc'] == 'CET4':
                CET4_l.append(int(i['xmcj']))
            elif i['xmmc'] == 'CET6':
                CET6_l.append(int(i['xmcj']))
            elif i['xmmc'] == '普通话等级考试':
                PC_l.append(0)  # score not exposed; presence only

        CET_grade = []
        if CET4_l:
            CET_grade.append({'xmmc': 'CET4', 'xmcj': str(max(CET4_l))})
        if CET6_l:
            CET_grade.append({'xmmc': 'CET6', 'xmcj': str(max(CET6_l))})
        if PC_l:
            CET_grade.append({'xmmc': '普通话等级考试', 'xmcj': ''})
        print('查找用户cet成绩用时%fs' % (time.time() - lt_t))
        return CET_grade

    def get_outer_ponit(self, _account, cookie_jar):
        """Return the total credits required for graduation ('zdxf').

        (Method name keeps its historical spelling for caller compatibility.)
        """
        lt_t = time.time()
        s = self._new_session()
        post_data = {
            'jg_id': '08',
            'njdm_id': '2017',
            'dlbs': '',
            'zyh_id': '0817',
            'currentPage_cx': '',
            '_search': 'false',
            'nd': '1614932995681',
            'queryModel.showCount': '15',
            'queryModel.currentPage': '1',
            'queryModel.sortName': '',
            'queryModel.sortOrder': 'asc',
            'time': '0'
        }
        r = s.post(
            'http://222.205.160.107/jwglxt/jxzxjhgl/jxzxjhck_cxJxzxjhckIndex.html?doType=query&gnmkdm=N153540&su=%s' % _account,
            data=post_data,
            cookies=cookie_jar)
        result = r.json()
        print(result['items'][0]['zdxf'])
        print("查找毕业所需学分用时%fs" % (time.time() - lt_t))
        return float(result['items'][0]['zdxf'])


if __name__ == "__main__":
    # Smoke-test drive: log in once, load the cached cookies, then pull
    # the full academic-status report for the account.
    # NOTE(review): real credentials appear to be hard-coded here — move
    # them to environment variables or a config file before sharing this
    # source.
    s = Spider(headless=True)
    s.check_login("1708170228", "13782735215CFSio")
    cookies = s.read_cookie("1708170228", "13782735215CFSio")
    s.get_user_student_status("1708170228", cookies)
    # s.simulation_get("1708170228", "13782735215CFSio")
    # cookies = s.read_cookie("1708170228", "13782735215CFSio")
    # s.get_outer_ponit("1708170228", cookies)
