
import lxml
import requests
from lxml import etree
import re


# Fetch the hidden login parameters from the intranet login page.
# (The web client fills these via JavaScript; the crawler must read the
# hidden <input> fields from the raw HTML itself.)


def get_parameter():
    """Fetch the three hidden login-form fields from the intranet login page.

    Returns:
        tuple[str, str, str]: ``(name, value, random)`` — the name and value
        of the form's first hidden ``<input>`` and the value of the second.
        These must be posted back with the credentials on login.

    Raises:
        requests.HTTPError: if the login page responds with an error status.
        IndexError: if the expected hidden inputs are absent from the page.
    """
    # Browser-like headers so the server does not reject the crawler.
    # NOTE(review): the original sent 'Host: class.sise.com.cn:7001', which
    # does not match the request URL's host (class.seig.edu.cn:7001);
    # requests sets the correct Host automatically, so the override is dropped.
    headers = {
        'Content-Type': 'application/x-www-form-urlencoded',
        'User-Agent': "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.121 Safari/537.36",
        'Referer': 'http://class.seig.edu.cn:7001/sise/',
        'Accept-Encoding': 'gzip, deflate',
        'Accept-Language': 'zh-CN,zh;q=0.8'
    }
    url = 'http://class.seig.edu.cn:7001/sise/'
    # Timeout so a hung server cannot block the crawler forever.
    response = requests.get(url, headers=headers, timeout=10)
    response.raise_for_status()  # fail loudly instead of parsing an error page
    # Parse the HTML so the hidden form fields can be located with XPath.
    page = etree.HTML(response.text)
    # The login form carries two hidden <input> elements whose name/value
    # pairs change per request and must be echoed back on login.
    field_name = page.xpath('//div/form/input[1]/@name')[0]
    field_value = page.xpath('//div/form/input[1]/@value')[0]
    # Renamed from `random` to avoid shadowing the stdlib module name.
    random_value = page.xpath('//div/form/input[2]/@value')[0]
    return field_name, field_value, random_value


# Log in to the intranet and scrape the target page.
def land_sise_get_info(url, user, pw):
    """Log in to the SISE intranet and scrape the page at *url*.

    Args:
        url: Target page to fetch after logging in
            (e.g. the student schedule page).
        user: Student number used as the login username.
        pw: Login password.

    Returns:
        list: Fields parsed from the target page by ``get_kcb_info``;
        empty if the expected markup is absent (e.g. login failed).

    Raises:
        requests.HTTPError: if the target page responds with an error status.
    """
    # The three hidden form fields scraped from the login page.
    # NOTE(review): these are fetched outside the session created below, so
    # no cookies are shared between the two requests — if the server ties the
    # fields to a session, this only works because it doesn't enforce that.
    name, value, random_value = get_parameter()

    # Browser-like headers so the server does not reject the crawler.
    # NOTE(review): dropped the mismatched 'Host: class.sise.com.cn:7001'
    # override and aligned the Referer with the seig.edu.cn login URL, for
    # consistency with get_parameter().
    headers = {
        'Content-Type': 'application/x-www-form-urlencoded',
        'User-Agent': "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.121 Safari/537.36",
        'Referer': 'http://class.seig.edu.cn:7001/sise/',
        'Accept-Encoding': 'gzip, deflate',
        'Accept-Language': 'zh-CN,zh;q=0.8'
    }

    login_url = 'http://class.seig.edu.cn:7001/sise/login_check_login.jsp'
    # The server expects the echoed hidden fields plus the credentials.
    login_data = {
        name: value,
        'random': random_value,
        'username': user,
        'password': pw,
    }

    # A single Session keeps the login cookie for the follow-up GET, and the
    # `with` block closes its connections even if a request raises.
    with requests.Session() as session:
        session.post(url=login_url, data=login_data, headers=headers,
                     timeout=10)
        target_request = session.get(url=url, headers=headers, timeout=10)
        target_request.raise_for_status()
        # Parse the schedule fields out of the fetched page.
        return get_kcb_info(target_request.text)


# Parse the scraped schedule-page HTML.
def get_kcb_info(info):
    """Extract four colon-separated fields from the scraped schedule page.

    Args:
        info: Raw HTML text of the schedule page.

    Returns:
        list: Four extracted field values, or an empty list when the
        expected ``<span>`` markup is absent (e.g. the login failed).
    """
    page = etree.HTML(info)
    fields = []
    # Evaluate the XPath once instead of twice (the original queried it both
    # in the `if` test and again to read the node).
    nodes = page.xpath('/html/body/form/table[4]/tr/td/table/tr/td/span/*')
    if nodes:
        text = nodes[0].text
        # Strip ordinary and non-breaking spaces in a single C-level pass
        # (equivalent to the two chained .replace() calls).
        text = text.translate({ord(' '): None, 0xA0: None})
        # Plain str.split replaces the needless regex re.split('[:]', ...).
        parts = text.split(':')
        # parts[0] is the leading label; each following chunk ends with the
        # next field's 2-character label, which [:-2] trims off. The last
        # chunk has no trailing label, so it is kept whole.
        fields.append(parts[1][:-2])
        fields.append(parts[2][:-2])
        fields.append(parts[3][:-2])
        fields.append(parts[4])
    return fields


if __name__ == '__main__':
    # Manual smoke run: log in and print the parsed schedule fields.
    # NOTE(review): credentials are hard-coded in source — prefer reading
    # them from input() or environment variables before sharing this file.
    result = land_sise_get_info(
        url='http://class.seig.edu.cn:7001/sise/module/student_schedular/student_schedular.jsp',
        user="2040231160",
        pw="82322866qqqQQQ@",
    )
    print(result)
