from lxml import etree
import requests
from bs4 import BeautifulSoup
import hashlib
import settings
import mysql.connector

# Target semester (school year + term) sent with timetable requests.
xq = '2021-2022-2'
# Shared session so the authenticated cookie is reused across requests.
s = requests.session()
# Browser-mimicking headers for the CSU JW (academic affairs) system;
# the session cookie is supplied by settings.COOKIE_JW.
header_info = {
    "User-Agent": settings.USER_AGENT,
    "Referer": "http://csujwc.its.csu.edu.cn/jiaowu/pkgl/llsykb/llsykb_find_xs0101.jsp?xnxq01id=2016-2017-2&init=1&isview=0",
    'Host': 'csujwc.its.csu.edu.cn',
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    'Accept-Encoding': 'gzip, deflate',
    'Accept-Language': 'zh-cn',
    'Content-Type': 'application/x-www-form-urlencoded',
    'Origin': 'http://csujwc.its.csu.edu.cn',
    'Connection': 'keep-alive',
    'Upgrade-Insecure-Requests': '1',
    # NOTE(review): a hard-coded Content-Length can conflict with the length
    # requests computes for the actual body — confirm it is really needed.
    'Content-Length': '103',
    'Cookie': settings.COOKIE_JW,
}
# Timetable page endpoint of the JW system.
url = 'http://csujwc.its.csu.edu.cn/jiaowu/pkgl/llsykb/llsykb_kb.jsp'


def download_html(data, out_dir='/home/EveryClass-collector/raw_data/'):
    """POST the timetable form and save the raw HTML response to disk.

    :param data: form payload; must contain the key ``'xs0101id'`` (the
        student's internal id), which becomes the saved file's basename.
    :param out_dir: directory the raw HTML is written to (kept as a keyword
        parameter with the historical default for backward compatibility).
    :return: the ``requests.Response`` object for further inspection.
    :raises requests.HTTPError: if the server answers with an error status —
        previously an error page would have been silently saved as HTML.
    """
    response = s.post(url, headers=header_info, data=data)
    # Fail fast instead of persisting an error page as if it were a timetable.
    response.raise_for_status()
    local_filename = out_dir + data['xs0101id'] + '.html'
    with open(local_filename, 'wb') as f:
        f.write(response.content)
    return response


def parse_html(file):
    """Parse a saved raw HTML file into an lxml element tree.

    The file is read as *bytes* so lxml can honour the document's own
    declared encoding — the JW pages are Chinese and may not be UTF-8, and
    the previous text-mode ``open()`` used the platform's locale encoding,
    which could raise ``UnicodeDecodeError`` or silently mis-decode.

    :param file: path of the HTML file to parse
    :return: the parsed ``lxml`` element tree
    """
    with open(file, 'rb') as f:
        raw = f.read()
    return etree.HTML(raw)


class Database_helper:
    """Thin wrapper around the EveryClass MySQL database.

    Provides lookups between student ids (column ``xh``) and student names
    in the ``ec_students`` table.
    """

    def __init__(self):
        # Connection parameters come from settings; mysql_native_password is
        # forced explicitly (newer servers default to caching_sha2_password).
        self.conn = mysql.connector.connect(
            **settings.MYSQL_CONFIG, auth_plugin='mysql_native_password')

    def _fetch_one(self, query, param):
        """Run *query* with a single bind parameter and return the first
        column of the first row, or ``''`` when there is no match.

        The cursor is closed in ``finally`` so it no longer leaks when
        ``execute`` raises.
        """
        cursor = self.conn.cursor()
        try:
            cursor.execute(query, (param,))
            rows = cursor.fetchall()
        finally:
            cursor.close()
        return rows[0][0] if rows else ''

    def userid_to_name(self, userid):
        """Return the student's name for a student id, or ``''`` if unknown."""
        return self._fetch_one(
            'SELECT name FROM ec_students WHERE xh=%s', userid)

    def name_to_userid(self, userid):
        """Return the student id for a name, or ``''`` if unknown.

        NOTE(review): the argument is actually a *name*; the parameter keeps
        its historical name ``userid`` for backward compatibility with
        existing keyword callers.
        """
        return self._fetch_one(
            'SELECT xh FROM ec_students WHERE name=%s', userid)


# def get_course_data(tree):
#     for weekday in range(1, 8):
#         for row_number in range(1, 7):
#             query_selector = '//div[@id="' + get_row_code(xq, row_number) + '-' + str(
#                 weekday) + '-2"]/a'
#             # print(query_selector)
#             res = tree.xpath(query_selector)
#             # print(res)
#             if len(res) > 0:
#                 for course in res:
#                     clsname = course.xpath('text()')
#                     clstime = ['{:d}-{:d}'.format(weekday, row_number)]
#                     teacher = course.xpath('font[@title="老师"]/text()')
#                     duration = course.xpath('font[@title="周次"]/text()')
#                     week = course.xpath('font[@title="单双周"]/text()')
#                     location = course.xpath('font[@title="上课地点教室"]/text()')
#                     course_info = {
#                         'clstime': clstime,
#                         'clsname': clsname,
#                         'teacher': teacher,
#                         'duration': duration,
#                         'week': week,
#                         'location': location
#                     }
#                     for k, v in course_info.items():
#                         course_info[k] = v[0] if len(v) > 0 else 'None'
#                     print(course_info)


# tree = parse_html('raw_data/8207191507.html')
# get_course_data(tree)
# tree.xpath('//div[@id="A0F45AC8A9AC47DA93C18775D220C4EA-3-2"]')
# download(data)
