# python3.5 从西电小蘑菇爬取课表

import csv
import re
import http.cookiejar
import urllib.request
import urllib.parse

from bs4 import BeautifulSoup


def getOpener(head):
    """Build a cookie-aware urllib opener that sends *head* as its headers.

    head: dict mapping HTTP header name -> value.
    Returns a urllib.request.OpenerDirector with an in-memory CookieJar
    attached, so the session cookie set by the first GET is replayed on
    the subsequent login POST.
    """
    cookie_jar = http.cookiejar.CookieJar()
    opener = urllib.request.build_opener(
        urllib.request.HTTPCookieProcessor(cookie_jar))
    # dict.items() already yields (key, value) pairs — no manual loop needed.
    opener.addheaders = list(head.items())
    return opener


# Browser-like request headers for the timetable site.
# NOTE: Accept-Encoding must stay 'identity' — this script passes the raw
# op.read() bytes straight to BeautifulSoup without decompressing, so
# advertising gzip/deflate support would hand the parser compressed junk.
header = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:49.0) Gecko/20100101 Firefox/49.0',
    'Accept': "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
    "Accept-Language": "zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3",
    "Accept-Encoding": "identity",
    "Referer": "http://115.159.48.227:8080/XDxiaoMogu/queryMasterTimeTable.htm?winzoom=1",
    "Content-Type": "application/x-www-form-urlencoded"
}

# Two endpoints: the query page (GET — establishes the session cookie)
# and the endpoint that returns the rendered timetable (POST).
url = 'http://115.159.48.227:8080/XDxiaoMogu/queryMasterTimeTable.htm?winzoom=1'
url2 = 'http://115.159.48.227:8080/XDxiaoMogu/masterTimetableResult'

opener = getOpener(header)
# Priming GET: only the cookie it sets matters; the body is discarded below.
data = opener.open(url).read()

username = '16011203XX'  # placeholder student id — fill in before running
password = 'XXXXXX'      # placeholder password

# POST the credentials as a url-encoded form; the response body is the
# timetable HTML that gets parsed next.
postData = urllib.parse.urlencode({
    'username': username,
    'password': password,
}).encode()
data = opener.open(url2, postData).read()
soup = BeautifulSoup(data, 'lxml')
# The timetable is the first bordered table on the result page.
table = soup.findAll('table', {'class': 'table table-bordered'})[0]
# One <tr> element per timetable row.
rows = table.findAll('tr')

# newline='' is required by the csv module: without it the text layer
# translates the '\r\n' that csv writes itself, producing a blank line
# after every record on Windows.
csvFile = open('./data.csv', 'w', encoding='utf-8', newline='')
writer = csv.writer(csvFile, dialect='excel')



try:
    for row in rows:
        # One CSV record per table row; rows without <td> cells
        # (e.g. header rows that only contain <th>) are skipped.
        csvRow = [cell.get_text() for cell in row.findAll('td')]
        if csvRow:
            writer.writerow(csvRow)
except Exception as e:
    # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit still
    # propagate, and the actual error is reported instead of being swallowed.
    print('----- 爬虫出错了！-----')
    print(e)
finally:
    # Always close the file so buffered rows are flushed to disk.
    csvFile.close()



#print(data.decode())
