# coding:utf-8
import urllib2
# 需要 pip install beautifulsoup4
from bs4 import BeautifulSoup
# from bs4 import element
import re
import xlwt
import sys
# Python 2-only hack: site.py removes sys.setdefaultencoding at startup, so
# reload(sys) is needed to bring it back.  Switching the default encoding
# from 'ascii' to 'utf8' makes implicit str<->unicode conversions use UTF-8
# — presumably to keep the scraped Chinese text from raising
# UnicodeDecodeError when concatenated/printed (TODO confirm against the
# actual failure this worked around).
reload(sys)
sys.setdefaultencoding('utf8')

def GetCookie(file_name):
    '''
    Read the saved session cookie from *file_name* and return it with
    surrounding whitespace stripped.

    Fix: the original opened the file without ever closing it; use a
    ``with`` block so the handle is released even if read() raises.
    '''
    with open(file_name) as f:
        return f.read().strip()

def OpenPage(url):
    '''
    Fetch *url* with browser-like headers (plus the cookie from
    ./cookie.txt, which must hold a logged-in nowcoder session) and
    return the raw response body.

    Fix: the original never closed the urllib2 response object; close it
    in a ``finally`` so the connection is released even if read() fails.
    '''
    headers = {
        'Connection': 'keep-alive',
        'Upgrade-Insecure-Requests': 1,
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.62 Safari/537.36',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
        'Referer': 'https://www.nowcoder.com/profile/7027245/test/15739887/111728',
        'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
        'Cookie': GetCookie('./cookie.txt'),
    }
    req = urllib2.Request(url, headers=headers)
    resp = urllib2.urlopen(req)
    try:
        return resp.read()
    finally:
        resp.close()

def ParseMainEntry(page):
    '''
    Parse the result overview page and return the absolute URL of every
    wrongly-answered question (the elements tagged with the
    'error-order' CSS class, each wrapping an <a href="...">).
    '''
    soup = BeautifulSoup(page, 'html.parser')
    links = []
    for entry in soup.find_all(class_='error-order'):
        links.append('https://www.nowcoder.com' + entry.a.attrs["href"])
    return links

def ReplaceEscape(htmlstr):
    '''
    Decode a small set of HTML character entities in *htmlstr*.

    Known named/numeric entities (nbsp, lt, gt, amp, quot and their
    numeric forms) are replaced by their characters; any other entity is
    removed.  Replacement loops one match at a time and re-scans from the
    start, so text produced by an earlier substitution can itself be
    decoded (e.g. '&amp;gt;' ends up as '>').
    '''
    entity_map = {
        'nbsp': ' ',
        '160': ' ',
        'lt': '<',
        '60': '<',
        'gt': '>',
        '62': '>',
        'amp': '&',
        '38': '&',
        'quot': '"',
        '34': '"',
    }
    pattern = re.compile(r'&#?(?P<name>\w+);')
    match = pattern.search(htmlstr)
    while match:
        # Unknown entities fall back to the empty string, matching the
        # original KeyError branch.
        replacement = entity_map.get(match.group('name'), '')
        htmlstr = pattern.sub(replacement, htmlstr, 1)
        match = pattern.search(htmlstr)
    return htmlstr

def ReplaceHtmlTag(input):
    '''
    Strip all HTML tags from *input*, then decode character entities via
    ReplaceEscape, returning plain text.

    Fix: the tag pattern is now a raw string — the original non-raw
    "</?\\w+[^>]*>" only worked because Python passes unknown escapes
    like \\w through unchanged (a DeprecationWarning in Python 3).
    The parameter keeps its original name for caller compatibility even
    though it shadows the builtin.
    '''
    re_html = re.compile(r"</?\w+[^>]*>")
    return ReplaceEscape(re_html.sub("", input))

def ParseQuestion(soup):
    '''
    Extract the question body text from a detail-page soup.

    Concatenates the raw markup of every child of the first
    'question-main' element, then strips tags/entities from the result.
    '''
    pieces = []
    # find_all(...)[0] is kept (rather than find) so a missing element
    # still raises IndexError exactly as before.
    for child in soup.find_all(class_='question-main')[0].contents:
        pieces.append(str(child))
    return ReplaceHtmlTag("".join(pieces))

def ParseChoice(soup):
    '''
    Return the concatenated, tag-free text of every answer option
    ('result-answer-item' element) on a detail page.
    '''
    cleaned = []
    for option in soup.find_all(class_='result-answer-item'):
        cleaned.append(ReplaceHtmlTag(str(option)))
    # The options' own markup already contains newlines, so plain
    # concatenation keeps them separated.
    return "".join(cleaned)

def ParseAnswer(soup):
    '''
    Pull the correct answer (e.g. "B") out of the result header.

    The <h1> inside the first 'result-subject-answer' element looks like:

        <h1>正确答案:
        B
          你的答案:
        A
        <span class="font-orange">(错误)</span>
        </h1>

    so the answer is the slice between the two marker phrases
    (including the surrounding whitespace/newlines, as before).
    '''
    heading = str(soup.find_all(class_='result-subject-answer')[0].h1)
    marker = "正确答案:"
    start = heading.find(marker) + len(marker)
    stop = heading.find("你的答案:")
    return heading[start:stop]

def ParseDetailPage(page):
    soup = BeautifulSoup(page, 'html.parser')
    question = ParseQuestion(soup)
    print "question: " + question
    choice = ParseChoice(soup)
    print "choice: " + choice
    answer = ParseAnswer(soup)
    print "answer: " + str(answer)
    return question, choice, answer

def WriteResult(result, excel, line):
    '''
    Write one parsed question into row *line* of the xlwt sheet *excel*.

    *result* is a (question, choice, answer) tuple of UTF-8 byte
    strings.  Column 1 gets the question plus its options, column 2 the
    correct answer.  Values are decoded to unicode first because writing
    raw UTF-8 bytes produced mojibake when the sheet was opened in Excel
    (the original CSV approach had the same problem).
    '''
    question, choice, answer = result
    excel.write(line, 1, (question + '\n' + choice).decode('utf-8'))
    excel.write(line, 2, answer.decode('utf-8'))

def Main(url):
    # 1. 获取到主页
    page = OpenPage(url)
    # 2. 解析主页内容, 获取到所有的题目的链接.
    detail_urls = ParseMainEntry(page)
    # print detail_urls
    # 3. 遍历详情页, 并解析详情页的内容
    workbook = xlwt.Workbook()
    excel = workbook.add_sheet('sheet 1')
    line = 0
    for detail_url in detail_urls:
        print "detail_url: " + detail_url
        page = OpenPage(detail_url)
        detail_result = ParseDetailPage(page)
        WriteResult(detail_result, excel, line)
        line += 1
    workbook.save('test.xls')

# Overview URL of the finished test whose wrong answers get exported.
url = "https://www.nowcoder.com/test/question/done?tid=16149090&qid=89777"

# Fix: guard the crawl behind __main__ so importing this module (e.g. to
# reuse the parsing helpers) no longer fires network requests.
if __name__ == "__main__":
    Main(url)
