#!/usr/bin/python
# -*- coding: UTF-8 -*-

import urllib2
import urlparse
from bs4 import BeautifulSoup

class SpiderMain(object):
    def __init__(self):
        self.url_manager = UrlManager()
        self.downloader = HtmlDownloader()
        self.parser = HtmlParser()
        self.outputer = HtmlOutputer()

    def craw(self, url):
        num = 0
        self.url_manager.add_new_url(url)
        while self.url_manager.has_new_url() and num < 10:
            url = self.url_manager.get_new_url()
            print url
            ####d
            content = self.downloader.download(url)
            if content is None:
                print "Craw failed"
                continue
            ##
            data = self.parser.parse(url, content)
            if data is None:
                print "Craw failed"
                continue
            new_urls = data[0]
            new_data = data[1]

            self.url_manager.add_new_urls(new_urls)
            self.outputer.collect(new_data)

            num += 1

        self.outputer.output()

class UrlManager(object):
    """Tracks the crawl frontier and the set of already-visited URLs."""

    def __init__(self):
        # URLs waiting to be crawled, and URLs already handed out.
        self.new_url = set()
        self.old_url = set()

    def add_new_url(self, url):
        """Queue *url* unless it is already pending or visited."""
        already_seen = url in self.new_url or url in self.old_url
        if not already_seen:
            self.new_url.add(url)

    def add_new_urls(self, urls):
        """Queue every URL in *urls*; tolerates None or empty input."""
        if urls is None or not len(urls):
            return
        for candidate in urls:
            self.add_new_url(candidate)

    def has_new_url(self):
        """Return True while at least one URL is still pending."""
        return bool(self.new_url)

    def get_new_url(self):
        """Pop an arbitrary pending URL, mark it visited, and return it."""
        picked = self.new_url.pop()
        self.old_url.add(picked)
        return picked


class HtmlDownloader(object):
    """Fetches raw page bodies over HTTP via urllib2."""

    def download(self, url):
        """Return the page body as a string, or None on any failure.

        A None url, any network/HTTP error, and a non-200 status all
        yield None so the caller treats every failure uniformly.
        """
        if url is None:
            return None
        try:
            response = urllib2.urlopen(url)
        except urllib2.URLError:
            # HTTPError subclasses URLError, so this single clause covers
            # both HTTP status errors and transport failures (the original
            # second `except urllib2.HTTPError` branch was unreachable).
            return None
        try:
            if response.getcode() != 200:
                return None
            return response.read()
        finally:
            # Always release the connection, even on the non-200 path.
            response.close()


class HtmlParser(object):
    """Extracts question data (and, eventually, follow-up URLs) from a page."""

    def parse(self, url, content):
        """Parse downloaded HTML for *url*.

        Returns a (new_urls, data) tuple, or None when either argument
        is missing.
        """
        if url is None or content is None:
            return None

        soup = BeautifulSoup(content, 'html.parser', from_encoding='utf-8')
        new_urls = self.__get_new_urls(url, soup)
        new_data = self.__get_new_data(url, soup)
        return new_urls, new_data

    def __get_new_urls(self, url, soup):
        # Link extraction is not implemented yet; returning None is safe
        # because UrlManager.add_new_urls tolerates it.
        return

    def __get_new_data(self, url, soup):
        """Collect subject / knowledge point / question text into a dict."""
        # Pre-fill defaults so downstream output never hits a missing key.
        data = {'url': url, 'subject': '', 'knowledge_point': '', 'question': ''}

        # Knowledge-point breadcrumb links; last entry is the specific
        # point, second entry presumably the subject — TODO confirm against
        # live page markup.
        detail = soup.find('div', class_='detailtt')
        links = detail.find_all('a') if detail is not None else []
        if links:
            # Guard on the list itself: find_all returns [] (not None), so
            # the original `!= None` check could IndexError on [-1].
            data['knowledge_point'] = links[-1].get_text()
        if len(links) > 1:
            data['subject'] = links[1].get_text()

        question = soup.find('span', class_='uc_q_object')
        if question is not None:
            data['question'] = question.get_text()

        question_ext = soup.find('div', class_='quesborder')
        if question_ext is not None:
            data['question'] = data['question'] + question_ext.get_text()

        # Bug fix: the original omitted this return, so parse() always
        # produced None data.
        return data

class HtmlOutputer(object):
    """Accumulates parsed records and renders them to output.html."""

    def __init__(self):
        # Parsed page dicts in crawl order.
        self.datas = []

    def collect(self, data):
        """Queue one parsed record for output; None records are skipped
        so a failed parse cannot poison the final report."""
        if data is not None:
            self.datas.append(data)

    def output(self):
        """Write all collected records to output.html as a UTF-8 table."""
        # 'wb' + explicit .encode keeps the file bytes exactly UTF-8 under
        # Python 2; the redundant f.close() is handled by `with`.
        with open('output.html', 'wb') as f:
            f.write('<html>')
            f.write('<head><meta charset="UTF-8"></head>')
            f.write('<body>')
            f.write('<table>')
            for data in self.datas:
                # Per-row error handling: one malformed record must not
                # abort the remaining rows (the original try wrapped the
                # whole loop, so the first KeyError killed the report).
                try:
                    f.write('<tr>')
                    for key in ('url', 'subject', 'knowledge_point',
                                'question', 'answer'):
                        # .get default: the parser never fills 'answer'
                        # (and may omit 'subject'); render those empty
                        # instead of raising KeyError.
                        f.write('<td>%s</td>' % data.get(key, '').encode('utf-8'))
                    f.write('</tr>')
                except Exception as e:
                    print(e)
            f.write('</table>')
            f.write('</body>')
            f.write('</html>')



if __name__=="__main__":
    # Crawl a small contiguous range of tiku.cn question pages.
    spider = SpiderMain()
    for question_id in range(345808, 345810):
        page_url = "http://www.tiku.cn/q/" + str(question_id) + ".html"
        spider.craw(page_url)