# -*- coding: utf-8 -*-

import random
import requests
from bs4 import BeautifulSoup
import chardet

class Gather:
    """Collector: fetches HTML from the given URL and returns it as text."""

    # Pool of User-Agent strings rotated to reduce the chance of being blocked.
    UA = ['Mozilla/5.0 (Windows; U; Windows NT 5.1; it; rv:1.8.1.11) Gecko/20071127 Firefox/2.0.0.11',
          'Opera/9.25 (Windows NT 5.1; U; en)',
          'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)',
          'Mozilla/5.0 (compatible; Konqueror/3.5; Linux) KHTML/3.5.5 (like Gecko) (Kubuntu)',
          'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.0.12) Gecko/20070731 Ubuntu/dapper-security Firefox/1.5.0.12',
          'Lynx/2.8.5rel.1 libwww-FM/2.14 SSL-MM/1.4.1 GNUTLS/1.2.9',
          "Mozilla/5.0 (X11; Linux i686) AppleWebKit/535.7 (KHTML, like Gecko) Ubuntu/11.04 Chromium/16.0.912.77 Chrome/16.0.912.77 Safari/535.7",
          "Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:10.0) Gecko/20100101 Firefox/10.0 ",
        ]

    def __init__(self, url):
        """
        @param url: target URL; "http://" is prepended when no scheme is given
        @param type: str
        """
        if url.startswith(("http:", "https:")):
            self.url = url
        else:
            self.url = "http://" + url
        self.headers = {
                "User-Agent": self.get_ua(),
                # BUGFIX: key was "Connection:" (stray colon), which produced a
                # malformed header name instead of the intended Connection header.
                "Connection": "keep-alive"
                }
        self.proxies = None

    def set_headers(self, headers):
        """
        @param headers: HTTP headers merged into the defaults
        @param type:    dict
        """
        self.headers.update(headers)

    def set_proxies(self, proxies):
        """
        @param proxies: HTTP proxies (e.g. {"http": "...", "https": "..."})
        @param type:    dict
        """
        self.proxies = proxies

    def gather(self, method="GET", timeout=10):
        """
        Fetch the page and return it decoded to str.

        @param method:  "GET" (default) or anything else for POST
        @param timeout: seconds before the request is aborted (new, defaulted,
                        so existing callers are unaffected)
        @return: decoded HTML, or "" on connection failure/timeout
        """
        try:
            if method == "GET":
                raw = self._gather_get(timeout=timeout).content
            else:
                raw = self._gather_post(timeout=timeout).content

            # Sniff the charset from a prefix of the body; chardet may return
            # None for encoding, so fall back to utf-8 to avoid TypeError.
            encoding = chardet.detect(raw[:1000])["encoding"] or "utf-8"
            return raw.decode(encoding, "ignore")
        except requests.RequestException:
            # Covers ConnectionError and the Timeout raised by the new timeout.
            return ""

    def _gather_get(self, data=None, headers=None, timeout=10):
        """Issue the GET request and return the raw Response."""
        if headers:
            self.headers.update(headers)
        # proxies=None is accepted by requests, so no branching is needed.
        return requests.get(self.url, data=data, headers=self.headers,
                            proxies=self.proxies, timeout=timeout)

    def _gather_post(self, data=None, headers=None, timeout=10):
        """Issue the POST request and return the raw Response."""
        if headers:
            self.headers.update(headers)
        return requests.post(self.url, data=data, headers=self.headers,
                             proxies=self.proxies, timeout=timeout)

    def get_ua(self, index=None):
        """
        @param index: optional index into UA; BUGFIX: checked with "is not None"
                      so index 0 selects UA[0] instead of a random entry
        @return: the chosen or a random User-Agent string
        """
        if index is not None:
            return self.UA[index]

        return random.choice(self.UA)


class Parser:
    """Wraps an HTML string and pretty-prints it via BeautifulSoup."""

    def __init__(self, html):
        """
        @param html: HTML document to parse
        @param type: str
        """
        self.html = html

    def parser(self, *args, **kwargs):
        """
        Parse the HTML and return it prettified.

        Extra arguments are forwarded to BeautifulSoup (e.g. a different
        parser name as the first positional argument).
        """
        soup = self._soup(*args, **kwargs)
        return soup.prettify()

    def _soup(self, features="lxml", **kwargs):
        """
        Build and cache the BeautifulSoup object.

        BUGFIX: the original signature took no arguments, so the *args/**kwargs
        forwarded by parser() raised TypeError whenever any were supplied.
        The default "lxml" preserves the original behavior for no-arg calls.
        """
        self.soup = BeautifulSoup(self.html, features, **kwargs)
        return self.soup


if __name__ == '__main__':
    # Smoke test: fetch a page, show the chosen UA/headers, print prettified HTML.
    # BUGFIX: the original used Python 2 print statements; print() calls work
    # identically here on both Python 2 and 3.
    g = Gather("http://www.baidu.com")
    print(g.get_ua())
    print(g.headers)
    parser = Parser(g.gather())
    print(parser.parser())
