#!/usr/bin/env python  
# -*- coding: utf-8 -*-  
from HTMLParser import HTMLParser
from pyquery import PyQuery as pq
from lxml import etree
import sys,re,urllib2,urllib,time,xlwt,os

# type = sys.getfilesystemencoding() 
# NOTE(review): the assignment above is commented out, so any later use of
# `type` (e.g. response.decode(...).encode(type)) resolves to the BUILTIN
# `type` and would raise TypeError — confirm before relying on it.
# HACK: reload(sys) restores setdefaultencoding (deleted by site.py at
# startup), then forces the Python 2 process-wide default encoding to
# UTF-8. Widely discouraged: it masks str/unicode mix-ups.
reload(sys)  
sys.setdefaultencoding('utf8')
# Spider class (comment originally said "Qiushibaike joke-site spider",
# but the hard-coded URL below actually targets used.carnews.com)
class Goo:
    """Spider that fetches one used-car listing page and prints each item.

    NOTE(review): despite the original comments mentioning Qiushibaike,
    the hard-coded URL targets used.carnews.com.
    """

    def __init__(self):
        # Page index to fetch. Kept for interface compatibility; getPage
        # currently ignores it and always requests the same URL.
        self.pageIndex = 1
        self.user_agent = 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'
        self.headers = { 'User-Agent' : self.user_agent }
        # Collected items, one entry per page (not yet populated anywhere).
        self.stories = []
        # Flag meant to control whether the spider keeps running.
        self.enable = False

    def getPage(self):
        """Fetch the hard-coded listing page and hand its HTML to capture().

        Raises urllib2.URLError/HTTPError on network failure.
        """
        url = 'https://used.carnews.com/UsedCarShop?PageSize=36'
        request = urllib2.Request(url, headers=self.headers)
        response = urllib2.urlopen(request).read()
        # BUG FIX: the original chained .encode(type), but the line that
        # assigned type = sys.getfilesystemencoding() is commented out at
        # the top of the file, so `type` is the builtin and .encode(type)
        # raises TypeError. Decode once and keep the unicode text.
        pageCode = response.decode('utf-8')
        self.capture(pageCode)

    def capture(self, html):
        """Parse the listing HTML and print each item's inner HTML.

        html: unicode page source; items are <li class="uc-advanced-list-item">.
        """
        doc = pq(html)
        listing = doc('li.uc-advanced-list-item')
        try:
            # FIX: original wrote `for li in li.items()`, shadowing the
            # iterable with its own loop variable.
            for item in listing.items():
                print(item.html())
        except Exception:
            # Was a bare except; narrowed so SystemExit/KeyboardInterrupt
            # are no longer swallowed.
            print(u"發生錯誤")

    def start(self):
        """Entry point: fetch and process the page."""
        self.getPage()

# Run the spider only when executed as a script, not when imported.
if __name__ == '__main__':
    spider = Goo()
    spider.start()












# page = 1
# url = 'http://www.qiushibaike.com/hot/page/' + str(page)
# user_agent = 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'
# headers = { 'User-Agent' : user_agent }
# try:
#     request = urllib2.Request(url,headers = headers)
#     response = urllib2.urlopen(request).read()
#     # response = 'an example word:cat!!'
#     content =  response.decode("UTF-8").encode(type) 
#     # print content
#     pattern = re.compile(r'<h2>\n.*?\n</h2>')
#     # if pattern:
#       # print pattern.group()
#     # pattern = re.match(r'<img.*?>',content)
#     # if pattern:
#        # print("yo it's a {}".format(pattern.group()))
#     items = re.findall(pattern,content)
#     # print (items)
#     for item in items:
#       print item
# except urllib2.URLError, e:
#     if hasattr(e,"code"):
#         print e.code
#     if hasattr(e,"reason"):
#         print e.reason
