#!/usr/bin/env python
# encoding: utf-8

import urllib2
import gzip
from StringIO import StringIO
from lxml import html
from multiprocessing import Pool
import csv

def fetch_url(url):
    '''Fetch *url* and return the response body as a byte string.

    Sends an ``Accept-encoding: gzip`` header so the server may compress
    the response (speeds up the transfer) and transparently decompresses
    the body when the server honours it.  Retries the request once on
    failure before giving up.
    '''
    request = urllib2.Request(url)
    request.add_header('Accept-encoding', 'gzip')
    request.add_header('User-Agent', 
        'Mozilla/5.0 (Windows NT 6.1; WOW64) \
        AppleWebKit/537.36 (KHTML, like Gecko) \
        Chrome/36.0.1985.125 Safari/537.36')
    # Retry once on failure.  Catch Exception (covers URLError and
    # socket.timeout) rather than a bare except, which would also swallow
    # KeyboardInterrupt/SystemExit and make the crawler impossible to stop.
    try:
        response = urllib2.urlopen(request, timeout=20)
    except Exception:
        response = urllib2.urlopen(request, timeout=20)
    try:
        if response.info().get('Content-Encoding') == 'gzip':
            # Server compressed the body as requested: gunzip it from an
            # in-memory buffer (GzipFile needs a seekable file object).
            buffer = StringIO(response.read())
            try:
                content = gzip.GzipFile(fileobj=buffer).read()
            finally:
                buffer.close()
        else:
            # Plain response: read it straight through.
            content = response.read()
    finally:
        # Always release the underlying socket, even if reading fails.
        response.close()
    return content

def parse_homepage(url):
    '''Parse the portal homepage and return the list of car detail-page URLs.'''
    markup = fetch_url(url).decode('gbk')
    doc = html.fromstring(markup)
    hrefs = doc.xpath('/html/body/div/div//div[@class="newcar_txt"]/ul/li/b/a/@href')
    # Drop any query string: keep only the part left of the first '?'.
    return [href.split('?')[0] for href in hrefs]

def get_car_detail_info(url):
    '''Fetch a car detail page and return the parsed attributes.

    Returns a list in the fixed order expected by parse_cars_detail_info
    and save_csv: [name, market price, guide price, colours (list),
    image URL, engine (list), gearbox (list), category, detail-page URL].
    '''
    def _first(nodes, default=u'NULL'):
        # Safely take the first xpath hit; an empty result yields *default*
        # instead of the IndexError that bare `nodes[0]` would raise.
        return nodes[0] if nodes else default

    print(url)
    page = fetch_url(url).decode('gbk')
    tree = html.fromstring(page)

    car_name = _first(tree.xpath('/html/body/div/h1/a/text()'))  # car name
    image = _first(tree.xpath('/html/body/div/div/a/img/@src'))  # preview image
    color = tree.xpath('//*[@id="color_model"]/span/a/@title')  # body colours (list)
    # Market price: explicit emptiness check instead of indexing inside a
    # bare try/except that would also hide unrelated errors.
    prices = tree.xpath('/html/body/div/div[2]/ul/li/em/a/text()')
    if prices:
        market_price = str(prices[0]) + u'万元起'
    else:
        market_price = u'暂无'
    # Manufacturer guide price; the page shows u'暂无' when unavailable.
    guide_price = _first(tree.xpath('/html/body/div/div/ul/li/b/text()'), u'暂无')
    if guide_price != u'暂无':
        guide_price += u'万元'
    category = _first(tree.xpath('/html/body/div/div[2]/div/div/dl[1]/dd[2]/text()'))  # type / class
    engine = tree.xpath('/html/body/div/div[2]/div/div/dl[2]/dd[1]/a/text()')  # engine displacement (list)
    gearbox = tree.xpath('/html/body/div/div[2]/div/div/dl[2]/dd[2]/a/text()')  # gearbox (list)

    # name, market price, guide price, colours, image, engine, gearbox, category, url
    return [car_name, market_price, guide_price, color, image, engine, gearbox, category, url]

def parse_cars_detail_info(cars_detail_info):
    '''Pretty-print every parsed car record; used only to inspect results.'''
    labels = (u'汽车名: ', u'市场销售价: ', u'厂商指导价: ', u'车身颜色: ',
              u'图片: ', u'发动机: ', u'变速箱: ', u'车身结构: ', u'详情页: ')
    # Fields 3 (colours), 5 (engine) and 6 (gearbox) hold lists and are
    # space-joined before printing; the rest are plain strings.
    list_fields = (3, 5, 6)
    for record in cars_detail_info:
        for idx, label in enumerate(labels):
            value = ' '.join(record[idx]) if idx in list_fields else record[idx]
            print(label + value)
        print('=================================================')

def save_csv(cars_detail_info):
    '''Format the cars_detail_info records as CSV and write them to xcar.csv.'''
    # Header row first; each data row mirrors the record layout produced by
    # get_car_detail_info, UTF-8 encoded with 'NULL' standing in for blanks.
    rows = [['汽车名', '市场销售价', '厂商指导价', '车身颜色', '图片',
             '发动机', '变速箱', '车身结构', '详情页']]
    for info in cars_detail_info:
        rows.append([
            info[0].encode('utf-8'),
            info[1].encode('utf-8') or 'NULL',
            info[2].encode('utf-8') or 'NULL',
            ' '.join(info[3]).encode('utf-8') or 'NULL',
            info[4] or 'NULL',
            ' '.join(info[5]).encode('utf-8') or 'NULL',
            ' '.join(info[6]).encode('utf-8') or 'NULL',
            info[7].encode('utf-8') or 'NULL',
            info[8],
        ])
    print(u'写入csv文件...')
    with open('xcar.csv', 'wb') as csvfile:
        writer = csv.writer(csvfile, quoting=csv.QUOTE_MINIMAL)
        writer.writerows(rows)

def main():
    '''Crawl the homepage, fetch every car detail page in parallel, dump CSV.'''
    homepage = 'http://www.xcar.com.cn/'
    workers = Pool(processes=10)
    detail_urls = parse_homepage(homepage)
    # Fan the detail pages out across the pool; wait at most 120 s total.
    details = workers.map_async(get_car_detail_info, detail_urls).get(120)
    parse_cars_detail_info(details)
    save_csv(details)

# Script entry point: run the crawl only when executed directly.
if __name__ == '__main__':
    main()
