# 使用request + BeautifulSoup提取12365auto投诉信息
import numpy as np
import requests
from bs4 import BeautifulSoup
import pandas as pd
import threading
import queue
import time
import multiprocessing
import datetime

class ThreadCrawl(threading.Thread):
    """Crawler thread: pulls page numbers off pageQueue, downloads the
    corresponding complaint-list page and pushes the raw HTML onto dataQueue.

    Runs until the module-level CRAWL_EXIT flag is set to True by main().
    """

    def __init__(self, threadName, pageQueue, dataQueue, baseUrl):
        # Initialize the underlying Thread machinery.
        super(ThreadCrawl, self).__init__()
        # Human-readable thread name (used in log output).
        self.threadName = threadName
        # Queue of page numbers still to be fetched.
        self.pageQueue = pageQueue
        # Queue of fetched HTML documents awaiting parsing.
        self.dataQueue = dataQueue
        # URL template with a {} placeholder for the page number.
        self.baseUrl = baseUrl
        # Pretend to be a desktop browser so the site serves normal pages.
        self.headers = {'User-Agent':'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.101 Safari/537.36'}

    def run(self):
        while not CRAWL_EXIT:
            try:
                # Non-blocking get: raises queue.Empty immediately when no
                # page number is available instead of blocking forever.
                page = self.pageQueue.get(False)
            except queue.Empty:
                # No work right now; the original bare `except:` treated this
                # normal condition as an error and spammed the console.
                continue
            try:
                url = self.baseUrl.format(page)
                # timeout prevents a dead connection from hanging the thread.
                content = requests.get(url, headers=self.headers, timeout=10).text
                print(url)
                # Be polite to the server; frequent requests risk an IP ban.
                time.sleep(1)
                self.dataQueue.put(content)
            except requests.RequestException as e:
                # Report the failure and continue with the next page instead
                # of silently swallowing every exception type.
                print("ThreadCrawl abnormal: " + str(e))
        print("结束 " + self.threadName)



class ThreadParse(threading.Thread):
    """Parser thread: pulls raw HTML pages off dataQueue, extracts the
    complaint-table rows, and writes everything it parsed to one CSV file
    when it exits.

    Runs until the module-level PARSE_EXIT flag is set to True by main().
    """

    # Output column headers, shared by run() and parse() (the original code
    # duplicated this list in both methods).
    COLUMNS = ['投诉编号', '投诉品牌', '投诉车系', '投诉车型', '问题简述', '典型问题', '投诉时间', '投诉状态']

    def __init__(self, threadName, dataQueue, filename, lock):
        super(ThreadParse, self).__init__()
        # Human-readable thread name (used in log output).
        self.threadName = threadName
        # Queue of raw HTML documents produced by the crawler threads.
        self.dataQueue = dataQueue
        # CSV file this thread writes its parsed rows to.
        self.filename = filename
        # Lock guarding the shared module-level `total` counter.
        self.lock = lock

    def run(self):
        print("启动" + self.threadName)
        global total
        # Collect one DataFrame per page and concatenate once at the end:
        # pd.concat inside the loop (as in the original) is quadratic.
        frames = []
        while not PARSE_EXIT:
            try:
                html = self.dataQueue.get(False)
            except queue.Empty:
                # Queue momentarily empty — not an error; poll again.
                continue
            try:
                frames.append(self.parse(html))
                # `total` is mutated by multiple parser threads; the lock
                # was previously created but never used (race condition).
                with self.lock:
                    total = total + 1
            except Exception as e:
                # A malformed page must not kill the thread, but the
                # original bare `except: pass` hid every error silently.
                print("ThreadParse abnormal: " + str(e))
        if frames:
            df_result = pd.concat(frames, ignore_index=True)
        else:
            df_result = pd.DataFrame(columns=self.COLUMNS)
        df_result.to_csv(self.filename, encoding='utf-8', index=None)
        print("退出 " + self.threadName)

    def parse(self, html):
        """Extract all complaint rows from one page of HTML and return them
        as a DataFrame with the standard column headers."""
        soup = BeautifulSoup(html, 'html.parser')
        # The complaint listing lives inside <div class="tslb_b">.
        box = soup.find('div', class_="tslb_b")
        items = []
        for tr in box.find_all('tr'):
            cells = tr.find_all('td')
            # Header rows contain only <th>, so `cells` is empty there;
            # also skip short rows, which previously raised IndexError.
            if len(cells) >= 8:
                # One cell per output column, in the same order:
                # id, brand, series, model, summary, problem, date, status.
                items.append([cell.text.strip() for cell in cells[:8]])
        # Build straight from the row list: the original np.array(items)
        # breaks on an empty page (shape (0,) cannot match 8 columns).
        return pd.DataFrame(items, columns=self.COLUMNS)



# Shutdown flags polled by the worker threads; main() flips them to True
# once the corresponding queue has been drained.
CRAWL_EXIT = False
PARSE_EXIT = False
# URL template for the complaint listing; {} is replaced by the page number.
base_url = 'http://www.12365auto.com/zlts/0-0-0-0-0-0_0-0-0-0-0-0-0-{}.shtml'
# Number of pages successfully parsed, shared across all parser threads.
total = 0

#此种方法在小数据量可以，大数据量频繁请求IP会被禁止掉的
def main():
    """Crawl `pages` complaint-list pages with a pool of crawler threads,
    parse them with a pool of parser threads, then merge the per-thread
    CSV files into car_complain.csv.

    Note: suitable for small volumes only — aggressive crawling will get
    the client IP banned by the site.
    """
    pages = 200
    # Queue of page numbers to fetch (1..pages).
    pageQueue = queue.Queue(pages)
    for i in range(1, pages + 1):
        pageQueue.put(i)

    # Queue of fetched HTML source; no maxsize argument means unbounded.
    dataQueue = queue.Queue()

    # Lock shared by the parser threads (guards the global `total`).
    lock = threading.Lock()

    # Report the CPU count to help tune the pool sizes below.
    cpus = multiprocessing.cpu_count()
    print("cpu核数:" + str(cpus))
    # Network fetching is the slow part, so run more crawlers than parsers.
    craws = 6
    parsers = 2

    # Start the crawler threads.
    threadCraws = []
    for i in range(1, craws + 1):
        thread = ThreadCrawl("采集线程" + str(i) + "号", pageQueue, dataQueue, base_url)
        thread.start()
        threadCraws.append(thread)

    # Start the parser threads, one output CSV per thread.
    parseList = ["解析线程" + str(i) + "号" for i in range(1, parsers + 1)]
    threadParser = []
    for i, threadName in enumerate(parseList):
        filename = "car_complain_" + str(i + 1) + ".csv"
        thread = ThreadParse(threadName, dataQueue, filename, lock)
        thread.start()
        threadParser.append(thread)

    # Wait until every page number has been taken by a crawler.
    # (Sleep instead of the original `pass` busy-wait, which pinned a core.)
    while not pageQueue.empty():
        time.sleep(0.1)

    # pageQueue is drained: tell the crawler threads to exit their loops.
    global CRAWL_EXIT
    CRAWL_EXIT = True

    print("pageQueue为空")

    # Block until every crawler thread has finished.
    for thread in threadCraws:
        thread.join()
        print("1")

    # Wait until the parsers have drained all fetched HTML.
    while not dataQueue.empty():
        time.sleep(0.1)

    # dataQueue is drained: tell the parser threads to exit and write CSVs.
    global PARSE_EXIT
    PARSE_EXIT = True

    # Block until every parser thread has finished.
    for thread in threadParser:
        thread.join()
        print("2 ")

    print("谢谢使用！")
    print("Total:" + str(total))

    # Merge the per-thread CSV files into a single result file.
    columns = ['投诉编号', '投诉品牌', '投诉车系', '投诉车型', '问题简述', '典型问题', '投诉时间', '投诉状态']
    df_all = pd.DataFrame(columns=columns)
    for i in range(len(parseList)):
        filename = "car_complain_" + str(i + 1) + ".csv"
        # Bug fix: the original passed "r" as read_csv's second positional
        # argument, which is `sep` — conflicting with delimiter=','.
        df = pd.read_csv(filename, encoding='utf-8')
        df_all = pd.concat([df_all, df])
    df_all.to_csv('car_complain.csv', index=None)

if __name__ == "__main__":
    # Capture wall-clock timestamps around the crawl so the run can be timed.
    started_at = datetime.datetime.now()
    main()
    finished_at = datetime.datetime.now()
    print(started_at)
    print(finished_at)