import requests
from lxml import etree
import csv
import os
import time
import json
import pandas as pd
import re
import django
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'djangoProject2.settings')
django.setup()
from myApp.models import CarMessage

class Spider(object):
    """Crawler for dongchedi.com car sales rankings.

    Workflow: ``init_csv()`` prepares ./temp.csv, ``main()`` fetches ranking
    pages from the JSON API (resuming from the offset stored in
    ./spiderPage.txt) and enriches each car with spec details scraped from
    its parameter page, then ``saveToSql()`` loads the de-duplicated CSV
    rows into the ``CarMessage`` Django model.
    """

    def __init__(self):
        # Ranking API endpoint; city_name is URL-encoded Chinese ("宜昌"),
        # page size is fixed at count=20.
        self.SpiderUrl = 'https://www.dongchedi.com/motor/pc/car/rank_data?aid=1839&app_name=auto_web_pc&city_name=%E5%AE%9C%E6%98%8C&count=20&month=&new_energy_type=&rank_data_type=11&brand_id=&price=&manufacturer=&outter_detail_type=&nation=0'
        # Mobile UA so the site serves the markup the XPath expressions below expect.
        self.headers = {
            'User-Agent' : 'Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Mobile Safari/537.36'
        }

    def init_csv(self):
        """Create ./temp.csv with the header row, if it does not exist yet."""
        if not os.path.exists('./temp.csv'):
            # newline='' is required by the csv module to avoid blank rows on
            # Windows (saveToCsv below already did this; the original header
            # write did not).
            with open('./temp.csv', 'w', encoding='utf-8', newline='') as f:
                writer = csv.writer(f)
                writer.writerow(["brandName", "seriesName", "carImg", "sale", "price",
                                 "manufacture", "rank", "carModel", "energyType",
                                 "marketTime", "inSure"])

    def get_page(self):
        """Return the last recorded crawl offset (as a string) from ./spiderPage.txt."""
        with open('./spiderPage.txt', 'r') as g_p:
            return g_p.readlines()[-1].strip()

    def set_page(self, newPage):
        """Append *newPage* as the new current offset to ./spiderPage.txt."""
        with open('./spiderPage.txt', 'a') as s_p:
            s_p.write('\n' + str(newPage))

    def main(self):
        """Crawl ranking batches until interrupted, appending rows to ./temp.csv.

        The original implementation recursed into itself after every batch,
        which would eventually raise RecursionError; this version loops
        instead, with identical per-batch behavior.
        """
        while True:
            count = self.get_page()
            params = {
                'offset': int(count)
            }
            print("数据从{}条开始爬取".format(int(count) + 1))
            pageJson = requests.get(self.SpiderUrl, headers=self.headers, params=params).json()
            pageJson = pageJson["data"]["list"]
            for index, car in enumerate(pageJson):
                carData = []
                print("正在爬取第%d" % (index + 1) + "条数据")
                carData.append(car["brand_name"])      # brand
                carData.append(car["series_name"])     # series (model line) name
                carData.append(car["image"])           # image URL
                carData.append(car["count"])           # sales volume
                # Price range is stored as a two-element list; it is written to
                # the CSV as its str() repr and saved to the DB in that form.
                carData.append([car["min_price"], car["max_price"]])
                carData.append(car["sub_brand_name"])  # manufacturer
                carData.append(car["rank"])            # rank in this listing
                # Second page: per-series spec sheet.
                carData.extend(self._fetch_detail(car["series_id"]))
                print(carData)
                self.saveToCsv(carData)
            # NOTE(review): the API returns 20 rows per request (count=20 in
            # the URL) but the offset only advances by 10, so consecutive
            # batches overlap; duplicates are later dropped in clearCsv().
            self.set_page(int(count) + 10)

    def _fetch_detail(self, series_id):
        """Scrape the spec page of one series and return
        [carModel, energyType, marketTime, inSure].

        Raises IndexError when an XPath matches nothing (e.g. the page layout
        changed) — the same failure mode as the original inline code.
        """
        infoHTML = requests.get(
            "https://www.dongchedi.com/auto/params-carIds-x-%s" % series_id,
            headers=self.headers)
        infoHTMLpath = etree.HTML(infoHTML.text)
        # Debug snapshot of the last fetched spec page (overwritten each call).
        with open('car_info.html', 'w', encoding='utf-8') as file:
            file.write(infoHTML.text)
        # Vehicle class / body style
        carModels = infoHTMLpath.xpath("//div[@data-search-id='jb级别']//div[@class='index_grid-col__TeSvP']/text()")[0]
        # Energy type (fuel / EV / hybrid ...)
        energyType = infoHTMLpath.xpath("//div[@data-search-id='fuel_form能源类型']/div[3]/text()")[0]
        # Market launch time
        marketTime = infoHTMLpath.xpath("//div[@data-search-id='market_time上市时间']/div[2]/text()")[0]
        # Warranty period
        inSure = infoHTMLpath.xpath("//div[@data-search-id='period整车保修期限']/div[2]/text()")[0]
        return [carModels, energyType, marketTime, inSure]

    def saveToCsv(self, resultData):
        """Append one scraped row to ./temp.csv."""
        with open('./temp.csv', 'a', encoding='utf-8', newline='') as f:
            writer = csv.writer(f)
            writer.writerow(resultData)

    def clearCsv(self):
        """Load ./temp.csv, drop empty and duplicate rows, and return the
        remaining rows as a numpy array (``DataFrame.values``)."""
        df = pd.read_csv('./temp.csv')
        # Drop rows with missing values.
        df.dropna(inplace=True)
        # Drop exact duplicates (the overlapping offsets in main() produce them).
        df.drop_duplicates(inplace=True)
        print("总数量：%d" % df.shape[0])
        return df.values

    def saveToSql(self):
        """Insert every cleaned CSV row into the CarMessage table.

        Column order must match the header written by init_csv().
        """
        data = self.clearCsv()
        for car in data:
            CarMessage.objects.create(
                brandName = car[0],
                seriesName = car[1],
                carImg = car[2],
                sale = car[3],
                price = car[4],
                manufacture = car[5],
                rank = car[6],
                carModel = car[7],
                energyType = car[8],
                marketTime = car[9],
                inSure = car[10],
            )

if __name__ == '__main__':
    # Two-phase workflow: first run init_csv() + main() to crawl rows into
    # ./temp.csv, then run saveToSql() to load the cleaned rows into the DB.
    spider = Spider()
    # spider.init_csv()
    # spider.main()
    spider.saveToSql()