# -*- coding:utf-8 -*-
# @Author:Cheng Lei 1037654919@qq.com
# @Time : 2022/10/20 下午1:47
# @FileName: scrapy_dongchedi.py
# @Software: PyCharm
# 爬取了懂车帝质量反馈数据

import requests
from bs4 import BeautifulSoup
import pandas as pd
import time
import re
from lxml import etree
from retrying import retry
import datetime
import urllib3
from tqdm import tqdm
import json

urllib3.disable_warnings()  # suppress InsecureRequestWarning caused by verify=False requests


def get_proxy():
    """Fetch one proxy dict from the local proxy-pool service.

    Assumes a proxy pool service is listening on 127.0.0.1:5010 (the scraping
    methods pass the result straight to ``requests`` as ``proxies=``).

    BUG FIX: a timeout is added so a dead proxy pool fails fast instead of
    blocking the whole crawl indefinitely.
    """
    return requests.get('http://127.0.0.1:5010/get/', timeout=5).json()


# shared request headers for all scraping calls; left empty here and
# populated by the operator if the site starts requiring them
headers = {}
#爬取懂车帝
class Dongchedi:
    """Scraper for dongchedi.com: model ids, community stats, quality
    feedback, user reviews (口碑) and price tables."""

    def __init__(self, log_flag=False):
        """Create a scraper.

        :param log_flag: optional verbosity switch stored for callers to
            inspect; no method in this class reads it yet.
        """
        self.log_flag = log_flag

    #第一步：获取懂车帝车型对应编号
    def get_car_num(self):
        """Step 1: crawl series pages for ids 1..6999 and build the
        id -> (series, style) mapping used by every other method.

        Saves a checkpoint file every 100 ids and the final mapping to
        car_num_list.xlsx. Ids whose page is missing or unparsable are kept
        with None names so the id column stays dense.

        BUG FIX: the final file used column name '编号' while the checkpoint
        files and every reader of car_num_list.xlsx (first_get_community,
        koubei_scores, get_avg_pricres) use '代号' — the inconsistent name
        made those readers fail on a freshly generated file.
        """
        base_url = 'https://www.dongchedi.com/auto/series/{}'
        print('开始获取懂车帝车型对应编号,base_url为：', base_url)
        datax = []
        for i in tqdm(range(1, 7000)):
            url = base_url.format(i)
            if i % 100 == 0:
                # periodic checkpoint + polite pause to avoid throttling
                print('目前爬取至{}/6212，网址：{}\n休息5秒'.format(i, url))
                pd.DataFrame(datax,
                             columns=['车系', '车型', '代号']).to_excel('car_num_list_{}.xlsx'.format(i))
                time.sleep(5)
            try:
                response = requests.get(url, timeout=10)
                soup2 = BeautifulSoup(response.text, 'lxml')
                spans = soup2.find_all('div', class_='new-main tw-overflow-hidden new')[0] \
                    .find('div', class_='breadcrumb_wrap__2GLeU').find('p').find_all('span')
                # breadcrumb spans: [... , series name, style name]
                car_series = spans[2].get_text()
                car_style = spans[3].get_text()
            except Exception:
                # missing page / layout change / network error: record the id anyway
                car_series = None
                car_style = None
            datax.append([car_series, car_style, i])

        pd.DataFrame(datax,
                     columns=['车系', '车型', '代号']).to_excel('car_num_list.xlsx')
        return

    #质量反馈,备用
    def get_quality(self):
        """Backup entry point: crawl every quality-feedback (质量反馈) page for
        the models in ``test_dic`` and save them to quality_<model>.xlsx.

        The Excel file is re-saved after each page so a crash loses at most
        one page of data.

        BUG FIX: rows produced by get_quality_text have 13 fields
        (quality_id and flag included), but the column list here had only 11
        names, so pd.DataFrame raised ValueError on the first save. The full
        13-name column list (matching the disabled block in
        first_get_community) is used now.
        """
        base_url_quality = 'https://www.dongchedi.com/community/{}/quality'
        test_dic = {'领克01': 1622}
        for key, value in test_dic.items():
            datay = []
            url = base_url_quality.format(value)
            response = requests.get(url, timeout=20, proxies=get_proxy(), verify=False)
            soup2 = BeautifulSoup(response.text, 'lxml')
            datas = soup2.find_all('div', class_='jsx-3811145822 tw-col-span-40 md:tw-col-span-30 tw-row-span-3 tw-order-2 md:tw-order-1 2xl:tw-px-60')[0]
            lis = datas.find('div', class_='jsx-1325911405 tw-flex tw-text-12 tw-text-common-black tw-justify-center tw-my-24') \
                .find('ul', class_='jsx-1325911405 tw-flex').find_all('li')
            try:
                # second-to-last pager <li> holds the max page number
                pagemax = int(lis[-2].get_text())
            except Exception:
                pagemax = 10
            print('begin {},code:{},url:{},pages:{}'.format(key, value, url, pagemax))
            for page in range(1, pagemax + 1):
                url2 = url + '-{}'.format(page)
                datax = Dongchedi.get_quality_text(self, url2)
                datay += datax
                pd.DataFrame(datay,
                             columns=['quality_id', '平台', 'flag', '车型名', '车型代号', '链接', '用户名',
                                      '描述', '时间信息', '爬取时间', '评论数', '点赞数', '质量反馈']). \
                    to_excel('quality_{}.xlsx'.format(key))

    #获取质量反馈详细信息
    def get_quality_text(self, url, length=30):
        """Parse one quality-feedback list page.

        :param url: page URL; must contain the numeric car code.
        :param length: maximum number of feedback entries to read.
        :return: list of 13-element rows:
            [quality_id, platform, flag, car_name, car_code, href, user_name,
             car_info, time_info, scrape_time, comment_count, agree_count,
             feedback_text]
        """
        print('begin url:', url)
        datax = []
        car_code = re.findall(r'\d+', url)[0]

        # BUG FIX: a bare @retry retries forever on any error, which can hang
        # the whole crawl on one permanently broken page; attempts are bounded.
        @retry(stop_max_attempt_number=5, wait_fixed=2000)
        def get_func(u):
            s = requests.session()
            s.close()  # drop stale pooled connections
            response = requests.get(u, timeout=10, headers=headers, proxies=get_proxy(), verify=False)
            return BeautifulSoup(response.text, 'lxml')

        soup2 = get_func(url)
        try:
            car_name = soup2.find_all('div', class_='breadcrumb_root__3OyEN')[0].find(
                'span', class_='tw-leading-20 undefined icon-arrow-ic-r').get_text()
        except Exception:
            car_name = None
        sections = soup2.find_all('div',
                                  class_='jsx-3811145822 tw-col-span-40 md:tw-col-span-30 tw-row-span-3 tw-order-2 md:tw-order-1 2xl:tw-px-60')[0]. \
            find('div', class_='jsx-3811145822 data-wrapper tw-pt-12').find_all('section')
        for i in range(min(length, len(sections))):
            try:
                user_name = sections[i].find('div', class_='tw-flex tw-items-center'). \
                    find('div', class_='tw-ml-16 tw-flex tw-flex-col tw-justify-center').get_text()
            except Exception:
                user_name = None
            try:
                car_info = sections[i].find('div', class_='jsx-81802501 jsx-2089696349 tw-mt-12').get_text()
            except Exception:
                car_info = None
            # the feedback block uses tw-mt-6 or tw-mt-12 depending on layout;
            # try both before giving up
            try:
                quality_fankui = sections[i].find('div',
                                                  class_='jsx-81802501 jsx-2089696349 tw-flex tw-overflow-hidden tw-mt-6').get_text()
                href = 'https://www.dongchedi.com' + sections[i].find('div',
                                                                      class_='jsx-81802501 jsx-2089696349 tw-flex tw-overflow-hidden tw-mt-6').find(
                    'a', href=True)['href']
            except Exception:
                try:
                    quality_fankui = sections[i].find('div',
                                                      class_='jsx-81802501 jsx-2089696349 tw-flex tw-overflow-hidden tw-mt-12').get_text()
                    href = 'https://www.dongchedi.com' + sections[i].find('div',
                                                                          class_='jsx-81802501 jsx-2089696349 tw-flex tw-overflow-hidden tw-mt-12'). \
                        find('a', href=True)['href']
                except Exception:
                    quality_fankui = None
                    href = url
            try:
                time_info = sections[i].find('div',
                                             class_='jsx-1875074220 tw-flex tw-items-center tw-flex-1 tw-overflow-hidden tw-mr-24 tw-text-12 md:tw-text-14 xl:tw-text-14'). \
                    find('span').get_text().split('回复')[0]
            except Exception:
                time_info = None
            now = datetime.datetime.now()
            try:
                count = sections[i].find('div',
                                         class_='jsx-1875074220 right tw-flex tw-items-center tw-flex-none tw-text-12 md:tw-text-14 xl:tw-text-14').get_text()
                count_comment = re.findall(r'\d+', count)[0]
                count_agree = re.findall(r'\d+', count)[1]
            except Exception:
                count_comment = 0
                count_agree = 0
            flag = 0  # marker field consumed downstream
            # BUG FIX: quality_id is a module-level counter but was never
            # initialised anywhere, so the first call raised NameError.
            global quality_id
            try:
                quality_id += 1
            except NameError:
                quality_id = 1
            datax.append(
                [quality_id, 'dongchedi', flag, car_name, car_code, href, user_name, car_info, time_info, now,
                 count_comment, count_agree, quality_fankui])
        return datax


    #首次获取社区统计，基于车型编号；首次执行时需要，后续不需要执行了，由get_community(self)代替
    def first_get_community(self):
        """First-run collection of community post counts per model.

        Reads car_num_list.xlsx, keeps only Geely-group brands, queries the
        post count of every community tab for each model and saves the table
        to community_count_geely.xlsx. Per the author's note, superseded by
        get_community() for subsequent runs.
        """
        base_url ='https://www.dongchedi.com/community/{}/{}'
        # '' = all posts; the rest are the site's community tab slugs
        community_list=['','hot','selected','energy','modification','wenda','pricediscuss','quality','dakareyi','shangjia']
        files = pd.read_excel('car_num_list.xlsx')
        datax=[]
        files = files.fillna('')
        # NOTE(review): 'nase_url' in this banner is a typo for 'base_url';
        # left untouched because it is a runtime string
        print('\nbegin community count,nase_url:{},\n'
              'all items:{}\n'
              '车型数目：{}'.format(base_url,community_list,len(files)))
        for i in range(len(files)):
            series =files.iloc[i]['车系']
            style =files.iloc[i]['车型']
            code = files.iloc[i]['代号']
            if style== '':
                continue
            # restrict the crawl to Geely-group brands
            if series not in ['几何汽车','吉利汽车','极氪','领克']:
                continue

            now = datetime.datetime.now()
            counts=[series,style,code,now]
            print(style,base_url.format(code,'{}'))
            for name in community_list:
                url =base_url.format(code,name)
                try:
                    count = Dongchedi.get_all_count(self,url)
                except:
                    count = 0
                counts.append(count)
            datax +=[counts]

            # quality feedback was also crawled on the first run (disabled below)
            '''
            base_url_quality = 'https://www.dongchedi.com/community/{}/quality'
            datay =[]
            url_quality = base_url_quality.format(code)
            try:
                s=requests.session()
                s.close()
                response = requests.get(url_quality, timeout=20,proxies=get_proxy(),verify=False)
                html2 = response.text
                soup2 = BeautifulSoup(html2, 'lxml')
                datas =soup2.find_all('div',class_ ='jsx-3811145822 tw-col-span-40 md:tw-col-span-30 tw-row-span-3 tw-order-2 md:tw-order-1 2xl:tw-px-60')[0]
                lis =datas.find('div',class_ ='jsx-1325911405 tw-flex tw-text-12 tw-text-common-black tw-justify-center tw-my-24')\
                    .find('ul',class_='jsx-1325911405 tw-flex').find_all('li')
                pagemax=int( lis[-2].get_text())
            except:
                pagemax =10
            print('begin {},code:{},url:{},pages:{}'.format(style, code, url_quality,pagemax))
            for page in range(1,pagemax+1):
                url2 =url_quality+'-{}'.format(page)
                dataxx = Dongchedi.get_quality_text(self,url2)
                datay += dataxx

            # pd.DataFrame(datay,
            #                  columns=['quality_id','平台','flag','车型名','车型代号','链接','用户名',
            #                           '描述', '时间信息', '爬取时间', '评论数', '点赞数', '质量反馈']).\
            #     to_excel('quality_{}.xlsx'.format(style))
            df3 = pd.DataFrame(datay,
                               columns=['quality_id','pingtai','flag','car_style_name','car_code','href','user_id',
                                        'quality_desc','time_info','scrapy_time','count_comment','count_agree','quality_text'])
            try:

                df3.to_sql('dongchedi_community_quality', engine, if_exists='append', index=False)
                print('{}入库成功'.format(style))
            except:
                print('{}入库失败'.format(style))
                pass
            '''
        pd.DataFrame(datax,
                     columns=['车系', '车型', '代号','爬取时间', '总动态数', '热门', '精华',
                              '能耗','改装', '问答', '价格讨论', '质量反馈', '评车','商家'])\
            .to_excel('community_count_geely.xlsx')
        # NOTE(review): df below (English column names, presumably intended
        # for a DB insert like the disabled block above) is built but never
        # saved or returned — dead code, left untouched.
        df = pd.DataFrame(datax,
                     columns=['car_series','car_style_name','car_code','scrapy_time',
                              'all_community_count','hot_community_count','selected_community_count',
                              'energy_community_count','modification_community_count','wenda_community_count','pricediscuss_community_count',
                              'quality_community_count','dakareyi_community_count','shangjia_community_count'])



    # 问答
    def get_community_wenda(self):
        """Crawl the Q&A (wenda) community feed for each model through the
        JSON API and append new posts to wenda_test_<model>.xlsx.

        De-duplication uses the module-level list ``hreflist`` (created in
        the __main__ block); calling this from another entry point without
        defining it first raises NameError.

        BUG FIX: each row carries 12 fields (flag included) but the column
        list had only 11 names, so every to_excel raised ValueError and the
        broad except silently dropped the whole page. A 'flag' column is now
        included.
        """
        # Previously the HTML community pages were scraped directly:
        # base_url = 'https://www.dongchedi.com/community/{}/{}'
        # community_list = ['', 'hot', 'selected', 'energy', 'modification', 'wenda', 'pricediscuss', 'quality',
        #                   'dakareyi', 'shangjia']
        # In production the model list comes from the DB count table:
        # sql1 = 'select  *  from gic_bigdata.di_portal_user_complain_dongchedi_quality_count;'
        # count_data = pd.read_sql(sql1, engine_prodcet)
        count_data = pd.DataFrame([['理想汽车', '理想L9', '5227']], columns=['name', 'style', 'code'])
        for i in range(len(count_data)):
            style = count_data.iloc[i]['name']
            code = count_data.iloc[i]['code']
            url = 'https://www.dongchedi.com/motor/pc/ugc/community/cheyou_list?aid=1839&app_name=auto_web_pc&series_or_motor_id={}&sort_type=1&tab_name=wenda&offset=0&count=30'.format(
                code)
            try:
                html2 = requests.get(url, timeout=10, proxies=get_proxy(), verify=False).text
            except Exception:
                html2 = None
                print('获取data失败')
            s = requests.session()
            s.close()  # drop pooled connections between requests
            time.sleep(1)  # polite delay between models

            datay = []
            try:
                jsondata = json.loads(html2)
                datas = jsondata['data']['cheyou_list']
                if datas is not None:
                    for data in datas:
                        gid = data['gid']
                        href = 'https://www.dongchedi.com/ugc/article/{}'.format(gid)
                        if [href] in hreflist:
                            continue  # already collected in a previous pass
                        hreflist.append([href])
                        platform = 'dongchedi'
                        flag = 0
                        vehname = style
                        vehcode = code
                        scrapytime = datetime.datetime.now()
                        username = data['profile_info']['name']
                        desc = data['title']
                        # convert epoch seconds to '2016-05-05 20:28:54' style
                        time_local = time.localtime(data['display_time'])
                        display_time = time.strftime("%Y-%m-%d %H:%M:%S", time_local)
                        digg_count = data['digg_count']
                        comment_count = data['comment_count']
                        content = data['content']
                        datay.append(
                            [platform, flag, vehname, vehcode, href, username, desc, display_time, scrapytime,
                             comment_count, digg_count, content])
                        pd.DataFrame(datay,
                                     columns=['平台', 'flag', '车型名', '车型代号', '链接', '用户名', '描述',
                                              '时间信息', '爬取时间', '评论数', '点赞数', '质量反馈']).to_excel(
                            'wenda_test_{}.xlsx'.format(style))
                # Production DB insert kept for reference:
                # df3 = pd.DataFrame(datay, columns=[...])
                # df3.insert(0, 'quality_id', None)
                # df3.to_sql('di_portal_user_complain_dongchedi', engine_prodcet, if_exists='append', index=False)
            except Exception:
                print('data解析失败')
        return print('end function get_community')

    #获取社区统计之动态数量，公用 get_community调用
    #获取社区统计之动态数量，公用 get_community调用
    def get_all_count(self, url):
        """Return the post count shown on a community page.

        Tries up to 5 times and returns 0 when the count cannot be extracted.

        BUG FIX: the try/except used to wrap the whole loop, so the first
        network/parse error aborted all remaining attempts and the "retry 5
        times" never actually retried. Each attempt is now isolated.
        """
        count = 0
        for _ in range(5):
            try:
                s = requests.session()
                s.close()  # drop stale pooled connections
                response = requests.get(url, timeout=20, headers=headers, proxies=get_proxy(), verify=False)
                soup2 = BeautifulSoup(response.text, 'lxml')
                datas = soup2.find_all('div', class_='jsx-3811145822 tw-grid tw-grid-cols-40 tw-gap-12 tw-mt-40')[0]. \
                    find('div',
                         class_='jsx-3811145822 tw-flex tw-w-full tw-h-auto tw-items-center tw-justify-between tw-mt-12 tw-px-16')
                count = re.findall(r'\d+', datas.text)[0]
                if count:
                    break
            except Exception:
                count = 0
        return int(count)

    #以下功能备用
    #基于车型编号,获取所有车系的评分
    def koubei_scores(self):
        """For every model in car_num_list.xlsx, scrape its score page and
        save the aggregated ratings to koubei_scores.xlsx, with a checkpoint
        to koubei_scores_1.xlsx every 100 models.
        """
        base_url ='https://www.dongchedi.com/auto/series/score/{}-x-x-x-x-x-x'
        files = pd.read_excel('car_num_list.xlsx')
        datax=[]
        files = files.fillna('')
        print('\n begin koubei_scores ,base_url:{}\n'
              '车型数目：{}'.format(base_url,len(files)))
        for i in range(0,len(files)):
            series =files.iloc[i]['车系']
            style =files.iloc[i]['车型']
            code = files.iloc[i]['代号']
            if style== '':
                continue
            counts=[series,style,code]
            print(style,base_url.format(code,'{}'))
            url =base_url.format(code)
            # get_koubei_scores returns [time, rater_count, 8 score fields]
            count = Dongchedi.get_koubei_scores(self,url)
            counts += count
            datax.append(counts)
            if i%100==99: # checkpoint save + pause every 100 models
                pd.DataFrame(datax,
                         columns=['车系', '车型', '代号','爬取时间','评价人数', '综合', '外观', '内饰', '配置', '空间', '舒适性',
                                  '操控', '动力']).to_excel(
                'koubei_scores_1.xlsx')
                print('保存历史数据成功，休息一下：10s')
                time.sleep(10)
        pd.DataFrame(datax,
                     columns=['车系', '车型', '代号','爬取时间', '评价人数', '综合', '外观', '内饰','配置', '空间', '舒适性', '操控','动力']).to_excel(
            'koubei_scores.xlsx')
        print('end function')
        return
    #获取车型评分
    #获取车型评分
    def get_koubei_scores(self, url):
        """Scrape one score page and return
        [scrape_time, rater_count, overall, exterior, interior, config,
        space, comfort, control, power]; falls back to zeros whenever the
        page layout does not match the xpaths.

        CLEANUP: the response was previously also parsed with BeautifulSoup
        (result unused) and a fresh session was opened/closed for nothing;
        only the lxml tree is actually used.
        """
        # example: url = 'https://www.dongchedi.com/auto/series/score/4857-x-x-x-x-x-x'
        response = requests.get(url, timeout=5)
        html_tree = etree.HTML(response.text)
        now = datetime.datetime.now()
        try:
            result = html_tree.xpath('/html/body/div[1]/div[1]/div[2]/div[2]/section[1]/div/div[1]/div[2]/div[2]/div/span')
            number = int(re.findall(r'\d+', result[0].text)[0])
            results = html_tree.xpath('/html/body/div[1]/div[1]/div[2]/div[2]/section[1]/div/div[2]/div[2]')
            # eight fixed-position <ul> entries, one per score category
            comp = results[0].xpath('ul[1]/li[2]')[0].text
            view = results[0].xpath('ul[2]/li[2]')[0].text
            Interior = results[0].xpath('ul[3]/li[2]')[0].text
            config = results[0].xpath('ul[4]/li[2]')[0].text
            space = results[0].xpath('ul[5]/li[2]')[0].text
            Comfort = results[0].xpath('ul[6]/li[2]')[0].text
            Control = results[0].xpath('ul[7]/li[2]')[0].text
            power = results[0].xpath('ul[8]/li[2]')[0].text
        except Exception:
            # layout mismatch / empty page: zero everything out
            number = 0
            comp = 0.0
            view = 0.0
            Interior = 0.0
            config = 0.0
            space = 0.0
            Comfort = 0.0
            Control = 0.0
            power = 0.0
        return [now, number, comp, view, Interior, config, space, Comfort, Control, power]
    #口碑获取
    #口碑获取
    def get_koubei(self):
        """Crawl user-review (口碑) pages for every model in ``test_dic`` and
        save one koubei_<model>.xlsx per model, re-saved after each page.

        BUG FIX: every review page used to be fetched here and the response
        thrown away, then fetched again inside get_koubei_text — one request
        per page now. The unused ``urls1`` sample list was removed.
        """
        # full Lynk & Co model -> series-id mapping, kept for reference
        lingke_dic = {'领克06': 4342, '领克09 PHEV': 5283, '领克01': 1622, '领克09': 5081, '领克03': 1623,
                      '领克05': 3463, '领克02 Hatchback': 4970, '领克02': 2765,
                      '领克01 PHEV': 2883, '领克06 PHEV': 4340, '领克05 PHEV': 4156}
        test_dic = {'领克05': 3463, '领克02 Hatchback': 4970, '领克02': 2765,
                    '领克01 PHEV': 2883, '领克06 PHEV': 4340, '领克05 PHEV': 4156}
        for key, value in test_dic.items():
            time.sleep(10)  # polite pause between models
            count_susss = 0
            count_fails = 0
            basic_url = 'https://www.dongchedi.com/auto/series/score/{}-x-x-x-x-x'
            url = basic_url.format(value)
            datay = []
            try:
                html = requests.get(url, timeout=10).text
                soup = BeautifulSoup(html, 'lxml')
                datas = soup.find_all('div',
                                      class_='jsx-1325911405 tw-flex tw-text-12 tw-text-common-black tw-justify-center tw-my-24')
                # second-to-last pager <li> holds the max page number
                pagemax = int(datas[0].find_all('li')[-2].text)
            except Exception:
                pagemax = 10
            print('begin:{} 车型，共有页数：{},主网页：{}'.format(key, pagemax, url))
            for page in range(pagemax):
                time.sleep(1)
                url2 = 'https://www.dongchedi.com/auto/series/score/{}-x-S0-x-x-x-{}'.format(value, page + 1)
                print('begin page :{} 主页：{}'.format(page + 1, url2))
                try:
                    datax, count_suss, count_fail = Dongchedi.get_koubei_text(self, url2)
                    datay += datax
                    # re-save after every page so a crash loses at most one page
                    pd.DataFrame(datay,
                                 columns=['车型', '用户名', '驾龄', '购车时间', '购车城市', '购车价格', '油耗', '综合评分',
                                          '外观', '内饰',
                                          '配置', '空间', '舒适性', '操控', '动力', '懂车分', '评价时间', '评论数',
                                          '点赞数', '口碑文本', '评论']).to_excel('koubei_{}.xlsx'.format(key))
                    count_susss += count_suss
                    count_fails += count_fail
                except Exception:
                    pass

            print('{} 车型结束,成功率为：{}/{}'.format(key, count_susss, count_susss + count_fails))
    #获取口碑页详细信息
    def get_koubei_text(self, url2):
        """Parse one review (口碑) list page.

        :param url2: full page URL, e.g. .../score/<code>-x-S0-x-x-x-<page>
        :return: (rows, success_count, fail_count) where each row is the
            21-field record saved by get_koubei.

        BUG FIX: the caller (get_koubei) unpacks three values, but this
        method used to return only the row list — every call raised
        ValueError, silently swallowed by the caller's except, so no review
        was ever saved. Success/fail counters are now tracked and returned.
        """
        datax = []
        count_suss = 0
        count_fail = 0
        response = requests.get(url2, timeout=5)
        soup2 = BeautifulSoup(response.text, 'lxml')
        datas = soup2.find_all('article',
                               class_='tw-grid tw-grid-cols-40 tw-mt-12 tw-text-12 tw-leading-5 tw-bg-white tw-py-16 tw-px-12')
        print(url2, ' 条数:', len(datas))

        def pick_text(items, idx):
            # safe positional lookup: None when the element is missing
            try:
                return items[idx].text
            except Exception:
                return None

        for i in range(len(datas)):
            try:
                user = datas[i].find_all('aside', class_='tw-col-span-40 xl:tw-col-span-12 xl:tw-pr-12')[0].find('header')
                user_id = user.find('div', class_='tw-ml-12 tw-flex tw-flex-col tw-justify-center').find('h2').find(
                    'a').get_text()
                drive_year = user.find('div', class_='tw-ml-12 tw-flex tw-flex-col tw-justify-center').find('h3').get_text()

                infos = datas[i].find_all('aside', class_='tw-col-span-40 xl:tw-col-span-12 xl:tw-pr-12')[0].find('section')
                vehicle = infos.find('span', class_='tw-flex-1').get_text()
                scores = infos.find('div', class_='tw-flex xl:tw-block')
                # purchase details: time / city / price / fuel consumption
                buy_info = scores.find_all('', class_='tw-font-semibold xl:tw-font-normal')
                buy_time = pick_text(buy_info, 0)
                buy_city = pick_text(buy_info, 1)
                buy_price = pick_text(buy_info, 2)
                buy_iol = pick_text(buy_info, 3)
                # eight fixed-position score items
                score = scores.find_all('p', class_='styles_score-item__2KcxU')
                comp = pick_text(score, 0)
                view = pick_text(score, 1)
                Interior = pick_text(score, 2)
                config = pick_text(score, 3)
                space = pick_text(score, 4)
                Comfort = pick_text(score, 5)
                Control = pick_text(score, 6)
                power = pick_text(score, 7)
                try:
                    koubei = datas[i].find_all('section', class_='tw-col-span-40 tw-pt-16 xl:tw-col-span-28 tw-p-12 xl:tw-pt-0')[0]
                    user_score = koubei.find('header').find('a').get_text()
                    koubei_text = koubei.find('p', class_=True).text
                    comment = koubei.find('section').get_text()
                    koubei_info = koubei.find('div',
                                              class_='tw-flex tw-justify-between tw-text-color-gray-700 tw-mt-6 tw-mb-4 tw-text-14 tw-leading-20')
                    try:
                        koubei_time = koubei_info.find('span').get_text()
                        count = koubei_info.find('div').get_text()
                        # first number = comment count, last = like count
                        number = re.findall(r"\d+", count)
                        count_comment = number[0]
                        count_agree = number[-1]
                    except Exception:
                        koubei_time = None
                        count_comment = 0
                        count_agree = 0
                except Exception:
                    user_score = None
                    koubei_time = None
                    count_comment = None
                    koubei_text = None
                    comment = None
                    count_agree = None
                datax.append([vehicle, user_id, drive_year, buy_time, buy_city, buy_price, buy_iol, comp, view, Interior,
                              config, space, Comfort, Control, power, user_score, koubei_time, count_comment, count_agree,
                              koubei_text, comment])
                count_suss += 1
            except Exception:
                count_fail += 1
                print(url2, ':第{}条获取失败'.format(i))
        return datax, count_suss, count_fail

    # 获取各车型各款价格
    def get_avg_pricres(self):
        """For every model in car_num_list.xlsx, scrape its params page and
        save price list / average / median (plus the trim at the median) to
        median_pricres.xlsx, with a checkpoint every 100 models.
        """
        print('begin:get_avg_pricres')
        start = time.time()
        base_url ='https://www.dongchedi.com/auto/params-carIds-x-{}'
        files = pd.read_excel('car_num_list.xlsx')
        datax=[]
        files = files.fillna('')
        # NOTE(review): the banner says 'koubei_scores' but this method
        # collects prices; runtime string left untouched
        print('\n begin koubei_scores ,base_url:{}\n'
              '车型数目：{}'.format(base_url,len(files)))
        for i in range(0,len(files)):  #len(files)
            series =files.iloc[i]['车系']
            style =files.iloc[i]['车型']
            code = files.iloc[i]['代号']
            if style== '':
                continue
            counts=[series,style,code]
            print(style,base_url.format(code,'{}'))
            url =base_url.format(code)
            prices = Dongchedi.get_avg_pricres_detail(self,url)
            counts += prices #prices=[priceslist,avg_price,median_price,median_trim]
            datax.append(counts)
            if i%100==99: # checkpoint save + pause every 100 models
                pd.DataFrame(datax,
                         columns=['车系', '车型', '代号','价格列表','平均价格', '价格中位数','中位数对应车型']).to_excel(
                'median_pricres_0.xlsx')
                print('保存历史数据成功，休息一下：10s')
                time.sleep(10)
                end = time.time()
                print('耗时：',round(end - start, 3))
        pd.DataFrame(datax,
                     columns=['车系', '车型', '代号','价格列表','平均价格', '价格中位数','中位数对应车型']).to_excel(
            'median_pricres.xlsx')
        end = time.time()
        print('end function,耗时：',round(end - start, 3))
        return
    def get_avg_pricres_detail(self, url):
        """Scrape the params page at *url* and return
        [sorted_price_list, avg_price, median_price, median_trim_name];
        returns [None, 0, 0, None] on any failure.

        CLEANUP: an unused lxml parse (html_tree) and a pointless
        session-open/close were removed; behavior is unchanged — the
        returned price list is sorted and the upper median is used for
        even-length lists, exactly as before.
        """
        # example: url = 'https://www.dongchedi.com/auto/params-carIds-x-4802'
        priceslist = []
        try:
            response = requests.get(url, timeout=5)
            soup2 = BeautifulSoup(response.text, 'lxml')
            datas = soup2.find('div', class_='table_root__14vH_ table_head__FNAvn')
            # row 0 = trim names, row 1 = prices ('NN.N万')
            namelist = datas.find_all('div', class_='table_row__yVX1h')[0].find_all('div', class_='table_col__3Pc3_ table_is-head-col__1sAQG')
            pricesdata = datas.find_all('div', class_='table_row__yVX1h')[1].find_all('div', class_='table_col__3Pc3_ table_is-head-col__1sAQG')
            names = [name.text.split('+对比')[0] for name in namelist]
            priceslist = [float(l.text.split('万')[0]) for l in pricesdata]
            # NOTE(review): identical prices collapse to one dict entry here,
            # so the trim looked up for the median is the *last* one at that
            # price — same as the original behavior.
            dics = dict(zip(priceslist, names))
            avg_price = sum(priceslist) / len(priceslist)
            priceslist.sort()  # in place: the returned list is sorted
            median_price = priceslist[len(priceslist) // 2]  # upper median for even counts
            prices = [priceslist, avg_price, median_price, dics[median_price]]
        except Exception:
            prices = [None, 0, 0, None]
        return prices
    #配置信息
    #配置信息
    def veh_config(self):
        """Placeholder for vehicle-configuration scraping; not implemented."""
        return None

if __name__ =='__main__':
    dongchedi =Dongchedi()
    # module-level de-dup list read and appended by get_community_wenda
    hreflist =[]
    # # dongchedi.get_car_num()
    # # dongchedi.first_get_community()
    # dongchedi.get_community() # based on the collected community stats; run every 30 min to check for updates, used to decide whether to refresh the quality-feedback table
    # dongchedi.koubei_scores()

    # dongchedi.get_avg_pricres()
    # # send a mail notification when the job finishes
    # from qqmail_smtp import mail
    # mailtext='懂车帝中端配置车辆价格爬取成功，请留意。'
    # mailbox = '1037654919@qq.com'
    # mail(mailtext,mailbox)

    # test entry point currently in use
    # dongchedi.get_car_num()
    dongchedi.get_community_wenda()
