import csv
import requests
import re
from lxml import etree
from fontTools.ttLib import TTFont


def get_jk_1():
    """Collect (seriesId, koubeiCount) pairs for fueltype 3 and 4 from the
    autohome scene-select API and append them to url1.csv.

    Side effects: network requests; appends rows to url1.csv.
    """
    # Request target and headers are loop-invariant — build them once.
    url = 'https://k.autohome.com.cn/ajax/getSceneSelectCar'
    headers = {
        "accept": "application/json, text/javascript, */*; q=0.01",
        "accept-encoding": "gzip, deflate, br",
        "accept-language": "zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6",
        "cache-control": "no-cache",
        "cookie": "fvlid=1656924201724ySAcFOPXMC; sessionip=183.134.206.31; sessionid=D289E00F-691A-4D00-99B2-917D7A06EC07%7C%7C2022-07-04+16%3A43%3A31.021%7C%7Ccn.bing.com; autoid=57d78233cf805efb749ee48cbcd85866; area=330199; sessionuid=D289E00F-691A-4D00-99B2-917D7A06EC07%7C%7C2022-07-04+16%3A43%3A31.021%7C%7Ccn.bing.com; __ah_uuid_ng=c_D289E00F-691A-4D00-99B2-917D7A06EC07; sessionvid=F95FF07D-6EBB-4A87-9CCE-525B41FA808C; ahsids=2967; s_cfb4c=5; clubUserShow=61582310|692|2|%E6%B8%B8%E5%AE%A2|0|0|0||2022-07-05+12%3A37%3A07|0; clubUserShowVersion=0.1; ahpvno=34; pvidchain=3454440,3311664,3311255,2099124,2099124,2099124,2099124,3311255; v_no=10; visit_info_ad=D289E00F-691A-4D00-99B2-917D7A06EC07||F95FF07D-6EBB-4A87-9CCE-525B41FA808C||-1||-1||10; ref=cn.bing.com%7C0%7C0%7C0%7C2022-07-05+12%3A39%3A15.173%7C2022-07-04+16%3A43%3A31.021; ahrlid=16569959421524nrmHlqogh-1656995951973",
        "pragma": "no-cache",
        "referer": "https://k.autohome.com.cn/",
        "sec-ch-ua": "\" Not A;Brand\";v=\"99\", \"Chromium\";v=\"102\", \"Microsoft Edge\";v=\"102\"",
        "sec-ch-ua-mobile": "?0",
        "sec-ch-ua-platform": "\"Windows\"",
        "sec-fetch-dest": "empty",
        "sec-fetch-mode": "cors",
        "sec-fetch-site": "same-origin",
        "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/102.0.5005.124 Safari/537.36 Edg/102.0.1245.44",
        "x-requested-with": "XMLHttpRequest"
    }
    for i in range(3, 5):
        params = {
            "minprice": "2",
            "maxprice": "110",
            "_appid": "koubei",
            "fueltype": str(i)
        }
        r = requests.get(url=url, headers=headers, params=params)
        # 'result' may be absent/None on an error response — treat as empty
        # instead of crashing on iteration.
        contents = r.json().get('result') or []
        # Open the output file once per page rather than once per row.
        with open('url1.csv', 'a', encoding='utf-8', newline='') as f:
            writer = csv.writer(f)
            for content in contents:
                writer.writerow([content.get('seriesId'), content.get('koubeiCount')])


def get_jk_2():
    """For every (seriesId, koubeiCount) row in url1.csv, page through the
    koubei list API and append each review's showId to url2.csv.

    Side effects: network requests; reads url1.csv, appends to url2.csv.
    """
    headers = {
        "accept": "application/json, text/plain, */*",
        "accept-encoding": "gzip, deflate, br",
        "accept-language": "zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6",
        "cache-control": "no-cache",
        "cookie": "fvlid=1656924201724ySAcFOPXMC; sessionip=183.134.206.31; sessionid=D289E00F-691A-4D00-99B2-917D7A06EC07%7C%7C2022-07-04+16%3A43%3A31.021%7C%7Ccn.bing.com; autoid=57d78233cf805efb749ee48cbcd85866; area=330199; sessionuid=D289E00F-691A-4D00-99B2-917D7A06EC07%7C%7C2022-07-04+16%3A43%3A31.021%7C%7Ccn.bing.com; __ah_uuid_ng=c_D289E00F-691A-4D00-99B2-917D7A06EC07; sessionvid=F95FF07D-6EBB-4A87-9CCE-525B41FA808C; ahsids=2967; s_cfb4c=5; pvidchain=2099124,2099124,2099124,2099124,3311255,2099124,2099124,2112108,2099124,2112108; v_no=17; visit_info_ad=D289E00F-691A-4D00-99B2-917D7A06EC07||F95FF07D-6EBB-4A87-9CCE-525B41FA808C||-1||-1||17; ref=cn.bing.com%7C0%7C0%7C0%7C2022-07-05+12%3A51%3A29.160%7C2022-07-04+16%3A43%3A31.021; ahpvno=42",
        "origin": "https://k.autohome.com.cn",
        "pragma": "no-cache",
        "referer": "https://k.autohome.com.cn/",
        "sec-ch-ua": "\" Not A;Brand\";v=\"99\", \"Chromium\";v=\"102\", \"Microsoft Edge\";v=\"102\"",
        "sec-ch-ua-mobile": "?0",
        "sec-ch-ua-platform": "\"Windows\"",
        "sec-fetch-dest": "empty",
        "sec-fetch-mode": "cors",
        "sec-fetch-site": "same-site",
        "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/102.0.5005.124 Safari/537.36 Edg/102.0.1245.44"
    }
    url = 'https://koubeiipv6.app.autohome.com.cn/pc/series/list'
    # Open both files once instead of reopening url2.csv for every showId.
    with open('url1.csv', 'r', encoding='utf-8') as f, \
            open('url2.csv', 'a', encoding='utf-8', newline='') as out:
        reader = csv.reader(f)
        writer = csv.writer(out)
        for row in reader:
            # 20 reviews per page; "+2" keeps the original inclusive upper
            # bound so a partial last page is still fetched.
            for i in range(1, int(row[1]) // 20 + 2):
                print(i)
                params = {
                    "pm": "3",
                    "seriesId": row[0],
                    "pageIndex": str(i),
                    "pageSize": "20",
                    "yearid": "0",
                    "ge": "0",
                    "seriesSummaryKey": "0",
                    "order": "0"
                }
                r = requests.get(url=url, headers=headers, params=params)
                # Guard against missing 'result'/'list' (rate limiting,
                # empty series) instead of raising AttributeError/TypeError.
                contents = (r.json().get('result') or {}).get('list') or []
                for content in contents:
                    writer.writerow([content.get('showId')])


class Spider():
    """Scrape autohome.com.cn koubei review detail pages.

    The site obfuscates sensitive characters with a per-page custom font.
    Decoding works by downloading a fixed reference font (get_first), then
    for each page font matching glyph outlines point-by-point against the
    reference glyphs, whose plaintext characters are known (word_list).
    """

    def __init__(self):
        # Write the CSV header only when the file is new or empty, so
        # re-running the spider (append mode) doesn't insert duplicates.
        import os
        if not os.path.exists('data.csv') or os.path.getsize('data.csv') == 0:
            with open('data.csv', 'a', encoding='utf-8', newline='') as f:
                writer = csv.writer(f)
                # BUG FIX: original header had 8 columns while data rows have
                # 9 fields — the review text column ('内容') was unlabeled.
                writer.writerow(['用户名', '车型', '发布时间', '行驶里程', '百公里油耗', '裸车购买价', '购买时间', '购买地点', '内容'])
        self.headers = {
            "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
            "accept-encoding": "gzip, deflate, br",
            "accept-language": "zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6",
            "cache-control": "no-cache",
            "cookie": "fvlid=1656924201724ySAcFOPXMC; sessionip=183.134.206.31; sessionid=D289E00F-691A-4D00-99B2-917D7A06EC07%7C%7C2022-07-04+16%3A43%3A31.021%7C%7Ccn.bing.com; autoid=57d78233cf805efb749ee48cbcd85866; area=330199; sessionuid=D289E00F-691A-4D00-99B2-917D7A06EC07%7C%7C2022-07-04+16%3A43%3A31.021%7C%7Ccn.bing.com; __ah_uuid_ng=c_D289E00F-691A-4D00-99B2-917D7A06EC07; sessionvid=7A7058A9-1324-40F3-835D-A7DEC068F661; pvidchain=3311255,2099124,2112108; ahpvno=20; v_no=5; visit_info_ad=D289E00F-691A-4D00-99B2-917D7A06EC07||7A7058A9-1324-40F3-835D-A7DEC068F661||-1||-1||5; ref=cn.bing.com%7C0%7C0%7C0%7C2022-07-05+00%3A38%3A29.836%7C2022-07-04+16%3A43%3A31.021",
            "pragma": "no-cache",
            "sec-ch-ua": "\" Not A;Brand\";v=\"99\", \"Chromium\";v=\"102\", \"Microsoft Edge\";v=\"102\"",
            "sec-ch-ua-mobile": "?0",
            "sec-ch-ua-platform": "\"Windows\"",
            "sec-fetch-dest": "document",
            "sec-fetch-mode": "navigate",
            "sec-fetch-site": "same-origin",
            "sec-fetch-user": "?1",
            "upgrade-insecure-requests": "1",
            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/102.0.5005.124 Safari/537.36 Edg/102.0.1245.44"
        }

    def get_first(self):
        """Download the fixed reference font to car.ttf and dump it as XML
        (car.xml is only for manual inspection; get_data reads the .ttf)."""
        url = 'https://k2.autoimg.cn/g3/M07/DD/5F/wKgHF1sV94WAYmQCAADV5BXfOWY06..ttf'
        r = requests.get(url=url, headers=self.headers)
        with open('car.ttf', 'wb') as f:
            f.write(r.content)
        font = TTFont('car.ttf')
        font.saveXML('car.xml')

    def get_data(self):
        """Walk the review ids in url2.csv, fetch each detail page, decode
        the obfuscated glyphs by outline comparison and append one row per
        review to data.csv."""
        font = TTFont('car.ttf')
        u_list = font.getGlyphOrder()[1:]  # skip '.notdef'
        # Known plaintext characters, in the reference font's glyph order.
        word_list = '只手上矮地加下问外软自灯动四更量坐着路是九真远三音身来大保|电六比皮控机里空有副盘档很光泥五无公的低小门长近冷好中八油左二耗级多坏味孩启得呢性硬内实了当右二开七短养高和雨一排十过少'
        # Per-glyph (x, y) outline coordinates of the reference font.
        be_p1 = [list(font['glyf'][uni].coordinates) for uni in u_list]
        with open('url2.csv', 'r', encoding='utf-8') as fff:
            reader = csv.reader(fff)
            for row in reader:
                url = f'https://k.autohome.com.cn/detail/view_{row[0]}.html'
                print(f'正在访问: {url}')
                r = requests.get(url=url, headers=self.headers)
                ttf1 = re.findall('link rel="preload" href="(.*?)" as="font" crossorigin', r.text, re.S)
                if not ttf1:
                    print('无ttf文件...')
                    continue
                r1 = requests.get(url=ttf1[0], headers=self.headers)
                with open('car1.ttf', 'wb') as f:
                    f.write(r1.content)
                font1 = TTFont('car1.ttf')
                u_list1 = font1.getGlyphOrder()[1:]
                # Glyph names look like 'uniED23'; convert to the actual
                # character via chr(int(..., 16)) instead of eval() —
                # same result, no code execution.
                self.utf8List = [chr(int(uni[3:], 16)) for uni in u_list1]
                # Per-glyph (x, y) coordinates of this page's font.
                be_p2 = [list(font1['glyf'][uni].coordinates) for uni in u_list1]
                self.x_list = []
                data = {}  # obfuscated page character -> decoded plaintext
                for n2, d in enumerate(be_p2):
                    for n1, a in enumerate(be_p1):
                        if self.comp(a, d):
                            # BUG FIX: the obfuscated character belongs to the
                            # *page* font (index n2); the original indexed
                            # utf8List with the reference index n1 (and left
                            # its n2 counter unused), which mismaps whenever
                            # the two fonts order their glyphs differently.
                            self.x_list.append((word_list[n1], self.utf8List[n2]))
                            data[self.utf8List[n2]] = data.get(self.utf8List[n2], '') + word_list[n1]
                html = etree.HTML(r.text)
                # Review body: translate obfuscated characters back.
                divs = html.xpath('.//div[@class="kb-con"]/div')
                content = ''
                for div in divs:
                    texts = [t.replace(' ', '').strip() for t in div.xpath('.//text()')]
                    for text in texts:
                        if text in self.utf8List:
                            # Default '' guards against an unmatched glyph
                            # (original could concatenate None and crash).
                            content += data.get(text, '')
                        else:
                            content += text.replace('\n', '')
                # Metadata fields around the review body.
                name = html.xpath('//div[contains(@class, "msg")]/p[@class="name"]/a/text()')
                lx = html.xpath('//div[contains(@class, "msg")]/p[contains(@class, "car-con")]/text()')
                date = html.xpath('//div[@class="kb-msg"]/div[@class="timeline-con"]/span/text()')
                gg = html.xpath('//div[@class="kb-msg"]/div[@class="timeline-con"]/div[@class="kb-con"]/ul/li/div/text()')
                try:
                    with open('data.csv', 'a', encoding='utf-8', newline='') as f:
                        writer = csv.writer(f)
                        writer.writerow([name[0], lx[0], date[0], gg[0], gg[2], gg[4], gg[6], gg[8], content])
                    print('保存成功...')
                except IndexError:
                    # Page layout differs from expectation — skip this review
                    # (narrowed from a bare except that hid real errors).
                    print('内容错误...')

    def comp(self, l1, l2):
        """Return True when the two glyph outlines have the same number of
        points and every corresponding point pair differs by less than 40
        font units on both axes (the site jitters coordinates slightly)."""
        if len(l1) != len(l2):
            return False
        # Returns a consistent bool (original mixed False with int 0/1).
        return all(abs(p[0] - q[0]) < 40 and abs(p[1] - q[1]) < 40
                   for p, q in zip(l1, l2))


if __name__ == '__main__':
    # Stage 1/2 collectors are disabled; run only the detail-page scraper.
    # get_jk_1()
    # get_jk_2()
    Spider().get_data()
