import re
import os
import requests
import time
from lxml import etree
import json
import csv
import threading
"""
https://book.jd.com/booksort.html  全部图书分类
self.url = 'https://list.jd.com/list.html?cat=1713,3258,3297&page={}'

https://list.jd.com/list.html?cat=1713,3258,3297&tid=3297&page=0  中国当代小说第一页 
https://list.jd.com/list.html?cat=1713,3258,3299&tid=3299&page=0  中国古典小说第一页
       ######cat  tid  不同

详情页
https://item.jd.com/12114139.html
https://item.jd.com/10056950.html

价格
https://p.3.cn/prices/mgets?&skuIds=J_12114139
"""
class JDong():
    """Scraper for JD.com book listings (Chinese contemporary fiction).

    Pipeline (see ``run``): crawl listing pages for detail-page URLs,
    scrape each book's id/author/title, fetch prices from the price API,
    then save a CSV and download cover images on two worker threads.
    """

    # Matches the numeric sku in e.g. https://item.jd.com/12114139.html
    _SKU_RE = re.compile(r'/(\d+)\.html')

    def __init__(self):
        # Chinese contemporary fiction category
        self.url = 'https://list.jd.com/list.html?cat=1713,3258,3297&page={}'
        # Military category (alternative)
        # self.url = 'https://list.jd.com/list.html?cat=1713,3258,3308&tid=3308&page={}'
        self.headers = {
            'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.90 Safari/537.36'
        }

    def _sku(self, url):
        """Return the numeric sku id from a detail-page URL, or None.

        Replaces the fragile ``url[20:-5]`` slicing the scraper used before.
        """
        match = self._SKU_RE.search(url)
        return match.group(1) if match else None

    def get_url_list(self):
        """Crawl the first 10 listing pages and return detail-page URLs."""
        print('正在获取url...........')
        content_list = []
        # NOTE(review): the category has ~243 pages; only 10 are crawled here.
        url_list = [self.url.format(i) for i in range(10)]
        for index, url in enumerate(url_list, start=1):
            print('url:', index, url)
            try:
                response = requests.get(url, headers=self.headers, timeout=50)
            except requests.RequestException:
                # Skip a failed listing page instead of aborting the whole run.
                continue
            html = etree.HTML(response.content.decode())
            div_list = html.xpath('//li[@class="gl-item"]//div[@class="p-img"]')
            for div in div_list:
                href = div.xpath('./a/@href')
                if href:  # tolerate items without a link
                    content_list.append('https:' + href[0])
        return content_list

    def get_content(self, content_list):
        """Scrape each detail page for cover-image URL, id, author, title.

        Returns ``(img_list, info_list)``: cover URLs and per-book dicts
        with keys ``id`` / ``作者`` / ``书名``.
        """
        print('*************开始获取id,作者，书名**************')
        img_list = []
        info_list = []
        for index, url in enumerate(content_list, start=1):
            print('正在获取id,作者，书名。。。。。', index)
            sku = self._sku(url)
            if sku is None:
                continue
            sku_id = 'J_' + sku
            try:
                response = requests.get(url, headers=self.headers, timeout=50)
            except requests.RequestException:
                continue
            # Detail pages are gb*-encoded; 'ignore' keeps the result a str.
            # (The old fallback re-encoded to bytes, feeding etree.HTML
            # inconsistent input.)
            r = response.content.decode('gb18030', 'ignore')
            html = etree.HTML(r)
            src = html.xpath('//div[@id="spec-n1"]//img/@src')
            name = html.xpath('//div[@class="sku-name"]/text()')
            if not src or not name:
                print('图片url获取失败')
                continue
            img_list.append('https:' + src[0])
            item = {
                'id': sku_id,
                '作者': html.xpath('//div[@class="p-author"]/a/@data-name'),
                # Strip spaces from the title. The old
                # json.loads(json.dumps(...).replace(...)) round-trip did
                # exactly this, just slower.
                '书名': name[0].replace(' ', ''),
            }
            info_list.append(item)
            print(item)
        print('Id,作者，书名获取完毕！！！！！！！！！！！！！！！！')
        return img_list, info_list

    def get_price(self, content_list):
        """Query the price API for each book; return [{'id', 'price'}, ...]."""
        print('***********开始获取价格**********')
        price_list = []
        url_i = 'https://p.3.cn/prices/mgets?&skuIds=J_{}'
        for index, url in enumerate(content_list, start=1):
            print('获取价格中..第%s 个......' % index)
            sku = self._sku(url)
            if sku is None:
                continue
            url_price = url_i.format(sku)
            try:
                response = requests.get(url_price, headers=self.headers, timeout=50)
                p = json.loads(response.content.decode())
                item = {'id': p[0]['id'], 'price': p[0]['p']}
            except (requests.RequestException, ValueError, LookupError):
                # Bad response / malformed JSON for one sku: skip it.
                continue
            print(item)
            price_list.append(item)
        print('获取价格完毕!!!!!!!!!!!!!!!!!')
        return price_list

    def save_info(self, *args):
        """Append one CSV row per book dict (id, author, title, price)."""
        print('***************正在保存信息中.........*********************')
        # utf-8-sig so the Chinese text opens correctly in Excel;
        # newline='' per the csv module docs.
        with open('中国当代小说.csv', 'a+', newline='', encoding='utf-8-sig') as f:
            writer = csv.writer(f)
            writer.writerow(['ID', '作者', '书名', '价格'])
            for index, info in enumerate(args, start=1):
                print('正在保存csv文件信息 第 %s 个。。。。' % index)
                # .get: a book may have had no matching price entry in run()
                writer.writerow([info['id'], info['作者'], info['书名'],
                                 info.get('price', '')])
        time.sleep(1)

    def save_img(self, *args):
        """Download each cover image into the target directory as <n>.jpg."""
        print('***********正在下载图片..........***************')

        path = 'D:/Python__2/08-29/中国当代小说/中国当代小说'
        if not os.path.exists(path):
            os.makedirs(path)

        index = 1
        for img in args:
            print('下载图片中.......第%s 个' % index)
            try:
                response = requests.get(img, headers=self.headers, timeout=50)
            except requests.RequestException:
                print('图片下载失败')
            else:
                # os.path.join: the old path + '{}.jpg' concatenation had no
                # separator, so files landed outside the created directory.
                with open(os.path.join(path, '{}.jpg'.format(index)), 'wb') as f:
                    f.write(response.content)
                index += 1
        time.sleep(1)

    def run(self):
        """Run the full pipeline: crawl, scrape, price, then save on threads."""
        print('******************爬虫开始运行************************')
        content_list = self.get_url_list()
        # Cover-image URLs plus author/title info
        img_list, info_list = self.get_content(content_list)
        # Prices keyed by sku id (dict lookup replaces the old O(n^2) scan)
        price_by_id = {p['id']: p['price'] for p in self.get_price(content_list)}
        for info in info_list:
            if info['id'] in price_by_id:
                info['price'] = price_by_id[info['id']]

        # Thread(args=...) unpacks each list element into save_*'s *args.
        t1 = threading.Thread(target=self.save_info, args=info_list)
        t2 = threading.Thread(target=self.save_img, args=img_list)
        t1.start()
        t2.start()
        # Join so the script doesn't report completion before writes finish.
        t1.join()
        t2.join()




if __name__ == '__main__':
    # Script entry point: build the scraper and run the whole pipeline.
    JDong().run()















