# -*- coding: utf-8 -*-
import requests, pymysql, re, urllib3, datetime
from lxml import etree
from redis import Redis

urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)


# noinspection PyMethodMayBeStatic,DuplicatedCode
class JDBook(object):
    """Crawler for JD.com children's books.

    Walks the category listing pages, resolves every SKU's detail page,
    scrapes book metadata plus the long-form introduction, and upserts
    rows into the MySQL ``books`` table. A Redis set (``jd_urls``) is used
    to de-duplicate detail URLs across runs.
    """

    def __init__(self):
        # Listing URL template; {} is filled with the 1-based page number.
        self.url = "https://list.jd.com/list.html?cat=1713%2C3263%2C3394&ev=3731_41261%7C%7C19872%7C%7C41263%7C%7C41264%5E&page={}"
        # Browser-captured headers for the listing host (list.jd.com).
        # NOTE(review): the Cookie values are a captured logged-in session and
        # will expire; they should live in config, not in source.
        self.list_headers = {
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
            'Accept-Encoding': 'gzip, deflate, br',
            'Accept-Language': 'zh-CN,zh;q=0.9',
            'Cache-Control': 'max-age=0',
            'Connection': 'keep-alive',
            'Cookie': 'shshshfpa=bc39f0cc-90e2-12d8-8de2-19e7e56abd17-1594561987; shshshfpb=sfcL%2FTW5%2FD%2FTMWSlfXxau0w%3D%3D; __jdu=746435273; unpl=V2_ZzNtbUEFREV1AEMAchsPDWIHEQoRUUJCJV9BB3wRDFdgBUVUclRCFnQURldnGFUUZwUZXUFcRxFFCEdkeBBVAWMDE1VGZxBFLV0CFSNGF1wjU00zQwBBQHcJFF0uSgwDYgcaDhFTQEJ2XBVQL0oMDDdRFAhyZ0AVRQhHZH0cWwBlBxFbQGdzEkU4dlF%2bHFsAZzMTbUNnAUEpD09QfhldSGEGFVhAU0ATdzhHZHg%3d; __jdv=76161171|baidu-pinzhuan|t_288551095_baidupinzhuan|cpc|0f3d30c8dba7459bb52f2eb5eba8ac7d_0_2c6a194e83b9452fb70faf6b69ac67f8|1607418775843; areaId=5; ipLoc-djd=5-142-42546-0; _pst=jd_44c0e76766cbf; unick=jd_44c0e76766cbf; pin=jd_44c0e76766cbf; _tp=lHDlCkNjPFBKkp4%2F4VcRW8SiTOGY3b5Jcup6kzOdaGY%3D; pinId=opD_qhgrhYpARIWmmg8_LLV9-x-f3wj7; user-key=44a453cf-b93d-458a-af00-5f9c3bcd19cf; cn=0; thor=3229D488E440CE7BD207FF1057CA4F3ABDDFCB44AD544ED45499B97EBA8D085FF0E62E65A490ABE97D73BBF5721F9E96919C0CDC424257C140C6DA7D39BF7EFE3BD8FC63E3A5C5691E16B92077CD8E410CD708C9094DAD31D20B42782EED146EF2CB91B5019ED6DDF81CAF6E9646136F03836F4C0D169EBCDE05FED4D6D8931CE087E9BE8E943A7BF7ACFB7EEC91E3584E4216397E0E1FD96AC23B64372D517A; __jda=122270672.746435273.1604474404.1607649573.1607655476.12; __jdc=122270672; 3AB9D23F7A4B3C9B=2HWRJ2BS2IUO5FQ2LZDMJBYI7FSRGW2UKMMDAWHJI3IUWQOKKP3E2FNYLF6HJUGA7CXBDY4HCOHCXSDEIDHVLLIK4I; shshshfp=5d32a89e5f2eb419bb66bc5e0a5d5399; __jdb=122270672.6.746435273|12.1607655476; shshshsID=a94cb13fb1703ff51f60444eb6856bea_3_1607655498538',
            'Host': 'list.jd.com',
            'Referer': 'https://list.jd.com/list.html?cat=1713,3263,3394',
            'Sec-Fetch-Dest': 'document',
            'Sec-Fetch-Mode': 'navigate',
            'Sec-Fetch-Site': 'same-origin',
            'Sec-Fetch-User': '?1',
            'Upgrade-Insecure-Requests': '1',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36',
        }
        # Browser-captured headers for item detail pages (item.jd.com / dx.3.cn).
        self.detail_headers = {
            'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
            'accept-encoding': 'gzip, deflate, br',
            'accept-language': 'zh-CN,zh;q=0.9',
            'cookie': 'shshshfpa=bc39f0cc-90e2-12d8-8de2-19e7e56abd17-1594561987; shshshfpb=sfcL%2FTW5%2FD%2FTMWSlfXxau0w%3D%3D; __jdu=746435273; areaId=5; ipLoc-djd=5-142-42546-0; _pst=jd_44c0e76766cbf; unick=jd_44c0e76766cbf; pin=jd_44c0e76766cbf; _tp=lHDlCkNjPFBKkp4%2F4VcRW8SiTOGY3b5Jcup6kzOdaGY%3D; user-key=44a453cf-b93d-458a-af00-5f9c3bcd19cf; cn=0; unpl=V2_ZzNtbUJeEBEhCREAKEkIA2JWQFhLBUUcd1pEVn0QCAIzCxRbclRCFnQURldnGVwUZwEZWUBcRxZFCEdkeBBVAWMDE1VGZxBFLV0CFSNGF1wjU00zQwBBQHcJFF0uSgwDYgcaDhFTQEJ2XBVQL0oMDDdRFAhyZ0AVRQhHZH0cWwBlBxFbQGdzEkU4dlZ5Gl0FZDMTbUNnAUEpD0BSehERA2IEF19GVEUXRQl2Vw%3d%3d; __jdv=76161171|baidu-pinzhuan|t_288551095_baidupinzhuan|cpc|0f3d30c8dba7459bb52f2eb5eba8ac7d_0_18b5e0febae74dc48c783c3378e6e977|1607666033256; shshshfp=5d32a89e5f2eb419bb66bc5e0a5d5399; __jda=122270672.746435273.1604474404.1607655476.1607666033.13; 3AB9D23F7A4B3C9B=2HWRJ2BS2IUO5FQ2LZDMJBYI7FSRGW2UKMMDAWHJI3IUWQOKKP3E2FNYLF6HJUGA7CXBDY4HCOHCXSDEIDHVLLIK4I; __jdc=122270672; wlfstk_smdl=lvgq79237djqsjuspag19pqb6nfzi064; logintype=qq; npin=jd_44c0e76766cbf; thor=3229D488E440CE7BD207FF1057CA4F3ABDDFCB44AD544ED45499B97EBA8D085F8995287F13C43A45DF66C5307F1AF31303043834FBC19390126079C0209EC43A054069798FE3D0264BE22C1B97CADCEFCCF5B9D73DB8E601252793B4F27F03150C04BB42B67CA6B3458F09DFB29E026676C00997818CED3E9EDE8B4B04962E7C36B4FE7F7037C5AD89B6888F3D5C9CB9C372D5CA2E43CB72C6D92FA39E8A03DC; pinId=opD_qhgrhYpARIWmmg8_LLV9-x-f3wj7; shshshsID=021f2cfaf22b4d5aa996993ae0edc1d4_5_1607667761727; __jdb=122270672.20.746435273|13.1607666033',
            'sec-fetch-dest': 'document',
            'sec-fetch-mode': 'navigate',
            'sec-fetch-site': 'none',
            'sec-fetch-user': '?1',
            'upgrade-insecure-requests': '1',
            'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36',
        }
        # NOTE(review): DB credentials are hard-coded; move to config/env.
        self.conn = pymysql.connect(host='127.0.0.1', port=3306, db="bookshelf", user='root', passwd='root', charset='utf8')
        self.cur = self.conn.cursor()
        self.redis_conn = Redis(host='127.0.0.1', port=6379)

    def get_data(self, url, headers):
        """Fetch *url* with *headers* and return the body decoded as text.

        Redirects are disabled (JD redirects suspected bots to a login page)
        and certificate verification is off, matching the captured-session
        approach used throughout this crawler.
        """
        # Fix: a timeout so a stalled connection cannot hang the crawler forever.
        res = requests.get(url, headers=headers, verify=False, allow_redirects=False, timeout=30)
        return res.content.decode()

    def parse_data(self, list_html):
        """Extract the SKU-id list embedded in a listing page and build the
        corresponding detail-page URLs.

        Raises IndexError when the ``wids`` marker is absent (e.g. an
        anti-bot page was served); ``run()`` catches and retries.
        """
        wid = re.findall("search000014_log:{wids:'(.*?)'", list_html, re.DOTALL)[0]
        return [f'https://item.jd.com/{wid}.html' for wid in wid.split(",")]

    def parse_detail(self, detail_html):
        """Collect every ``skuId`` shown on a detail page (the page lists
        sibling editions/bundles) as ``{'detail_url', 'detail_id'}`` dicts."""
        return [
            {'detail_url': f'https://item.jd.com/{wid}.html', 'detail_id': wid}
            for wid in re.findall('"skuId":(.*?),', detail_html, re.DOTALL)
        ]

    def parse_new_detail(self, new_detail_html):
        """Parse one book detail page into a flat metadata dict
        (title, cover, author, publisher, pub date, ISBN, timestamps)."""
        html = etree.HTML(new_detail_html)
        item = dict()
        # Title / cover image / author come from fixed page anchors.
        item['title'] = html.xpath("//*[@class='p-name']/text()")[0]
        item['cover'] = html.xpath('//*[@id="spec-img"]/@src')[0]
        item['author'] = html.xpath('//*[@id="p-author"]/a/text()')[0]
        # Score is not scraped from this page; stored empty by design.
        item['score'] = ''
        # Publisher / publication date / ISBN live in the #parameter2 list;
        # default to '' so missing rows still insert cleanly.
        item['source'] = ''
        item['pub_date'] = ''
        item['ISBN'] = ''
        for li in html.xpath('//*[@id="parameter2"]/li'):
            # Fix: query the label text once per <li> (was evaluated 3x).
            label = li.xpath('.//text()')[0]
            if "出版社：" in label:
                item['source'] = li.xpath("./@title")[0]
            if "出版时间：" in label:
                item['pub_date'] = li.xpath("./@title")[0]
            if "ISBN：" in label:
                item['ISBN'] = li.xpath("./@title")[0]
        # Record time and the (fixed) owning account id.
        item['save_time'] = datetime.datetime.now()
        item['account_id'] = 1
        return item

    def parse_introduce(self, introduce_html):
        """Extract the plain-text book introduction from the JSONP ``desc``
        payload; returns '' when the payload cannot be parsed."""
        try:
            filter_html = introduce_html.replace("\\n", "").replace("\\", "")
            content_html = re.findall('"content":"            (.*?)}', filter_html, re.DOTALL)[0]
            html = etree.HTML(content_html)
            introduce_lis = html.xpath('//*[@id="detail-tag-id-3"]/div[2]/div//text()')
            introduce = "".join(str(i) for i in introduce_lis)
        except Exception as e:
            # Best-effort: a missing/odd payload yields an empty introduction.
            print(e)
            introduce = ''
        # Legacy quote->backtick mapping kept so stored text stays identical
        # to earlier crawls (no longer needed for SQL safety — see save_data).
        return introduce.replace("'", "`").replace('"', "`")

    def save_data(self, item):
        """Upsert one book row via REPLACE INTO.

        Fix: values are bound as query parameters instead of %-interpolated
        into the SQL string, which closes the SQL-injection hole and handles
        quotes/backslashes in scraped text correctly.
        """
        sql = ("replace into books"
               "(title,cover,author,source,pub_date,score,introduce,save_time,ISBN,account_id)"
               " values (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)")
        self.cur.execute(sql, (
            item['title'], item['cover'], item['author'], item['source'],
            item['pub_date'], item['score'], item['introduce'],
            item['save_time'], item['ISBN'], item['account_id'],
        ))
        self.conn.commit()

    def run(self):
        """Crawl listing pages indefinitely, de-duplicating detail URLs
        through the Redis ``jd_urls`` set before scraping and saving."""
        num = 1
        while True:
            list_html = self.get_data(self.url.format(num), self.list_headers)
            try:
                url_list = self.parse_data(list_html)
            except Exception as e:
                # Marker missing (likely an anti-bot page): retry same page.
                print(e)
                continue
            for url in url_list:
                detail_html = self.get_data(url, self.detail_headers)
                for data in self.parse_detail(detail_html):
                    # sadd returns 0 when the URL is already in the set -> skip.
                    if not self.redis_conn.sadd('jd_urls', data['detail_url']):
                        continue
                    new_detail_html = self.get_data(data['detail_url'], self.detail_headers)
                    new_detail_data = self.parse_new_detail(new_detail_html)
                    # The long description is served as JSONP from dx.3.cn.
                    introduce_url = f"https://dx.3.cn/desc/{data['detail_id']}?encode=utf-8&cdn=2&callback=showdesc"
                    introduce_html = self.get_data(introduce_url, self.detail_headers)
                    new_detail_data['introduce'] = self.parse_introduce(introduce_html)
                    print(new_detail_data)
                    self.save_data(new_detail_data)
            num += 1


if __name__ == '__main__':
    # Script entry point: build the crawler and start the endless crawl loop.
    JDBook().run()
