import time

import requests
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from lxml import etree
import pymongo
from concurrent import futures
from fake_useragent import UserAgent

class CrawlDog:
    """Crawler that scrapes JD.com search results for a keyword and stores
    the product records (and, optionally, their comments) in MongoDB."""

    # Browser-like headers shared by all plain-requests fetches of search pages.
    INDEX_HEADERS = {
        'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,'
                  'application/signed-exchange;v=b3',
        'accept-encoding': 'gzip, deflate, br',
        'Accept-Charset': 'utf-8',
        'accept-language': 'zh,en-US;q=0.9,en;q=0.8,zh-TW;q=0.7,zh-CN;q=0.6',
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
                      'Chrome/74.0.3729.169 Safari/537.36'
    }

    def __init__(self, keyword):
        """
        Initialize the crawler.
        :param keyword: the search keyword to crawl
        """
        self.keyword = keyword
        # NOTE(review): credentials are hard-coded; consider moving them to config.
        self.mongo_client = pymongo.MongoClient('mongodb://admin:123456@localhost:27017/')
        self.mongo_collection = self.mongo_client['spiders']['jd']
        # Index item_id so get_comment() can locate a product document quickly.
        self.mongo_collection.create_index([('item_id', pymongo.ASCENDING)])

    def _parse_and_store(self, html, with_commit=False):
        """
        Parse product items out of a search-result page and insert each one
        into MongoDB. Shared by get_index() and get_page().
        :param html: raw HTML text of a search-result page
        :param with_commit: also extract the 'commit' (comment-count link) field
        """
        root = etree.HTML(html)
        for item in root.xpath('//li[contains(@class, "gl-item")]'):
            try:
                info = dict()
                info['title'] = ''.join(item.xpath('.//div[@class="p-name p-name-type-2"]//em//text()'))
                info['url'] = 'https:' + item.xpath('.//div[@class="p-name p-name-type-2"]/a/@href')[0]
                info['store'] = item.xpath('.//div[@class="p-shop"]/span/a/text()')[0]
                # BUG FIX: the original concatenated 'https' without the colon,
                # producing invalid URLs such as 'https//mall.jd.com/...'.
                info['store_url'] = 'https:' + item.xpath('.//div[@class="p-shop"]/span/a/@href')[0]
                # item_id is the numeric part of '.../<id>.html' ([:-5] strips '.html').
                info['item_id'] = info.get('url').split('/')[-1][:-5]
                info['price'] = item.xpath('.//div[@class="p-price"]//i/text()')[0]
                if with_commit:
                    info['commit'] = item.xpath('.//div[@class="p-commit"]//a/text()')[0]

                print(info)

                # Placeholder filled incrementally by get_comment().
                info['comments'] = []

                self.mongo_collection.insert_one(info)
            except IndexError:
                # Ads interleaved with real results lack some of the fields above.
                print('item信息不全, drop!')
                continue

    def get_index(self, page):
        """
        Fetch one search-result page with plain HTTP and store its items.
        :param page: search page number
        """
        url = 'https://search.jd.com/Search?keyword=%s&enc=utf-8&page=%d' % (self.keyword, page)
        # timeout prevents a hung connection from stalling the whole crawl.
        rsp = requests.get(url=url, headers=self.INDEX_HEADERS, timeout=30)
        self._parse_and_store(rsp.content.decode())

    def get_comment(self, params):
        """
        Fetch one page of comments for a product and merge them into its document.
        :param params: dict with 'item_id' (product id) and 'page' (comment page number)
        """
        url = 'https://sclub.jd.com/comment/productPageComments.action?productId=%s&score=0&sortType=5&page=%d&' \
              'pageSize=10' % (params['item_id'], params['page'])
        comment_headers = {
            'Referer': 'https://item.jd.com/%s.html' % params['item_id'],
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
                          'Chrome/74.0.3729.169 Safari/537.36'
        }
        rsp = requests.get(url=url, headers=comment_headers, timeout=30).json()
        comments_count = rsp.get('productCommentSummary').get('commentCountStr')
        comments = [comment.get('content') for comment in rsp.get('comments')]
        self.mongo_collection.update_one(
            # Locate the product document by its id.
            {'item_id': params['item_id']},
            {
                '$set': {'comments_count': comments_count},      # summary string, e.g. '10万+'
                '$addToSet': {'comments': {'$each': comments}}   # merge without duplicates
            },
            True)  # upsert if the product is not stored yet

    def get_page(self, page):
        """
        Fetch one search-result page with headless Chrome (needed when the page
        requires JS rendering or blocks plain requests) and store its items.
        :param page: search page number
        """
        chrome_options = Options()
        chrome_options.add_argument('--headless')
        chrome_options.add_argument('--disable-gpu')
        # Randomize the user agent to lower the chance of being blocked.
        chrome_options.add_argument(f'user-agent={UserAgent().random}')
        print(chrome_options.arguments)

        browser = webdriver.Chrome(options=chrome_options)
        try:
            browser.implicitly_wait(60)
            url = 'https://search.jd.com/Search?keyword=%s&enc=utf-8&page=%d' % (self.keyword, page)
            browser.get(url)
            if len(browser.page_source) > 0:
                print(url)
                print(browser.page_source)
            else:
                print("empty")

            self._parse_and_store(browser.page_source, with_commit=True)
        finally:
            # BUG FIX: the original never closed the browser, leaking one
            # headless Chrome process per call.
            browser.quit()

    def main(self, index_pn, comment_pn):
        """
        Run the crawl.
        :param index_pn: number of search pages to crawl
        :param comment_pn: number of comment pages per item (currently unused;
                           the comment-crawl stage is disabled)
        """
        # JD numbers full search pages with odd values (1, 3, 5, ...).
        il = [i * 2 + 1 for i in range(index_pn)]
        # Single worker keeps the request rate low enough to avoid blocking.
        with futures.ThreadPoolExecutor(1) as executor:
            executor.map(self.get_page, il)

    def get_JD(self):
        """Placeholder entry point for ad-hoc page crawls; currently a no-op."""
        pass

if __name__ == '__main__':
    # Smoke test: build a crawler for the keyword and run the (no-op) entry point.
    crawler = CrawlDog('真无线耳机')
    # crawler.main(100, 100)
    crawler.get_JD()