# -*- coding:utf-8 -*-

import requests
import json
import re
from time import sleep
from collections import defaultdict
from datetime import datetime
from lxml import etree
from multiprocessing.dummy import Pool as ThreadPool
from mysql_api import MySQL_API
import sys
# Python 2-only hack: reload() re-exposes sys.setdefaultencoding (which
# site.py deletes at interpreter startup) so the process-wide default
# string encoding can be forced to UTF-8. This makes implicit
# str<->unicode conversions of the Chinese keywords/comments below not
# raise UnicodeDecodeError. Has no equivalent (and is unnecessary) on
# Python 3.
reload(sys)
sys.setdefaultencoding("utf-8")


# Browser identity attached to every outgoing request.
user_agent = ('Mozilla/5.0 (Windows NT 6.1; Win64; x64) '
              'AppleWebKit/537.36 (KHTML, like Gecko) '
              'Chrome/63.0.3239.132 Safari/537.36')

# HTTP/HTTPS proxy endpoints, used only when JingDong(proxy_flag=True).
proxy_list = {
    "https": "http://135.245.48.34:8000",
    "http": "http://135.245.48.34:8000",
}


class JingDong(object):
    """Scraper for jd.com.

    Collects product ids (data-sku) from search-result pages, then downloads
    the JSONP comment pages for each product and stores one row per comment,
    either through MySQL_API (when `sql` is given) or appended to a text file.
    """

    def __init__(self, file_path=None, proxy_flag=False, gzip_enable=True, sql=None):
        # file_path:   output text file used when no SQL config is given
        # proxy_flag:  route requests through the module-level proxy_list
        # gzip_enable: advertise gzip support via Accept-Encoding
        # sql:         dict of MySQL_API keyword args; truthy enables DB storage
        self.file_path = file_path
        self.search_url = "https://search.jd.com/Search"
        self.item_url = "https://item.jd.com/"
        self.comment_url = "https://club.jd.com/comment/skuProductPageComments.action"
        self.proxy_flag = proxy_flag
        self.gzip_enable = gzip_enable
        self.produce_id_list = set()
        self.con_SQL_info = sql
        # JSON keys extracted from each comment record, and the column names
        # they are stored under (position i of each list corresponds).
        self.key_list = ['referenceName', 'productColor', 'referenceTime', 'creationTime',
                         'score', 'userClientShow', 'userLevelName', 'afterDays']
        rename_key_list = ['productName', 'productColor', 'orderTime', 'commentTime',
                           'score', 'userClientShow', 'userLevelName', 'commentplusafterDays']
        self.keys_map = dict(zip(self.key_list, rename_key_list))
        # Running count of comments fetched. Updated from worker threads;
        # best-effort only (+= is not atomic), as in the original design.
        self.cmt_total_num = 0

    def get_page(self, url, params=None, old_s=None, update_headers=None):
        """GET `url` and return the requests Response, or None on any error.

        old_s:          an existing requests.Session to reuse (else a fresh one)
        update_headers: extra/overriding header dict
        """
        try:
            # BUG FIX: the header key used to be 'user_agent', which is not a
            # valid HTTP header name -- the UA string was never actually sent.
            headers = {'User-Agent': user_agent,
                       'Accept-Language': 'zh-CN,zh;q=0.9',
                       'Cache-Control': 'max-age=0'}
            if update_headers:
                headers.update(update_headers)
            session = old_s if old_s else requests.Session()
            if self.gzip_enable:
                headers['Accept-Encoding'] = 'gzip'
            proxies = proxy_list if self.proxy_flag else None
            return session.get(url, params=params, headers=headers, proxies=proxies)
        except Exception as e:
            # Best-effort scraper: log and signal failure with None.
            # Callers must check for None before touching the response.
            print("get_page failed for %s: %s" % (url, e))
            return None

    def search_page_rst(self, search_content):
        """Search jd.com for `search_content`; return the list of data-sku ids
        found on the result page (also accumulated in self.produce_id_list)."""
        payload = {
            "keyword": search_content,
            "enc": "utf-8",
            "qrst": 1,
            "rt": 1,
            "stop": 1,
            "vt": 2,
            "wq": search_content,
            "psort": 3,   # sort by sales
            "click": 0,
        }
        page_resp = self.get_page(self.search_url, params=payload)
        # BUG FIX: get_page returns None on failure; dereferencing it crashed.
        if page_resp is None:
            return []
        print(page_resp.url)
        page_content = page_resp.content.decode('gbk', 'ignore')
        page_etree = etree.HTML(page_content)
        produce_ids = page_etree.xpath('//*[@id="J_goodsList"]/ul/li/@data-sku')
        self.produce_id_list.update(set(produce_ids))
        return produce_ids

    def produce_page(self, produce_id):
        """Open one product page and collect the skus of its colour/size
        variants into self.produce_id_list."""
        page_url = self.item_url + "%s.html" % produce_id
        page_resp = self.get_page(url=page_url)
        if page_resp is None:  # network failure already logged by get_page
            return
        page_content = page_resp.content.decode('gbk', 'ignore')
        page_etree = etree.HTML(page_content)
        produce_sub_ids = page_etree.xpath('//*[@id="choose-attr-1"]/div[2]/div/@data-sku')
        self.produce_id_list.update(set(produce_sub_ids))

    def produce_comments_page(self, produce_id, page_index=0):
        """Fetch one JSONP comment page for `produce_id`; returns the raw
        Response (or None on failure)."""
        payload = {
            "callback": "fetchJSON_comment98vv8444",
            "productId": produce_id,
            "score": 0,       # 0 = all comments
            "sortType": 5,    # newest first
            "page": page_index,
            "pageSize": 10,
            "isShadowSku": 0,
            "fold": 1
        }
        return self.get_page(url=self.comment_url, params=payload)

    def get_comments_info(self, param):
        """Download and store one comment page.

        param: (product_id, comment_page_index) tuple -- packed so the method
        can be used directly with Pool.map.
        Returns maxPage (when called on page 0 and more pages exist) so the
        caller can fan out the remaining pages; otherwise 0.
        """
        product_id, comment_page_index = param
        resp = self.produce_comments_page(product_id, comment_page_index)
        # BUG FIX: also guard against None (network error inside get_page).
        if resp is None or resp.status_code != 200:
            return 0

        produce_comments = resp.content.decode('gbk', 'ignore')
        # Strip the JSONP wrapper: fetchJSON_comment98vv8444({...})
        json_data = re.findall(r'\((.*)\)', produce_comments, re.S)
        # BUG FIX: re.findall never returns False, it returns a list; the old
        # `json_data is False` test let an empty list reach json_data[0] and
        # raise IndexError. A very short payload means a blocked/empty page.
        if not json_data or len(json_data[0]) < 1000:
            print(resp.url)
            print("the length of resp.content is %s" % len(produce_comments))
            print("json_data number is %s " % len(json_data))
            return 0

        comments_dic = json.loads(json_data[0])
        if 'comments' not in comments_dic:
            return 0

        self.cmt_total_num += len(comments_dic['comments'])
        for cmt in comments_dic['comments']:
            if self.con_SQL_info:
                # Rename the keys of interest, filling absent ones with "NULL".
                items_dic = {}
                for key, column in self.keys_map.items():
                    items_dic[column] = cmt[key] if key in cmt else "NULL"
                items_dic['productId'] = product_id
                MySQL_API(**self.con_SQL_info).process_item(items_dic)
            else:
                # BUG FIX: map() yields an iterator on Python 3, so the old
                # map(...).insert(0, ...) pattern would crash there; a list
                # comprehension behaves the same on both interpreters.
                fields = [product_id] + [str(cmt[k]) if k in cmt else "NA"
                                         for k in self.key_list]
                with open(self.file_path, 'a') as f:
                    f.write(" | ".join(fields) + "\n")

        # Only the first page reports the total page count.
        if comment_page_index == 0 and 'maxPage' in comments_dic:
            max_page_num = int(comments_dic['maxPage'])
            if max_page_num > 1:
                return max_page_num
        return 0

    def start(self, id_list):
        """Drive the scrape: for each product id fetch page 0 to learn the
        page count, then fan the remaining pages out over a thread pool."""
        items_number = len(id_list)
        print("total items number: %d" % items_number)
        comments_page_num = 0

        for p_id in id_list:
            print("produce ID:%s" % p_id)
            page_max_index = self.get_comments_info((p_id, 0))
            if isinstance(page_max_index, int) and page_max_index > 0:
                comments_page_num += page_max_index
            else:
                print(page_max_index)
                continue
            print("comments page num:%s" % page_max_index)

            pool2 = ThreadPool(30)
            try:
                pages_list = range(1, page_max_index)
                params = zip([p_id] * (page_max_index - 1), pages_list)
                pool2.map(self.get_comments_info, params)
                pool2.close()
                pool2.join()
            except Exception as e:
                sleep(0.1)
                pool2.terminate()  # kill workers so we can move on
                print("comment pool failed for %s: %s" % (p_id, e))

            items_number -= 1
            print("remain items number: %d" % items_number)

        print("total pages number of comments: %s" % comments_page_num)


if __name__ == "__main__":

    ##
    product_list = ['电脑', '酒', '相机', '玩具', '冰箱', '电视', '空调', '洗衣机']
    con_sql = {'username': 'root', 'passwd': 'root', 'db_name': 'test'}

    # now_time = datetime.now().strftime('%Y-%m-%d-%H-%M')

    JD = JingDong(proxy_flag=False, sql=con_sql)

    produce_id_list = set()
    for product in product_list:
        search_target = product
        num = JD.search_page_rst(search_target)
        print product, len(num)
        produce_id_list.update(num)

    print len(produce_id_list)
    start_time = datetime.now()
    try:
        JD.start(produce_id_list)
    except Exception, e:
        print Exception, e
    finally:
        print "run time: %s" % (datetime.now() - start_time)
        print JD.cmt_total_num



