import requests
import time
import json
from urllib.parse import urlencode
from copy import deepcopy
import re
from openpyxl import load_workbook
import random





# Endpoint for order-detail lookups (only used by the commented-out demo below).
detail_url = 'https://kf.jd.com/orderDetail/queryOrderDetail.action?'

# JSONP endpoint that serves paginated product reviews for a given SKU.
page_url = 'https://club.jd.com/comment/skuProductPageComments.action?'

# Sample payload for the order-detail demo request.
detail_payload = {
    'id': '158112100828_100005961481'
}

# cookie = '__jdu=16185516383251456870777; areaId=2; _base_=YKH2KDFHMOZBLCUV7NSRBWQUJPBI7JIMU5R3EFJ5UDHJ5LCU7R2NILKK5UJ6GLA2RGYT464UKXAI5KK7PNC5B5UHJ2HVQ4ENFP57OC2LMMUEMI3EC6RZ5U433363YQANSQPBWUUGJFLNKIT2ENFC565IIX5UVPWLXIHLRTTSIWB4JZFYXUMXMCXGARN235SMJQHSXU4WI6U67KX3UU3CJVA764BI4ZTHLHHOCSE44U4JESAES42ZLQDKWQ36MF3DCK3KRPYBJZAFHLQ3XUMVJPOWCDAD2H2QZ2MIP7UOCMHLOSWXNLHQ; shshshfpa=28e547d6-2f28-871c-bc86-3ca4fcda6e31-1618557610; shshshfpb=jOWJEcvL%20zG1tWK6llIoM%2Fw%3D%3D; user-key=5a3d4ac0-e51a-44fc-9b48-0607a3658a0c; ipLoc-djd=2-2813-51976-0; jwotest_product=99; TrackID=1giZs75T7yQ4McnaIUrdi_JPMyv0CcaHvxcReG8LHPLLNR7IZINQ-e_G6Aofo1PVy0wYBuyq_2VS2UsyMc9Cr38oZhE6Z4_JbXgEUWZeIvtU; mt_xid=V2_52007VwMVUlRcVloYSxBeBmMDF1ZaUVRbGU8fbAcwBRJQWwpWRh4cG1sZYlASWkEIBQlKVRhVAmVUQlddC1ZaGnkaXQZnHxNXQVtQSx9NEl0NbAYSYl1oUmodThFfAWMHE1dZaFZeHEs%3D; unpl=V2_ZzNtbRdWEUJwDxFUKRELUGILEwpLAxYUfFxPUHsQX1AwUBUIclRCFnUUR1FnGl4UZwQZWERcQRNFCEdkeB5fA2AFEFlBZxBFLV0CFi9JH1c%2bbRJcRV5CE3cPRVB7Gmw1ZAMiXUNnQxJ1AUJQex5eDGUAFl1HU0QccwlAUn0pbAJXMyJdRldLFHM4R2R6KR5ROwYSXEVTRFh1D0Zdfx1cAmUKEF5GV0YRcgFAVX0fWjVmMxE%3d; __jdv=76161171|baidu-search|t_262767352_baidusearch|cpc|106807362512_0_d0cf46f1c9fd490f8ed08e85182dfb6d|1619167659465; PCSYCityID=CN_310000_310100_0; shshshfp=52623334480e53962a34fd7f5cd6029b; __jdc=122270672; __jda=122270672.16185516383251456870777.1618551638.1619322893.1619328679.12; cid=NWJHMDkyNnFYNTM4N3RINzk2OGFUMzE2MWFLNTU4MmJQMzU0M2paNzEwNHdCMzEy; wlfstk_smdl=rqqprf39ucqdss0pu5u0i8rw4chl5e1x; thor=8B34F3F3A4ABB4499F47FE5A20006304DDD0064072105A3FACC2DEAD1BA8A97BC7F1D230741E87C4ECE99D5D8D8BD53A80099603D36C50D6D6B1BA2021A1BB91AD89BD7C1720B9A5E3FC92992012B7DC5344B0FA6ADD682304C17E1C2DB4E11DCB8CC901099D07D16205141B2AB29F013038561CE217C4F7ED1073E0E549246121D6D3C0B9842507DC70E7E71F5DEA66; pinId=2zOkqJqAfpYX4-KR-rfzzA; pin=izhijue001; unick=izhijue001; ceshi3.com=000; _tp=Igkli8ifDSlf0i1mITw3cA%3D%3D; _pst=izhijue001; 
# 3AB9D23F7A4B3C9B=KRLX6PLJAA5NT5DA6TKPI4PSY4Y3BXUN3NIUZK7EM3TSKVXHHGK3CHXLCZJMJIVOXF7GYPXC3QVRRVSIK4UZC4JWFE; JSESSIONID=7458F40FA735D9F5BA932FC739996069.s1'
# ^ BUGFIX: this line is the tail of the commented-out cookie above but was left
# uncommented, which made the whole module a SyntaxError; it is now commented out.


# headers = {
#     "cookie": cookie,
#     "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.190 Safari/537.36"}
# response = requests.post(detail_url, headers=headers, data=json.dumps(detail_payload))
# datas = response.json()
# print(type(datas))
# print(datas)

# Query-parameter template for the review endpoint; get_comment() deep-copies
# this and overrides 'productId' and 'page' per request. Key order matters:
# it determines the order urlencode() emits the parameters in.
page_payload = dict(
    callback='fetchJSON_comment98',  # JSONP wrapper name the server echoes back
    productId=526831,                # placeholder SKU id, replaced per product
    score=0,                         # 0 = reviews of every star rating
    sortType=5,
    page=2,                          # page index, replaced per request
    pageSize=10,
    isShadowSku=0,
    rid=0,
    fold=1,
)
# One-off request kept for reference:
# url = page_url + urlencode(page_payload)
# response = requests.get(url, headers=headers).text
# html = response[20:-2]
# json_data = json.loads(html)
# jso = json_data["comments"]
# print(jso)

# Single HTTP proxy that every scraping request is routed through.
proxy = {
    "http": "http://14.119.82.122:80"
}

def get_comment(url, s, e, cookie, batch):
    """Scrape review pages [s, e) for the product at *url* and dump them to JSON.

    Args:
        url: product page URL, e.g. 'https://item.jd.com/100011743096.html';
            the numeric SKU id is extracted from it.
        s, e: half-open range of review-page indices to fetch.
        cookie: raw Cookie header value to send with every request.
        batch: suffix distinguishing output files ('download/<id>_<batch>.json').

    Side effects: issues one HTTP GET per page (through the module-level
    proxy), sleeps between requests, and writes one JSON file per call.
    """
    # Raw string avoids the invalid-escape warning '\d' raises in a plain
    # literal; also renamed away from the builtin `id`.
    product_id = re.search(r'com/(\d+)\.html', url)[1]
    print(product_id)
    page_load = deepcopy(page_payload)
    page_load['productId'] = product_id
    dict_comment = {}
    for i in range(s, e):
        page_load['page'] = i
        href = page_url + urlencode(page_load)
        headers = {
            'cookie': cookie,
            'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.128 Safari/537.36'
        }
        # allow_redirects=False so an anti-bot redirect surfaces instead of
        # silently returning a login page.
        response = requests.get(href, headers=headers, allow_redirects=False, proxies=proxy)
        response_t = response.text
        # Strip the JSONP wrapper: 'fetchJSON_comment98(' is 20 characters,
        # and the payload ends with ');'.
        html = response_t[20:-2]
        print(html)
        json_data = json.loads(html)
        jso = json_data["comments"]
        if not jso:
            # An empty page means there are no more reviews — stop early.
            break
        dict_comment[str(i + 1)] = jso
        print('第{}页已爬取完成'.format(i + 1))
        # BUGFIX: randrange(4, 5) always returned exactly 4, so the "random"
        # anti-bot delay was constant; uniform(4, 5) gives a real jitter.
        time.sleep(random.uniform(4, 5))
    with open('download/{}_{}.json'.format(product_id, batch), 'w', encoding='utf-8') as f:
        json.dump(dict_comment, f, ensure_ascii=False)
    print(url + '已爬完')

if __name__ == '__main__':
    # The one-time helper that harvested product links from the
    # '商品名&链接-JD.xlsx' spreadsheet into html.txt has been removed;
    # html.txt is now the sole input (one product URL per line).
    cookie = '__jda=122270672.1619340604157353112913.1619340604.1619340604.1619340604.1; __jdb=122270672.1.1619340604157353112913|1.1619340604; __jdc=122270672; __jdv=122270672|direct|-|none|-|1619340604159; __jdu=1619340604157353112913; shshshfp=3f7e13e4be153d3a8202b6953a61c44e; shshshfpa=a7738e5b-12a3-580c-dc1f-721fd3fd029e-1619340604; shshshsID=c618de22ce6da50783b074cdc7773e35_1_1619340605144; shshshfpb=ngyqSDfWgUNVLtaoaI2iEUQ%3D%3D; areaId=1; ipLoc-djd=1-72-55653-0; JSESSIONID=70BB8C64EB0DD9E6623B10E1A86FBEE1.s1; jwotest_product=99'
    with open('html.txt', 'r') as f:
        hrefs = f.readlines()
    # Crawl in three passes of page ranges (batches 1..3) so every product is
    # swept once before any product is revisited for deeper pages. This
    # replaces three copy-pasted loops that differed only in (start, end,
    # batch); execution order is identical.
    for batch, (start, end) in enumerate([(0, 33), (33, 66), (66, 100)], start=1):
        for href in hrefs:
            print("processing ", href)
            get_comment(href, start, end, cookie, batch)
    # get_comment('https://item.jd.com/100011743096.html')






