import urllib.request
import urllib.error
import re
import time
import os
import random


"""
本文件用来提取User_Agent,ip,referer
"""

# Browser labels used as random-choice keys into pc_User_Agent below.
# NOTE(review): several entries use an en dash "–" (not a hyphen "-");
# they must match the dict keys in pc_User_Agent character-for-character.
pc_Browser = [
    'safari 5.1 – MAC',
    'safari 5.1 – Windows',
    'IE 9.0',
    'IE 8.0',
    'IE 7.0',
    'IE 6.0',
    'Firefox 4.0.1 – MAC',
    'Firefox 4.0.1 – Windows',
    'Opera 11.11 – MAC',
    'Opera 11.11 – Windows',
    'Chrome 17.0 – MAC',
    'Maxthon',
    'TT',
    'The World 2.x',
    'The World 3.x',
    'sougou 1.x',
    '360',
    'Avant',
    'Green Browser'
]

# Browser label -> raw User-Agent string. Every value embeds a leading
# "User-Agent:" label (sometimes followed by a space); choice_User_Agent()
# is expected to split that label off before use.
pc_User_Agent = {
    "safari 5.1 – MAC":"User-Agent:Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50",
    "safari 5.1 – Windows":"User-Agent:Mozilla/5.0 (Windows; U; Windows NT 6.1; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.502",
    "IE 9.0":"User-Agent:Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.02",
    "IE 8.0":"User-Agent:Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0)",
    "IE 7.0":"User-Agent:Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0)",
    "IE 6.0":"User-Agent: Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1)",
    "Firefox 4.0.1 – MAC":"User-Agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
    "Firefox 4.0.1 – Windows":"User-Agent:Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
    "Opera 11.11 – MAC":"User-Agent:Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; en) Presto/2.8.131 Version/11.11",
    "Opera 11.11 – Windows":"User-Agent:Opera/9.80 (Windows NT 6.1; U; en) Presto/2.8.131 Version/11.11",
    "Chrome 17.0 – MAC":"User-Agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_0) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11",
    "Maxthon":"User-Agent: Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Maxthon 2.0",
    "TT":"User-Agent: Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; TencentTraveler 4.0)",
    "The World 2.x":"User-Agent: Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)",
    "The World 3.x":"User-Agent: Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; The World)",
    "sougou 1.x":"User-Agent: Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; SE 2.X MetaSr 1.0; SE 2.X MetaSr 1.0; .NET CLR 2.0.50727; SE 2.X MetaSr 1.0)",
    "360":"User-Agent: Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; 360SE)",
    "Avant":"User-Agent: Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Avant Browser)",
    "Green Browser":"User-Agent: Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)"
}

# ip_pools
'''
    '115.206.174.95:8888',
    '218.58.146.84:8118',
    '121.225.25.120:3128',
    '119.18.234.140:8090',
    '39.88.41.99:8118',
    '223.241.79.167:8010',
    '27.40.135.167:61234',
    '101.204.16.68:8118',
    '110.73.49.173:8123',
    '116.58.227.227:44311'
   
'''
# Candidate HTTP proxies in "host:port" form, picked at random by get_ua_ip().
# NOTE(review): the first entry has no port and looks like a LAN address —
# confirm it is intentional; ProxyHandler will still accept the string.
ip_list = [
    '192.168.1.7',
    '115.206.174.95:8888',
    '218.58.146.84:8118',
    '121.225.25.120:3128',
    '119.18.234.140:8090',
    '39.88.41.99:8118',
    '223.241.79.167:8010',
    '27.40.135.167:61234',
    '101.204.16.68:8118',
    '110.73.49.173:8123',
    '116.58.227.227:44311'
]


# referer = ''

# 根据选出来的浏览器，挑选对应的用户代理地址
# Pick a browser at random and return its header as a one-entry dict.
def choice_User_Agent():
    """Return a randomly chosen header dict: {'User-Agent': <ua string>}.

    The table values embed a leading "User-Agent:" label, so the string is
    split on the FIRST colon only.  The original split(":")[1] truncated any
    value containing a further ':' (e.g. "rv:2.0.1" in the Firefox entries).
    """
    browser = random.choice(pc_Browser)
    raw = pc_User_Agent[browser]
    header, _, value = raw.partition(":")
    # strip() normalises entries written as "User-Agent: Mozilla..." (with a
    # space after the colon) so the header value has no leading blank.
    return {header.strip(): value.strip()}


# 安装ua, ip为全局

def install_ua_ip(user_agent,ip):
    """Globally install a urllib opener that routes HTTP through *ip* and
    sends the given User-Agent header on every subsequent request.

    :param user_agent: dict containing a 'User-Agent' entry
    :param ip: proxy address in "host:port" form
    """
    proxy_handler = urllib.request.ProxyHandler({'http': ip})
    new_opener = urllib.request.build_opener(proxy_handler, urllib.request.HTTPHandler)
    ua_value = user_agent['User-Agent']
    new_opener.addheaders = [('User-Agent', ua_value)]
    urllib.request.install_opener(new_opener)

#获取ua ip 地址

def get_ua_ip():
    """Return a random (user_agent_dict, proxy_ip) pair for opener setup."""
    chosen_ip = random.choice(ip_list)
    chosen_ua = choice_User_Agent()
    return (chosen_ua, chosen_ip)

#第一遍获取数据

# First pass: fetch one search-results page and extract the raw product tuples.
def get_date(keyword, number, retries=3):
    """Fetch JD search page *number* for *keyword* and scrape product data.

    :param keyword: search term (quoted into the URL here, so non-ASCII is safe)
    :param number: 1-based page number
    :param retries: how many times to refresh the UA/proxy opener on failure
    :return: list of (id, price, name, comment_count, shop) tuples,
             or None when all retries are exhausted
    """
    url = 'https://search.jd.com/Search?keyword={}&enc=utf-8&page={}&scrolling=y'.format(
        urllib.request.quote(keyword), number)
    try:
        response = urllib.request.urlopen(url,timeout = 5).read().decode('utf-8')
        pat1 = r'<strong class="J_(\d{6,11})".*?(\d{1,3}.\d{1,2}).*?</div>.*?<em>(.*?)</em>\s{1}.*?>(.{1,5}</a>条评价)</strong>.*?<div class="p-shop".*?>(.*?)</div>'
        data = re.compile(pat1, re.S).findall(response)
    except Exception as e:  # URLError, timeout, decode errors, ...
        print(e)
        print('ip失效')
        if retries <= 0:
            return None
        time.sleep(0.5)
        user_agent, ip = get_ua_ip()
        install_ua_ip(user_agent, ip)
        # BUG FIX: the original dropped the recursive call's result, so every
        # retry returned None even when the re-fetch succeeded.
        return get_date(keyword, number, retries - 1)
    else:
        return data

#清洗存储数据

'''
如1298276" 11.90		</div><em><span class="p-tag" style="background-color:#c81623">京东超市</span>良品铺子 香辣味海带丝 即食海味小吃 海草海带片 <font class="skcolor_ljg">零食</font>独立小包装218g</em> 
>13万+</a>条评价</strong><div class="p-shop" ><span class="J_im_icon"><a target="_blank" class="curr-shop" onclick="searchlog(1,1000006804,0,58)" href="//mall.jd.com/index-1000006804.html" title="良品铺子京东自营旗舰店">良品铺子京东自营旗舰店</a></span></div>'''

# Clean the scraped tuples and append one dict-per-line record to the file.
def clean_save_date(data, filename):
    """Normalise each scraped product tuple and append it to *filename*.

    :param data: iterable of (id, price, name, comment_count, shop) tuples
    :param filename: path of the UTF-8 text file records are appended to

    BUG FIX: the original opened `file_name` (undefined; the parameter is
    `filename`), raising NameError on the first record.  The file is now
    also opened once instead of once per row.
    """
    # Raw strings so the regex escapes survive (non-raw '\s' only works by
    # accident); patterns hoisted out of the loop.
    name_pat = r'<.*?>京东超市|\s|京东超市'
    number_pat = '</a>'
    market_pat = r'<.*?>|\s'
    with open(filename, 'a', encoding='utf-8') as f:
        for snacks_id, snacks_price, snacks_name, snacks_comment_number, snacks_sell_market in data:
            snacks_name = re.sub(name_pat, '', snacks_name)
            snacks_comment_number = re.sub(number_pat, '', snacks_comment_number)
            snacks_sell_market = re.sub(market_pat, '', snacks_sell_market)
            if snacks_sell_market == '':
                # Self-operated listings have no shop markup; label them as JD.
                snacks_sell_market = '京东超市'
            clean_date = {'商品名':snacks_name,'商品编号':snacks_id,'商品价格':str(snacks_price)+'RMB','商品出售方':snacks_sell_market,'评论数':snacks_comment_number}
            f.write(str(clean_date) + '\r\n')


#获取评论
'''
如 https://sclub.jd.com/comment/productPageComments.action?callback=fetchJSON_comment98vv6350&productId=4825002&score=0&sortType=5&page=1&pageSize=10&isShadowSku=0&rid=0&fold=1
'''
def get_comment(data,comment_file,number):
    """Download up to two pages of comments for every product in *data*.

    :param data: scraped product tuples; only the id (element 0) is used
    :param comment_file: directory prefix where one "<page>_<id>.txt" file
                         per product is written (GBK, matching JD's endpoint)
    :param number: current search-page number, used in the output file name

    On any failure the global UA/proxy opener is refreshed and the current
    product list is abandoned (best effort, mirroring get_date's recovery).

    BUG FIXES vs. original: placeholder strings lacked the f-prefix (files
    were literally named "{comment_dir}..."), decode used the invalid codec
    'gnk' instead of 'gbk', `comment_info` started as a set instead of a
    string, `url + i` concatenated str and int, the per-product file was
    created outside the product loop, and the except clause referenced the
    undefined names `error` and `keyword`.
    """
    try:
        for snacks in data:
            snacks_id = snacks[0]
            out_path = '{}{}_{}.txt'.format(comment_file, number, snacks_id)
            # Truncate/create the per-product file before appending pages.
            with open(out_path, 'w', encoding='gbk') as f:
                f.write('')
            base_url = ('https://sclub.jd.com/comment/productPageComments.action'
                        '?callback=fetchJSON_comment98vv6350&productId={}'
                        '&score=0&sortType=5&page='.format(snacks_id))
            comment_pat = '"content":"(.*?)",".*?"nickname":"(.*?)"'
            for page in range(2):
                url = base_url + str(page)
                response = urllib.request.urlopen(url).read().decode('gbk', 'ignore')
                rst = re.compile(comment_pat, re.S).findall(response)
                comment_info = ''
                for content, nickname in rst:
                    comment_info += nickname + ':' + content + '\r\n'
                with open(out_path, 'a', encoding='gbk') as f:
                    f.write(comment_info)
                # A short page means there are no further comment pages.
                if len(rst) < 10:
                    break
    except Exception as e:
        print(e)
        print('错误，重新安装opener')
        time.sleep(0.5)
        user_agent, ip = get_ua_ip()
        install_ua_ip(user_agent, ip)

def main():
    """Crawl JD search pages for the keyword, saving product info and comments.

    BUG FIXES vs. original: install_ua_ip() returns None, so the result was
    being passed to install_opener (clobbering the opener it had just
    installed); `for number in (1,101)` iterated exactly two values instead
    of pages 1..100; the page-header string lacked its f-prefix; and the
    undefined names `filename`/`comment_file` raised NameError — the info
    file and comment directory are used instead.
    """
    user_agent, ip = get_ua_ip()
    install_ua_ip(user_agent, ip)  # installs the opener globally
    keyword = '零食'
    file_dir = './商品信息_' + keyword + '/'
    os.makedirs(file_dir, exist_ok=True)
    comment_dir = file_dir + 'comment/'
    os.makedirs(comment_dir, exist_ok=True)
    info_file = comment_dir + keyword + '.txt'
    for number in range(1, 101):
        with open(info_file, 'a', encoding='utf-8') as f:
            f.write('第{}页\r\n'.format(number))
        data = get_date(keyword, number)
        if not data:
            # Page fetch failed even after retries; move on to the next page.
            continue
        clean_save_date(data, info_file)
        get_comment(data, comment_dir, number)

if __name__ == '__main__':
    main()








