#-*-coding:utf-8-*-

import urllib.request
import random
import os
import sys
import re
from urllib import request,error

#定义关键词
#Crawl JD.com for a keyword
def get_web_data(keyword,pages):
    """Crawl JD.com search results for *keyword* and save product data.

    Parameters:
        keyword: search term; URL-quoted before being placed in the URL.
        pages:   upper bound on the search page index; only odd indices in
                 range(0, pages) are fetched.

    Side effects (current working directory):
        ./ip.txt            cache of the last working proxy IP
        ./comment/          directory of per-product comment files
        ./商品数据.txt       one line of product info per item

    All requests go through an HTTP proxy obtained from a proxy-vendor API
    and a random User-Agent from the pool below.
    """
    # User-Agent pool; one is picked at random per request.
    uapools=[
        "Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50",
        "Mozilla/5.0 (Windows; U; Windows NT 6.1; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50",
        "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0",
        ]

    # Compiled once: strips HTML tags from scraped fragments.
    tag_pat=re.compile(r'</?\w+[^>]*>',re.S)

    #Ensure the proxy cache file and the comment directory exist.
    def check_file():
        if not os.path.exists("./ip.txt"):
            with open("./ip.txt","w"):
                pass
        if not os.path.exists('./comment'):
            os.makedirs('./comment')

    #Return a proxy IP: reuse the cached one, else fetch a fresh one.
    def get_ip():
        with open("./ip.txt","r") as fh:
            ips=fh.readlines()
        if ips:
            return ips[0].strip()
        # BUG FIX: error handler was misspelled "igonre", which raises
        # LookupError as soon as an undecodable byte appears.
        data=request.urlopen("http://tvp.daxiangdaili.com/ip/?tid=557627936365080&num=1&filter=on").read().decode("utf-8","ignore")
        if data:
            return data.strip()
        return None

    #Install a global opener using *ip* as HTTP proxy + random User-Agent.
    def install_proxy(ip):
        # BUG FIX: the original also called get_ip() here into an unused
        # variable, wasting one proxy-vendor API request per page.
        headers=("User-Agent",random.choice(uapools))
        proxy=request.ProxyHandler({"http":ip})
        opener=request.build_opener(proxy,request.HTTPHandler)
        opener.addheaders=[headers]
        request.install_opener(opener)

    #Fetch *url* through a proxy and return re.findall(pat) on the page.
    #Retries with a new proxy until the request succeeds; returns "" when
    #the failure carries neither an HTTP code nor a URL reason.
    #(Renamed from the original inner "get_web_data" to stop shadowing the
    #enclosing function.)
    def fetch_data(url,pat,code):
        print("**********************************************************")
        while True:
            thisip=get_ip()
            try:
                install_proxy(thisip)
                # BUG FIX: "igonre" -> "ignore" (see get_ip).
                data=request.urlopen(url,timeout=50).read().decode("utf-8","ignore")
                rst=re.compile(pat,re.S).findall(data)
                # Cache the proxy that just worked for next time.
                with open("./ip.txt","w") as fh:
                    fh.write(thisip+"\n")
                return rst
            except Exception as e:
                if hasattr(e, "code"):
                    print(e.code)
                elif hasattr(e, "reason"):
                    print(e.reason)
                else:
                    # Unclassifiable failure (e.g. decode): give up on this URL.
                    print(e)
                    return ""
                # BUG FIX: drop the dead proxy by REWRITING the file ("w");
                # the original appended ("a"), duplicating stale IPs, and
                # added a second "\n" to lines that already end in one.
                with open("./ip.txt","r") as fh:
                    ips=fh.readlines()
                with open("./ip.txt","w") as fhw:
                    for ip in ips:
                        if ip.strip()!=thisip:
                            fhw.write(ip)

    #Scrape one search-result page: price, detail URL, name, comment count
    #and shop for every item, then descend into each item's comments.
    def get_home_page(url):
        pat='<li class="gl-item".*?>.*?<div class="gl-i-wrap">.*?<div class="p-price">.*?<i>(.*?)'+\
            '</i></strong>.*?</div>.*?<div class="p-name.*?">.*?'+\
            '<a.*?href="(.*?)".*?<em>(.*?)</em>.*?<div class="p-commit">.*?<strong>'+\
            '<a id=".*?".*?>(.*?)</a>.*?</strong>.*?<div class="p-shop".*?>(.*?)</div>'
        data=fetch_data(url,pat,"utf-8")
        # p[0]=price  p[1]=detail URL  p[2]=name  p[3]=comment count  p[4]=shop
        for p in data:
            with open("./商品数据.txt",'a+') as product_fh:
                product_fh.write("{“p_name”："+tag_pat.sub('',p[2])+\
                         "，“p_price”："+p[0]+"，“p_shop”："+\
                         tag_pat.sub('',p[4]).strip('\t').strip('\n')+\
                         "，“p_comment”："+tag_pat.sub('',p[3])+"}\n")
            get_detail_page(tag_pat.sub('',p[2]),p[1])

    #Resolve the product ID from the detail-page URL and dump the first two
    #pages of comments into ./comment/<product_id>.txt.
    def get_detail_page(product_name,product_url):
        if re.match(r'^//',product_url):
            # Desktop-style URL: ID is embedded directly.
            product_id=re.compile('//item.jd.com/(.*?).html').findall(product_url)
        else:
            # Otherwise follow the mobile-agent redirect to find the ID.
            pat='<meta http-equiv="mobile-agent" content="format=xhtml; url=(.*?)">'
            url=fetch_data(product_url,pat,'gbk')
            if not url:
                return
            product_id=re.compile('product/(.*?).html').findall(url[0])
        if not product_id:
            return
        with open("./comment/"+product_id[0]+".txt",'w') as fh:
            for page in [0,1]:
                comment_url="https://sclub.jd.com/comment/productPageComments.action?productId="+product_id[0]+"&score=0&sortType=5&page="+str(page)+"&pageSize=10"
                pat='"content":"(.*?)"'
                data=fetch_data(comment_url, pat, 'utf-8')
                for comment in data:
                    fh.write(tag_pat.sub('',comment)+"\n")

    # BUG FIX: check_file() was originally called AFTER the crawl loop, so
    # on a fresh run get_ip() crashed with FileNotFoundError on ./ip.txt.
    check_file()

    seach_keyword=request.quote(keyword)
    # JD search paginates with odd "page" values: 1, 3, 5, ...
    for i in range(1,int(pages),2):
        print("-----------------------------------------------------------------")
        print("第"+str(i)+"页")
        print("-----------------------------------------------------------------")
        print("https://search.jd.com/Search?keyword="+seach_keyword+"&enc=utf-8&page="+str(i))
        thisurl="https://search.jd.com/Search?keyword="+seach_keyword+"&enc=utf-8&page="+str(i)
        get_home_page(thisurl)

# Guard the entry point so importing this module does not start a crawl.
if __name__ == "__main__":
    get_web_data("零食",30)#number of pages to crawl