'''
v1.01 2018-03-06 first commit.
'''

import re
import os
import time
import urllib.request
import threading

# Build a urllib opener that attaches a desktop-browser User-Agent header
# to every request it makes.
# Example search URL kept for reference:
# url = "http://search.jd.com/Search?keyword=%E9%9B%B6%E9%A3%9F&enc=utf-8&page=3"
headers = ("User-Agent", "Mozilla/5.0 (Windows NT 6.2; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.89 Safari/537.36")
opener = urllib.request.build_opener()
opener.addheaders = [headers]


# Resolve the output file next to this script and open it for writing.
#
# Bug fix: the original split the path on a hard-coded backslash
# (AbsFile[:AbsFile.rfind("\\")]), which yields a wrong directory on any
# OS whose separator is "/" (rfind returns -1).  os.path handles both.
AbsFile = os.path.abspath(__file__)

# Directory containing this script.
FileName = os.path.dirname(AbsFile)

# Destination for the scraped product records, one JSON-style line each.
DesFileName = os.path.join(FileName, "jd_product.txt")

# Explicit utf-8 so the Chinese product names are written correctly
# regardless of the platform's default encoding.
fwrite = open(DesFileName, "w", encoding="utf-8")


# Scrape 50 search-result pages for keyword "零食" (snacks).  The page
# parameter is (countPage * 2) + 1 — presumably JD numbers full result
# pages with odd values; TODO confirm against the live site.
#
# Patterns are compiled once, outside the loop; re.S lets '.' span the
# newlines inside each product's HTML fragment.
re_name = re.compile('<strong class="J_.*?title="(.*?)"', re.S)
re_price = re.compile('<strong class="J_.*?<em>￥</em><i>(.*?)</i>', re.S)
re_saler = re.compile('<strong class="J_.*?class="curr-shop".*?">(.*?)</a>', re.S)
re_comment = re.compile('<strong><a id="J_comment_.*?">(.*?)</a>条评价', re.S)

for countPage in range(0, 50):
    url = "http://search.jd.com/Search?keyword=%E9%9B%B6%E9%A3%9F&enc=utf-8&page=" + str((countPage * 2) + 1)

    # Fetch through the UA-carrying opener and decode the HTML.
    data = opener.open(url).read().decode("utf-8")

    # Extract the parallel per-product lists.
    res_name = re_name.findall(data)
    res_price = re_price.findall(data)
    res_saler = re_saler.findall(data)
    res_comment = re_comment.findall(data)

    num_name = len(res_name)
    num_price = len(res_price)
    num_saler = len(res_saler)
    num_comment = len(res_comment)

    # The four lists can come back with different lengths when a product
    # card lacks a field.  The original guarded only the saler list;
    # price and comment are now guarded the same way, so a short list
    # falls back to "无" instead of raising IndexError mid-page.
    for count in range(0, num_name):
        pName = res_name[count].strip()
        pPrice = res_price[count].strip() if count < num_price else "无"
        pSaler = res_saler[count].strip() if count < num_saler else "无"
        pComment = res_comment[count].strip() if count < num_comment else "无"
        # One JSON-style record per line.  (Renamed from `json` to avoid
        # shadowing the stdlib module name.)
        record = '{"商品名":"' + pName + '","商品价格":"' + pPrice + '","商品出售方":"' + pSaler + '","商品评论数":"' + pComment + '"}\n'
        fwrite.write(record)


# Close the output file so buffered records are flushed to disk,
# then report completion ("scraping finished") on stdout.
fwrite.close()
print("爬取数据完成")

    
'''
#module-III
#set url headers
#     "http://sclub.jd.com/comment/productPageComments.action?productId=2730481&score=0&sortType=5&page=0&pageSize=10"

import sys
non_bmp_map = dict.fromkeys(range(0x10000, sys.maxunicode + 1), 0xfffd)

url = "http://sclub.jd.com/comment/productPageComments.action?productId=2730481&score=0&sortType=5&page=0&pageSize=10"
headers=("User-Agent","Mozilla/5.0 (Windows NT 6.2; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.89 Safari/537.36")
opener = urllib.request.build_opener()
opener.addheaders=[headers]

#get data and decode
data = opener.open(url).read().decode("UTF-8","ignore").translate(non_bmp_map)
#print(len(data))


#set filter name
pat_name = '},{"id":.*?,"content":"(.*?)"'
res_name = re.compile(pat_name,re.S).findall(data)


num_name = len(res_name)
print(len(res_name))
print("------------------------------")

for count in range(0,num_name):
    print("\t第" + str(count) + "个=[" + eval('u"' + res_name[count] + '"') + "]")
    #print("\t第" + str(count) + "个=[" + res_name[count] + "]")
'''
