#-*- coding: UTF-8 -*-
__author__ = '愚夫'
import requests,urllib,argparse,time,random,json,logging,sys,os
from pyquery import PyQuery as pq

# Base class for filters
# Base class for result filters applied to each scraped search item.
#
# Fixes vs. original:
#  - the isinstance check was inverted: it raised for pq instances instead of
#    requiring one (compare Exporter, which uses `not isinstance`), and the
#    message had a typo ("unsuppored").
#  - scraper.applyFilter invokes f.filter(obj), so the hook must accept the
#    item being filtered; the original base signature took no argument.
class Filter(object):
    def __init__(self, response):
        # Require a pyquery document; reject anything else.
        if not isinstance(response, pq):
            raise Exception('unsupported filter object')
        self.response = response

    def filter(self, obj):
        # Hook: subclasses override to inspect/transform one search item.
        pass
# Base class for exporters
class Exporter(object):
    """Base class for exporters that serialize a finished scraper's data."""

    def __init__(self, obj):
        # Only a scraper instance can be exported; anything else is rejected.
        if isinstance(obj, scraper):
            self.obj = obj
        else:
            raise Exception('unsupported export object type')

    def run(self):
        """Subclasses override this to perform the actual export."""
        pass

# Scraper class
# Scrapes jd.com search results for a keyword, then the comments of each
# product found, and finally exports them (via an Exporter or a plain file).
class scraper(object):
    # URL templates for the comment API and the search page.
    commentUrl = 'https://club.jd.com/comment/productPageComments.action?productId={productId}&score=0&sortType=5&page={page}&pageSize=100'
    searchUrl = 'https://search.jd.com/Search?keyword={keyword}&enc=utf-8&qrst=1&rt=1&stop=1&vt=2&page={page}&s=52&click=0'
    exporter = None   # optional Exporter instance; falls back to plain file
    pnum = 0          # number of products to collect (~28 per search page)
    cnum = 0          # number of comments per product (10 per API page)
    tag = 0           # 0: plain comment text, otherwise: show-order comment
    output = None     # fallback output file path when no exporter is set

    def __init__(self):
        # Per-instance mutable state.  The original kept these as class
        # attributes, so every scraper instance shared (and kept growing)
        # the same lists.
        self.filters = []
        self.productIds = []
        self.comments = []

    def getProductIdsByKeyword(self, keyword):
        """Search for `keyword`, collect product ids, scrape their comments
        and export the result."""
        # pnum may arrive as a string from argparse; coerce before the
        # integer division (each search page yields ~28 products).
        for pi in range(1, int(self.pnum) // 28 + 1):
            url = self.searchUrl.format(keyword=keyword, page=pi)
            logging.info('starting to scrape ' + url)
            self._getProductPage(url)
            time.sleep(random.randint(0, 10))  # polite random delay
        logging.info('starting to scrape comments')
        for pid in self.productIds:
            logging.info('starting to scrape comments for ' + pid)
            self.getCommentsByProductId(pid)
        logging.info('scraping complete,starting to export')
        if self.exporter:
            self.exporter.run()
        else:
            # Each entry is [id, text-with-linesep]; writelines joins them.
            with open(self.output, 'w') as f:
                for c in self.comments:
                    f.writelines(c)
        logging.info('scraping complete')

    def getCommentsByProductId(self, productId):
        """Fetch up to cnum comments (10 per API page) for one product id."""
        for ci in range(1, int(self.cnum) // 10 + 1):
            url = self.commentUrl.format(productId=productId, page=ci)
            self._getCommentPage(url)
            time.sleep(random.randint(0, 10))  # polite random delay

    def _getProductPage(self, url):
        # Parse one search-result page, run filters, and collect each item's
        # data-pid attribute.
        logging.info(url)  # was a bare Py2 `print`; logging matches the rest
        content = pq(requests.get(url).content)
        for node in content('li.gl-item'):
            item = pq(node)
            self.applyFilter(item)
            pid = item.attr('data-pid')
            if pid is not None:
                self.productIds.append(pid)

    def _getCommentPage(self, url):
        # Parse one comment-API JSON response and accumulate [id, text] pairs.
        content = requests.get(url).json()
        comments = content.get('comments', None)
        if comments is None:
            return
        for c in comments:
            try:
                if self.tag == 0:
                    text = c.get('content', None)
                else:
                    # showOrderComment wraps the follow-up review's text.
                    text = c.get('showOrderComment', None).get('content', None)
                self.comments.append(
                    [str(c.get('id', None)), text.encode('utf-8') + os.linesep])
            except AttributeError:
                # Missing/None fields (text or showOrderComment is None):
                # skip this comment, as the original's bare except did.
                pass

    def applyFilter(self, obj):
        # Run every registered filter over one search item.
        for f in self.filters:
            f.filter(obj)

    def addFilter(self, filterInstance):
        # Plain guard clause instead of the original conditional-expression
        # `raise ... if ... else ...` one-liner.
        if not isinstance(filterInstance, Filter):
            raise Exception('unsupported filter type')
        self.filters.append(filterInstance)

    def setExporter(self):
        self.exporter = Exporter(self)

    def scrape(self, keyword):
        """Entry point: scrape everything for `keyword`."""
        logging.info('starting to search product id for keyword')
        self.getProductIdsByKeyword(keyword)


if __name__ == '__main__':
    logging.basicConfig(format='%(levelname)s %(asctime)s %(message)s', level=logging.INFO)
    parser = argparse.ArgumentParser()
    parser.add_argument('key', help='keyword to search')
    parser.add_argument('-types', default='0', help='0:search,1:product id')
    # type=int: the original left these as strings when given on the command
    # line, which broke the integer division in the scraper (pnum/28) and the
    # `tag == 0` comparison ('0' == 0 is False, so -tag 0 misbehaved).
    parser.add_argument('-pnum', type=int, default=100, help='product num')
    parser.add_argument('-cnum', type=int, default=10000, help='comment num')
    parser.add_argument('-output', default='jcomments.txt', help='output file')
    parser.add_argument('-tag', type=int, default=0, help='strip images')
    logging.info('starting to parse arguments')
    args = parser.parse_args()
    logging.info('starting to init scraper')
    s = scraper()
    # argparse defaults already guarantee values; keep the original fallbacks
    # for safety (note: original fell back to 100 comments, not 10000).
    s.pnum = args.pnum or 100
    s.cnum = args.cnum or 100
    s.tag = args.tag or 0
    s.output = args.output or 'jcomments.txt'
    logging.info('scraping start')
    # NOTE(review): assumes a GBK console (Python 2 on Windows); the keyword
    # is re-encoded to UTF-8 for the search URL — confirm on other platforms.
    s.scrape(args.key.decode('gbk').encode('utf-8'))