import scrapy
from kickstart import items
import json
import re
import copy
import pickle
from  bson.binary import Binary

class CtripSpider(scrapy.Spider):
    """Crawl ctrip.com travel pages.

    Starting from the country index page this spider walks:
    continents/countries -> (China only) province map -> per-city sight
    lists -> individual sight pages, yielding sight items, raw HTML
    snapshots, cover pictures and user comments.

    Spider arguments (``scrapy crawl ctripview -a name=value``):
      spiderid, step -- together select the slice of the city list this
                        instance crawls: [spiderid*step, spiderid*step+step)
      cityfilepath   -- path to a pickle file holding the full city list
    """

    name = "ctripview"
    allowed_domains = ["hotels.ctrip.com", "images4.c-ctrip.com",
                       "you.ctrip.com"]
    start_urls = ["http://you.ctrip.com/place/countrylist.html"]

    def __init__(self, spiderid=None, step=None, cityfilepath=None, *args, **kwargs):
        super(CtripSpider, self).__init__(*args, **kwargs)
        # Work-slice boundaries; None means "no slice configured".
        if spiderid and step:
            self.start = int(spiderid) * int(step)
            self.end = self.start + int(step)
        else:
            self.start = None
            self.end = None
        self.log("city slice: %s - %s" % (self.start, self.end))

        self.cities = None
        if cityfilepath:
            # NOTE(review): pickle.load is unsafe on untrusted input; the
            # city file is assumed to come from the trusted launcher script.
            # BUG FIX: use a context manager so the file handle is closed.
            with open(cityfilepath, 'rb') as cityfile:
                cities = pickle.load(cityfile)
            # Slicing is controlled by the top-level launcher script.
            # BUG FIX: made the None check explicit (None >= 0 is a
            # TypeError on Python 3 and silently False on Python 2).
            if self.start is not None and self.start >= 0:
                if self.end and self.end <= len(cities):
                    self.cities = cities[self.start:self.end]
                else:
                    self.cities = cities[self.start:]

    @staticmethod
    def _stripAffixes(text, prefix, suffix):
        """Remove an exact leading *prefix* and trailing *suffix*.

        str.lstrip/rstrip strip character *sets*, not affixes, and the
        original code corrupted ids (e.g. '/place/lasa36.html' lost the
        leading letters of 'lasa36'); exact slicing is what was intended.
        """
        if prefix and text.startswith(prefix):
            text = text[len(prefix):]
        if suffix and text.endswith(suffix):
            text = text[:-len(suffix)]
        return text

    def createCity(self, cname, data, group):
        """Build a CtripCity item from raw '"key":"v0|v1|v2"' fragments.

        *data* looks like 'key:"pinyin|?|cid"'; *group* like 'key:value'.
        """
        city = items.CtripCity()
        fields = data.split(':')[1].split('|')
        city['cname'] = cname
        city['pinyin'] = fields[0].replace('"', '').lower()
        city['cid'] = fields[2].replace('"', '')
        city['group'] = group.split(':')[1]
        return city

    def json_parse(self, data):
        """Parse ctrip's pseudo-JSON city listing into CtripCity items.

        The payload is not valid JSON: braces and brackets are stripped
        and the remainder split on commas; records arrive as triples of
        (display-name, city-data, group) fields.
        """
        strip_braces = re.compile(r'\{|\}')
        strip_brackets = re.compile(r'\[|\]')
        flat = strip_brackets.sub('', strip_braces.sub('', data))
        # The site serves gb2312-encoded text (Python 2 str -> unicode).
        datas = flat.decode('gb2312').split(",")
        cities = []
        index = 0
        while index < len(datas):
            city = None
            tmps = datas[index].split(':')
            # The 'display' marker may appear in the first or second field
            # depending on where the record sits in the original structure.
            if tmps[0] == 'display':
                city = self.createCity(tmps[1], datas[index + 1], datas[index + 2])
            elif tmps[1] == 'display':
                city = self.createCity(tmps[2], datas[index + 1], datas[index + 2])
            if city:
                cities.append(city)
            index += 3
        return cities

    def storePic(self, response):
        """Store a downloaded sight picture as a binary blob item."""
        sightPic = items.CtripHotelPIC()
        sightPic['url'] = response.url
        sightPic['pic'] = Binary(response.body)
        sightPic['sightid'] = response.meta['sid']
        sightPic['cid'] = response.meta['cid']
        yield sightPic

    def _buildCommentHTML(self, response, pageid):
        """Build a CtripSightCommentHTML item snapshotting the raw page."""
        commentHTML = items.CtripSightCommentHTML()
        commentHTML['cid'] = response.meta['cid']
        commentHTML['sightid'] = response.meta['sid']
        commentHTML['url'] = response.url
        # Pages are served as gb18030; store them re-encoded as UTF-8.
        commentHTML['html'] = response.body.decode("gb18030").encode("UTF-8")
        commentHTML['pageid'] = pageid
        return commentHTML

    def _extractComments(self, response):
        """Yield one CtripSightComment per review block on the page."""
        comments = response.selector.xpath('/html/body/div[4]/div/div[1]/div/div[1]/div[3]/div[@class="comment_single"]')
        for comm in comments:
            comment = items.CtripSightComment()
            comment['sightid'] = response.meta['sid']
            comment['cid'] = response.meta['cid']
            comment['traveldate'] = comm.xpath('ul/li[1]/span[3]/text()').extract()
            comment['commentdate'] = comm.xpath('ul/li[4]/span[1]/span/em/text()').extract()
            comment['scoretext'] = comm.xpath('ul/li[1]/span[1]/span[2]/text()').extract()
            score = comm.xpath('ul/li[1]/span[1]/span[1]/span/@style').extract()
            # BUG FIX: .extract() returns a list -- the original called
            # lstrip on the list and crashed with AttributeError. The score
            # is rendered as a CSS width, e.g. "width:80%;" -> 4.0 stars.
            if score:
                comment['score'] = float(score[0].replace('width:', '').rstrip('%;')) / 100.0 * 5.0
            comment['uname'] = comm.xpath('div/span/a/text()').extract()
            comment['comment'] = comm.xpath('ul/li[2]/span/text()').extract()
            yield comment

    def parseHotelCommentsPage(self, response):
        """Handle comment pages 2..N scheduled by parseHotelComments."""
        # BUG FIX: the original hard-coded pageid 1 here although this
        # callback only ever serves pages >= 2; honour the page number the
        # scheduling loop placed into meta.
        pageid = response.meta.get('cpageid', 1)
        yield self._buildCommentHTML(response, pageid)
        for comment in self._extractComments(response):
            yield comment

    def parseHotelComments(self, response):
        """Handle the first comment page of a sight and schedule the rest."""
        yield self._buildCommentHTML(response, 1)
        for comment in self._extractComments(response):
            yield comment

        # The "all comments" link embeds the poiID needed by the paging API.
        poiIds = response.selector.xpath('/html/body/div[5]/div/div[1]/div[1]/ul/li[2]/span[2]/span[2]/a/@href').extract()
        poiId = 0
        if len(poiIds) > 0:
            # BUG FIX: .extract() returns a list; the original called
            # rfind on the list itself and crashed with AttributeError.
            href = poiIds[0]
            poiId = href[href.rfind('/') + 1:href.rfind('.html')]
        pages = response.selector.xpath('/html/body/div[5]/div/div[1]/div[4]/div[5]/div[4]/div[11]/div/span/b/text()').extract()
        pagenum = int(pages[0]) if len(pages) > 0 else 0

        prefix = "http://you.ctrip.com/destinationsite/TTDSecond/SharedView/AsynCommentView?poiID=%s&districtId=%s&pagenow=%s&districtEName=%s&resourceId=%s"
        for page in range(2, pagenum + 1):
            tmp_url = prefix % (poiId, response.meta['cid'], page,
                                response.meta['cpinyin'], response.meta['sid'])
            # BUG FIX: give each request its own meta dict; the original
            # mutated the shared response.meta, so every queued request
            # ended up carrying the final page number.
            meta = dict(response.meta)
            meta['cpageid'] = page
            yield scrapy.Request(tmp_url, callback=self.parseHotelCommentsPage, meta=meta)
            self.log("request url %s" % tmp_url)

    def parseSightInfo(self, response):
        """Parse one sight detail page.

        Yields the CtripSight item, a raw-HTML snapshot, a request for the
        cover picture and a request for the sight's comment pages.
        """
        # The sight id is the numeric tail of the URL: .../<sid>.html
        start = response.url.rfind('/')
        end = response.url.rfind('.html')
        sid = int(response.url[start + 1:end])

        sight = items.CtripSight()
        sight['cid'] = response.meta['cid']
        sight['sid'] = sid
        sight['cname'] = response.selector.xpath('/html/body/div[4]/div[2]/div/div[1]/h1/a/text()').extract()
        sight['ename'] = response.selector.xpath('/html/body/div[4]/div[2]/div/div[1]/p/text()').extract()
        sight['score'] = response.selector.xpath('/html/body/div[5]/div/div[1]/div[1]/ul/li[1]/span/b/text()').extract()
        sight['intro'] = response.selector.xpath('/html/body/div[5]/div/div[1]/div[4]/div[2]/div[2]/div/p/text()').extract()
        sight['address'] = response.selector.xpath('/html/body/div[5]/div/div[2]/div[2]/p/text()').extract()
        yield sight
        self.log("yield sight: %s" % sight['cname'])

        # Fetch the cover picture; stored by storePic.
        default_picurl = response.selector.xpath('//*[@id="detailCarousel"]/div/div[1]/a/img/@src').extract()
        if len(default_picurl) > 0:
            self.log("picture url: %s" % default_picurl[0])
            # BUG FIX: the original referenced self.storeSightPic, which is
            # not defined (the method is storePic), and passed the meta key
            # 'sightid' while storePic reads 'sid'.
            yield scrapy.Request(default_picurl[0], callback=self.storePic,
                                 meta={'sid': sid, 'cid': response.meta['cid']})

        sightHTML = items.CtripSightHTML()
        sightHTML['sid'] = sid
        sightHTML['cid'] = response.meta['cid']
        sightHTML['url'] = response.url
        sightHTML['html'] = response.body.decode("gb18030").encode("UTF-8")
        yield sightHTML

        comment_num = response.selector.xpath('//*[@id="comment"]/div/h2/span/span/text()').extract()
        if len(comment_num) <= 0:
            self.log("no comment count found: %s %s" % (response.url, comment_num))
        else:
            prefix = "http://you.ctrip.com/%s"
            comments_url = response.selector.xpath('//*[@id="yyDianping"]/@href').extract()
            if len(comments_url) > 0:
                tmp_url = prefix % (comments_url[0])
                yield scrapy.Request(tmp_url, callback=self.parseHotelComments,
                                     meta={'cid': response.meta['cid'],
                                           'cpinyin': response.meta['cpinyin'],
                                           'sid': sid})

    def parseSightsPage(self, response):
        """Handle sight-list pages 2..N: snapshot HTML, request each sight."""
        citySightHTML = items.CtripCitySightHTML()
        citySightHTML['cid'] = response.meta['cid']
        citySightHTML['url'] = response.url
        citySightHTML['pageid'] = response.meta['pageid']
        citySightHTML['html'] = response.body.decode("gb18030").encode("UTF-8")
        yield citySightHTML

        sights = response.selector.xpath('//div[@class="list_wide_mod2"]/div[@class="list_mod2"]/div[@class="rdetailbox"]/dl/dt/a/@href').extract()
        prefix = "http://you.ctrip.com%s"
        for sight in sights:
            tmp_url = prefix % sight
            yield scrapy.Request(tmp_url, callback=self.parseSightInfo,
                                 meta=response.meta)
            self.log("request sight %s" % tmp_url)

    def parseSights(self, response):
        """Parse page 1 of a city's sight list and schedule pages 2..N."""
        self.log("sights url: %s" % response.url)
        # BUG FIX: added /text() -- the original extracted the whole <b>
        # element markup, so int() would fail whenever a pager existed
        # (the sibling pager xpath in parseHotelComments uses b/text()).
        pages = response.selector.xpath('/html/body/div[5]/div/div[2]/div[2]/div[3]/div[16]/div/span/b/text()').extract()
        page_num = int(pages[0]) if len(pages) > 0 else 0

        response.meta['pageid'] = 1
        citySightHTML = items.CtripCitySightHTML()
        citySightHTML['cid'] = response.meta['cid']
        citySightHTML['url'] = response.url
        citySightHTML['pageid'] = response.meta['pageid']
        citySightHTML['html'] = response.body.decode("gb18030").encode("UTF-8")
        yield citySightHTML

        self.log("sights pages num: %s" % page_num)

        # BUG FIX: select a/@href like parseSightsPage does -- the original
        # extracted whole <a> elements and glued the markup onto the URL.
        sights = response.selector.xpath('//div[@class="list_wide_mod2"]/div[@class="list_mod2"]/div[@class="rdetailbox"]/dl/dt/a/@href').extract()
        cid = response.meta['cid']
        prefix = "http://you.ctrip.com%s"
        for sight in sights:
            tmp_url = prefix % sight
            yield scrapy.Request(tmp_url, callback=self.parseSightInfo, meta=response.meta)
            self.log("request sight %s" % tmp_url)

        # Pages 2..N use the s0-p<n> URL scheme.
        prefix = 'http://you.ctrip.com/sight/%s%s/' % (response.meta['cpinyin'], cid)
        for page in range(2, page_num + 1):
            tmpurl = prefix + "s0-p%s" % page
            yield scrapy.Request(tmpurl, callback=self.parseSightsPage,
                                 meta={'cid': cid, 'pageid': page,
                                       'cpinyin': response.meta['cpinyin']})

    def parseCountryPage(self, response):
        """Placeholder for non-China country pages (not implemented)."""
        pass

    def parseChinaPage(self, response):
        """Handle China, which is rendered as a clickable province map
        rather than a country list.

        NOTE(review): items are only logged, never yielded -- this handler
        looks unfinished; confirm before relying on its output.
        """
        chinaPlaces = response.selector.xpath('//div[@class="map map_china"]/a')
        for p in chinaPlaces:
            place = items.CtripChinaPlace()
            url = p.xpath('@href').extract()[0]
            # BUG FIX: lstrip/rstrip strip character sets and mangled ids
            # (the original also chained a stray lstrip('a-zt sta'));
            # slice the exact '/place/' prefix and '.html' suffix instead.
            place['pid'] = self._stripAffixes(url, '/place/', '.html')
            place['pname'] = p.xpath('text()').extract()[0]
            place['url'] = url
            self.log("china place: %s" % place)

    def parse(self, response):
        """Entry point: walk the continent/country index page.

        NOTE(review): continent items are built but never yielded, and the
        city-crawl section after the bare ``return`` is unreachable --
        both preserved as found (apparently disabled deliberately).
        """
        continents = response.selector.xpath('//*[@id="newMasterForm"]/div[6]/div/div[2]/div[1]/span')
        for continent in continents:
            conitem = items.CtripContinent()
            conitem['conid'] = continent.xpath('@v').extract()[0].lstrip('#')
            conitem['conname'] = continent.xpath('text()').extract()[0]

        base = response.selector.xpath('//*[@id="newMasterForm"]/div[6]/div/div[2]')
        conids = base.xpath('a/@name').extract()
        countrylists = base.xpath('div[@class="countrylist"]')
        prefix = "http://you.ctrip.com/"
        # Continent anchors and country lists are parallel sequences.
        for conid, countrylist in zip(conids, countrylists):
            for obj in countrylist.xpath('div/ul/li'):
                country = items.CtripCountry()
                country['conid'] = conid
                url = obj.xpath('a/@href').extract()[0]
                country['ccname'] = obj.xpath('a/text()').extract()[0]
                country['cename'] = obj.xpath('span/text()').extract()[0]
                country['url'] = url
                # BUG FIX: exact affix slicing instead of lstrip/rstrip
                # character-set stripping, which could eat id characters.
                country['countryid'] = self._stripAffixes(
                    url, '/place/' + country['cename'].lower(), '.html')
                if country['cename'].lower() == 'china':
                    yield scrapy.Request(prefix + url, callback=self.parseChinaPage,
                                         meta={'countryid': country['countryid']})
                else:
                    # Non-China countries are currently skipped (the
                    # parseCountryPage request below the continue was
                    # already dead code in the original).
                    continue

        return
        # --- dead code below: disabled city crawl, kept as found ---
        hotel_view_prefix = "http://you.ctrip.com/sight/"
        for city in self.cities:
            tmp_url = hotel_view_prefix + city['pinyin'] + city['cid'] + '.html'
            yield scrapy.Request(tmp_url, callback=self.parseSights,
                                 meta={'cid': city['cid'], 'cpinyin': city['pinyin']})