#coding: utf8
import urlparse
import time
import os.path
from scrapy import log
import re
from pygeocoder import Geocoder
from decimal import *
from scrapy.http import Request
from scrapy.http import FormRequest
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from fresh.items import RestaurantItem
from fresh.items import RestaurantReview
from fresh.TripMasterDB import TripMasterDB
import datetime
import MySQLdb
from fresh.utils import Utils
from fresh.TripUtils import TripUtils
import sys

class TripMasterSpider(BaseSpider):
    """Scrapy spider that crawls tripadvisor.es restaurant listings for Madrid.

    Walks the paginated EATERY_SEARCH_RESULTS list, follows each restaurant
    link, and delegates item population to TripUtils.fillRestaurant.
    Progress and errors are reported through the shared Utils logger.
    """

    name = "tripmaster"
    allowed_domains = ["www.tripadvisor.es"]
    start_urls = [
        "http://www.tripadvisor.es/Restaurants-g187514-Madrid.html"
        ]
    db = TripMasterDB()

    utils = Utils()
    tripUtils = TripUtils()

    logLocalName = "TRIP_MAST_SCRAPPY"

    # crawl progress counters (pages visited / restaurant pages parsed)
    page = 0
    numRestFound = 0

    def __init__(self, *args, **kwargs):
        super(TripMasterSpider, self).__init__()
        # HACK(legacy, Python 2 only): force a process-wide default encoding.
        # Kept for behavioral compatibility; remove when porting to Python 3.
        reload(sys)
        sys.setdefaultencoding('utf-8')
        # Named `timestamp` (not `time`) so the imported `time` module
        # is not shadowed inside this method.
        timestamp = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
        filename = os.path.join('logTripMaster/', timestamp + '.log')
        self.utils.logProcessInit(filename)

        self.utils.logProcessBegin(self.logLocalName)

        self.db.utils = self.utils
        self.tripUtils.utils = self.utils
        self.db.connectDB()

    def parse(self, response):
        """Parse one listing page.

        Yields a Request per restaurant found (handled by parse_restaurant,
        carrying a pre-built RestaurantItem in meta) and, when a "next page"
        link exists, a Request for the following listing page.
        """
        hxs = HtmlXPathSelector(response)

        self.page = self.page + 1
        self.utils.log("DEBUG", "Page Number tripadvisor: " + str(self.page), self.utils.DEBUG)

        # `result` (not `list`) so the builtin is not shadowed.
        for result in hxs.select('//div[@id="EATERY_SEARCH_RESULTS"]/div'):
            # Just get the url of the restaurant in tripadvisor.
            restaurant = RestaurantItem()
            url = result.select('div[@class="quality easyClear"]/a[@class="property_title"]/@href').extract()
            if len(url) > 0:
                url = urlparse.urljoin(response.url, url[0])
                restaurant['url_scrapy'] = url
                yield Request(url, callback=self.parse_restaurant, meta={'restaurant': restaurant})

        # Let's go for the next page. NOTE: the trailing space inside the
        # class selector matches the site's actual markup — do not "fix" it.
        url = hxs.select('//a[@class="guiArw sprite-pageNext "]/@href').extract()
        if len(url) > 0:
            yield Request(urlparse.urljoin(response.url, url[0]), callback=self.parse)

    def parse_restaurant(self, response):
        """Parse a single restaurant page and return the filled item.

        Returns None (after logging the error) when parsing fails, so one
        bad page does not abort the whole crawl.
        """
        # Bind the item before the try so the except handler below can
        # always reference it (previously it could be unbound if
        # HtmlXPathSelector itself raised).
        restaurant = response.meta['restaurant']
        try:
            hxs = HtmlXPathSelector(response)

            self.numRestFound = self.numRestFound + 1
            self.utils.log("debug", "num restaurants found: " + str(self.numRestFound), self.utils.DEBUG)

            self.tripUtils.fillRestaurant(response, hxs, restaurant)

            return restaurant
        except Exception as e:
            self.utils.logProcessDetail("Error parsing restaurant en TRIP: " + str(restaurant) + "detail: " + str(e), self.utils.ERROR)

    def close_spider(self):
        """Log the final crawl summary when the spider shuts down."""
        self.utils.logProcessEnd("Robot TRIP found: " + str(self.numRestFound), self.utils.DEBUG)
            
        