#coding: utf8
import urlparse
import time
import os.path
from scrapy import log
import re
from pygeocoder import Geocoder
from decimal import *
from scrapy.http import Request
from scrapy.http import FormRequest
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from fresh.items import RestaurantItem
from fresh.items import RestaurantReview
from fresh.RestaloMasterDB import RestaloMasterDB
import datetime
import MySQLdb
from fresh.utils import Utils
from fresh.RestaloUtils import RestaloUtils
import sys

class RestaloMasterSpider(BaseSpider):
    """Scrapy spider that crawls restaurant listing pages on restalo.es,
    follows pagination, and yields one RestaurantItem per restaurant
    detail page. Progress and errors are reported through self.utils."""

    name = "restalomaster"
    allowed_domains = ["www.restalo.es"]
    start_urls = [
        "http://www.restalo.es/restaurantes-madrid/?page=21"
        ]
    db = RestaloMasterDB()

    utils = Utils()
    restaloUtils = RestaloUtils()

    logLocalName = "RESTALO_MAST_SCRAPPY"

    page = 0            # number of listing pages processed so far
    numRestFound = 0    # number of restaurant detail pages parsed

    def __init__(self, *args, **kwargs):
        # BUGFIX: forward *args/**kwargs to the base class instead of
        # silently discarding them.
        super(RestaloMasterSpider, self).__init__(*args, **kwargs)
        # Python 2 hack to force a UTF-8 default encoding; kept as-is
        # because downstream string handling may depend on it.
        reload(sys)
        sys.setdefaultencoding('utf-8')
        # BUGFIX: renamed local `time` -> `timestamp` so it no longer
        # shadows the imported `time` module.
        timestamp = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
        filename = os.path.join('logRestaloMaster/', timestamp + '.log')
        self.utils.logProcessInit(filename)

        self.utils.logProcessBegin(self.logLocalName)

        self.db.utils = self.utils
        self.restaloUtils.utils = self.utils
        self.db.connectDB()

    def parse(self, response):
        """Parse one listing page: yield a Request per restaurant found,
        plus a Request for the next page when pagination offers one."""
        hxs = HtmlXPathSelector(response)

        self.page = self.page + 1
        self.utils.log("DEBUG", "Page Number Restalo: " + str(self.page), self.utils.DEBUG)

        # BUGFIX: loop variable renamed from `list` (shadowed the builtin).
        # One itemInfoBox div per restaurant in the listing.
        for info_box in hxs.select('//div[@class="itemInfoBox"]'):
            # Only the detail-page URL is captured here; the rest of the
            # item is filled in parse_restaurant.
            restaurant = RestaurantItem()
            url = info_box.select('div[@class="restaurantInfo"]/h3[@class="restaurantTitle"]/a/@href').extract()

            if len(url) > 0:
                url = urlparse.urljoin(response.url, url[0])
                restaurant['url_scrapy'] = url
                yield Request(url, callback=self.parse_restaurant, meta={'restaurant': restaurant})

        # Follow the "next" pagination link, if present.
        url = hxs.select('//ul[@id="pager"]/li[@class="next"]/a/@href').extract()
        if len(url) > 0:
            yield Request(urlparse.urljoin(response.url, url[0]), callback=self.parse)

    def parse_restaurant(self, response):
        """Parse a restaurant detail page and return the populated item.

        Failures are logged (best effort) instead of aborting the crawl;
        in that case the method returns None."""
        # BUGFIX: bind `restaurant` before the try so the except handler
        # cannot itself raise NameError when the failure happens early.
        restaurant = None
        try:
            hxs = HtmlXPathSelector(response)
            # BUGFIX: removed duplicate `restaurant = response.meta[...]`.
            restaurant = response.meta['restaurant']

            self.numRestFound = self.numRestFound + 1
            # BUGFIX: level string normalized to "DEBUG" (was lowercase
            # "debug", inconsistent with the call in parse()).
            self.utils.log("DEBUG", "num restaurants found: " + str(self.numRestFound), self.utils.DEBUG)

            self.restaloUtils.fillRestaurant(response, hxs, restaurant)

            return restaurant
        except Exception as e:  # BUGFIX: `as` syntax (Py2.6+/Py3) replaces Py2-only `Exception,e`
            self.utils.logProcessDetail("Error parsing restaurant en RESTALO: " + str(restaurant) + "detail: " + str(e), self.utils.ERROR)

    def close_spider(self):
        """Log the final restaurant count when the spider shuts down."""
        self.utils.logProcessEnd("Robot RESTALO found: " + str(self.numRestFound), self.utils.DEBUG)

            
        