#coding: utf8
import urlparse
import time
import os.path
from scrapy import log
import re
from pygeocoder import Geocoder
from decimal import *
from scrapy.http import Request
from scrapy.http import FormRequest
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from fresh.items import RestaurantItem
from fresh.items import RestaurantReview
from fresh.AtrapaloMasterDB import AtrapaloMasterDB
import datetime
import MySQLdb
from fresh.utils import Utils
from fresh.AtrapaloUtils import AtrapaloUtils
import sys

class AtrapaloMasterSpider(BaseSpider):
    """Spider that crawls www.atrapalo.com restaurant listings for Madrid,
    following pagination, and yields one populated RestaurantItem per
    restaurant detail page. Progress and errors are written through the
    shared Utils logger; a DB connection is opened at construction time."""

    name = "atrapalomaster"
    allowed_domains = ["www.atrapalo.com"]
    start_urls = [
        "http://www.atrapalo.com/restaurantes/madrid_d121/listado/p1/"
        ]
    db = AtrapaloMasterDB()

    utils = Utils()
    atrapaloUtils = AtrapaloUtils()

    logLocalName = "ATRAPALO_MAST_SCRAPPY"

    page = 0           # listing pages visited so far
    numRestFound = 0   # restaurant detail pages parsed so far

    def __init__(self, *args, **kwargs):
        super(AtrapaloMasterSpider, self).__init__()
        # NOTE(review): reload(sys)/setdefaultencoding is a Python 2 hack to
        # avoid UnicodeDecodeError on implicit str/unicode mixing; kept as-is
        # because the downstream logging/DB code appears to rely on it.
        reload(sys)
        sys.setdefaultencoding('utf-8')
        # Timestamped log file name; a local named `timestamp` is used so the
        # imported `time` module is not shadowed (the original rebound `time`).
        timestamp = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
        filename = os.path.join('logAtrapaloMaster/', timestamp + '.log')
        self.utils.logProcessInit(filename)

        self.utils.logProcessBegin(self.logLocalName)

        self.db.utils = self.utils
        self.atrapaloUtils.utils = self.utils
        self.db.connectDB()

    def parse(self, response):
        """Parse one listing page: yield a Request per restaurant found,
        then follow the "Siguiente" (next) pagination link if present."""
        try:
            hxs = HtmlXPathSelector(response)

            self.page = self.page + 1
            self.utils.log("DEBUG", "Page Number Atrapalo: " + str(self.page), self.utils.DEBUG)

            # One <h2 class="productName"> per restaurant in the listing.
            # Loop variable renamed from `list`, which shadowed the builtin.
            for product in hxs.select('//h2[@class="productName"]'):
                # The second <a> inside the heading holds the detail-page URL.
                url = product.select('(a)[2]/@href').extract()
                urlS = self.utils.convertListToString(url)
                if len(urlS) > 0:
                    restaurant = RestaurantItem()
                    restaurant['url_scrapy'] = urlS
                    yield Request(urlS, callback=self.parse_restaurant, meta={'restaurant': restaurant})

            # Follow the "next page" link, if any.
            url = hxs.select('//ul[@class="paginacion floatr"]/li[@class="pag-bot"]/a[@title="Siguiente"]/@href').extract()
            if len(url) > 0:
                url = urlparse.urljoin(response.url, url[0])
                yield Request(url, callback=self.parse)

        except Exception as e:
            self.utils.logProcessDetail("Error parsing en ATRAPALO: " + str(e), self.utils.ERROR)

    def parse_restaurant(self, response):
        """Parse a restaurant detail page: fill the item carried in
        response.meta and return it to the pipeline."""
        # Bound BEFORE the try so the except handler below can never hit a
        # NameError on `restaurant` (the original assigned it inside try —
        # twice — and referenced it in the handler).
        restaurant = response.meta['restaurant']
        try:
            hxs = HtmlXPathSelector(response)

            self.numRestFound = self.numRestFound + 1
            self.utils.log("debug", "num restaurants found: " + str(self.numRestFound), self.utils.DEBUG)

            self.atrapaloUtils.fillRestaurant(response, hxs, restaurant)

            self.utils.log("debug", "RESTAURANTE: " + str(restaurant), self.utils.DEBUG)

            return restaurant
        except Exception as e:
            self.utils.logProcessDetail("Error parsing restaurant en ATRAPALO: " + str(restaurant) + "detail: " + str(e), self.utils.ERROR)

    def close_spider(self):
        """Log the final count of restaurants processed when the spider closes."""
        self.utils.logProcessEnd("Robot ATRAPALO updated: " + str(self.numRestFound), self.utils.DEBUG)

            
        