#coding: utf8
import urlparse
import time
import os.path
from scrapy import log
import re
from pygeocoder import Geocoder
from decimal import *
from scrapy.http import Request
from scrapy.http import FormRequest
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from fresh.items import RestaurantItem
from fresh.items import RestaurantReview
from fresh.TenedorMasterDB import TenedorMasterDB
import datetime
import MySQLdb
from fresh.utils import Utils
from fresh.TenedorUtils import TenedorUtils
import sys
from scrapy import signals

class TenedorMasterSpider(BaseSpider):
    """Scrapy spider that crawls eltenedor.es restaurant listings.

    Walks the paginated search results, follows each restaurant detail
    link, and yields one RestaurantItem per restaurant page. Progress and
    errors are written through the project's Utils logger.
    """

    name = "tenedormaster"
    allowed_domains = ["eltenedor.es"]
    start_urls = [
        "http://www.eltenedor.es/w/restaurant/restaurant_search.php?custom_search=1&name=dummy&id_country_area=24",
        "http://www.eltenedor.es/restaurante+madrid"
        ]
    # NOTE(review): class-level attributes are shared by every instance of
    # this spider (and are created at import time); kept here unchanged for
    # backward compatibility with any code referencing them on the class.
    db = TenedorMasterDB()
    utils = Utils()
    tenedorUtils = TenedorUtils()

    logLocalName = "TENEDOR_MAST_SCRAPPY"

    # Running count of restaurant detail pages successfully reached.
    numRestFound = 0

    def __init__(self, *args, **kwargs):
        # Forward *args/**kwargs to BaseSpider so spider arguments passed on
        # the command line still work (the original silently dropped them).
        super(TenedorMasterSpider, self).__init__(*args, **kwargs)
        # HACK: force UTF-8 as the process-wide default encoding (Python 2
        # only). Kept as-is because downstream str() calls on unicode data
        # rely on it; removing it would change runtime behavior.
        reload(sys)
        sys.setdefaultencoding('utf-8')
        # Renamed from 'time' so the local no longer shadows the time module.
        timestamp = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
        filename = os.path.join('logTenedorMaster/', timestamp + '.log')
        self.utils.logProcessInit(filename)

        # Share the single logger/helper with the DB and parsing helpers.
        self.db.utils = self.utils
        self.tenedorUtils.utils = self.utils

        self.utils.logProcessBegin(self.logLocalName)

        self.db.connectDB()

    def parse(self, response):
        """Parse a search-results page.

        Yields a Request per restaurant detail link found on the page, then
        a Request for the next results page when pagination continues.
        Errors are logged, never raised, so the crawl keeps going.
        """
        try:
            hxs = HtmlXPathSelector(response)
            # The 'dummy' seed URL only primes the session; skip scraping it.
            if response.url.find("dummy") == -1:
                # Renamed from 'list' to avoid shadowing the builtin.
                for link in hxs.select('//div[@class="left m_l info"]/h3/a/@href'):
                    url = link.extract()
                    urlS = self.utils.convertListToString(url)
                    if len(urlS) > 0:
                        # Detail hrefs are relative; resolve against the page URL.
                        urlS = urlparse.urljoin(response.url, urlS)
                        yield Request(urlS, callback=self.parse_restaurant)

                # Pagination: queue the next results page, if any.
                url = hxs.select('//li[@class="next"]/a/@href').extract()
                urlS = self.utils.convertListToString(url)
                if len(urlS) > 0:
                    urlS = urlparse.urljoin(response.url, urlS)
                    yield Request(urlS, callback=self.parse)

        except Exception as e:
            self.utils.logProcessDetail("Error al parse: " + str(e), self.utils.ERROR)

    def parse_restaurant(self, response):
        """Parse a restaurant detail page into a RestaurantItem.

        Returns the populated item, or None when parsing fails (the error
        is logged instead of raised).
        """
        # Initialized before the try so the except handler cannot itself
        # raise NameError when RestaurantItem() fails (bug in the original:
        # the handler referenced 'restaurant' unconditionally).
        restaurant = None
        try:
            restaurant = RestaurantItem()
            hxs = HtmlXPathSelector(response)
            self.numRestFound += 1
            self.utils.log("debug", "num restaurants found: " + str(self.numRestFound), self.utils.DEBUG)

            self.tenedorUtils.fillRestaurant(response, hxs, restaurant)

            return restaurant

        except Exception as e:
            self.utils.logProcessDetail("Error al parse_restaurant: " + str(restaurant) + " error: " + str(e), self.utils.ERROR)

    def close_spider(self):
        # NOTE(review): Scrapy does not automatically call a zero-argument
        # close_spider() on spiders — verify this is wired up via the
        # spider_closed signal, or rename to closed(self, reason).
        # Name and signature kept unchanged for compatibility.
        self.utils.logProcessEnd("Robot Tenedor found: " + str(self.numRestFound) + " restaurants", self.utils.DEBUG)
