#coding: utf8
import urlparse
import time
import os.path
from scrapy import log
import re
from pygeocoder import Geocoder
from decimal import *
from scrapy.http import Request
from scrapy.http import FormRequest
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from fresh.items import RestaurantItem
from fresh.items import RestaurantReview
from fresh.PaginasMasterDB import PaginasMasterDB
import datetime
import MySQLdb
from fresh.utils import Utils
from fresh.PaginasUtils import PaginasUtils
import sys

class PaginasMasterSpider(BaseSpider):
    """Scrapy spider that crawls restaurant listings for Madrid on
    paginasamarillas.es and inserts every restaurant found directly into
    the database (bypassing the item pipeline), following pagination
    until no next-page link remains."""

    name = "paginasmaster"
    allowed_domains = ["www.paginasamarillas.es"]
    start_urls = [
        "http://www.paginasamarillas.es/search/restaurantes/all-ma/madrid/all-is/madrid/all-ba/all-pu/all-nc/all-co/303?where=madrid&nb=false&ub=false"
        ]
    # Shared collaborators, created once at class-definition time.
    db = PaginasMasterDB()

    utils = Utils()
    paginasUtils = PaginasUtils()

    # Tag identifying this robot in the process log.
    logLocalName = "PAGINAS_MAST_SCRAPPY"

    page = 0           # number of result pages visited so far
    numRestFound = 0   # running total of restaurants scraped

    def __init__(self, *args, **kwargs):
        super(PaginasMasterSpider, self).__init__()
        # HACK: force utf-8 as the process-wide default encoding
        # (Python 2 only); kept because downstream str/unicode mixing
        # appears to rely on it -- do not remove without testing.
        reload(sys)
        sys.setdefaultencoding('utf-8')
        # Named 'timestamp' (not 'time') so the imported 'time' module
        # is not shadowed inside this method.
        timestamp = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
        filename = os.path.join('logPaginasMaster/', timestamp + '.log')
        self.utils.logProcessInit(filename)

        self.utils.logProcessBegin(self.logLocalName)

        self.paginasUtils.utils = self.utils

        self.db.utils = self.utils
        self.db.connectDB()

    def _insert_listing(self, response, selector):
        """Build a RestaurantItem from one result-list node and insert it
        straight into the DB (no pipeline), updating the running count."""
        restaurant = RestaurantItem()
        self.numRestFound = self.numRestFound + 1
        self.utils.log("debug", "num restaurants found: " + str(self.numRestFound), self.utils.DEBUG)

        self.paginasUtils.fillRestaurant(response, selector, restaurant)
        self.db.insertRestaurants(restaurant, self.db.INSERT)

    def parse(self, response):
        """Parse one results page: store every listed restaurant (both the
        advert and the plain listing variants) and yield a Request for the
        next results page when a pagination link exists."""
        try:
            hxs = HtmlXPathSelector(response)

            self.page = self.page + 1
            self.utils.log("DEBUG", "Page Number paginas: " + str(self.page), self.utils.DEBUG)

            # Both listing variants get identical handling; the loop
            # variable is 'listing' so the builtin 'list' is not shadowed.
            for listing in hxs.select('//li[@class="m-results-business m-results-business-advert"]'):
                self._insert_listing(response, listing)

            for listing in hxs.select('//li[@class="m-results-business"]'):
                self._insert_listing(response, listing)

            # Follow pagination via the "last" link until none remains.
            url = hxs.select('//li[@class="last"]/a/@href').extract()
            if url:
                yield Request(urlparse.urljoin(response.url, url[0]), callback=self.parse)

        except Exception as e:
            # Broad catch is deliberate: one bad page must not kill the
            # crawl; the failure is recorded in the process log.
            self.utils.logProcessDetail("Error parsing restaurant en PAGINAS: " + str(e), self.utils.ERROR)

    def close_spider(self):
        """Log the final restaurant count when the spider shuts down."""
        self.utils.logProcessEnd("Robot Paginas Amarillas found: " + str(self.numRestFound), self.utils.DEBUG)
            
        