#coding: utf8
import datetime
import json
import os.path
import re
import sys
import time
import urlparse
from decimal import *

import MySQLdb
from pygeocoder import Geocoder
from scrapy import log
from scrapy.exceptions import CloseSpider
from scrapy.http import FormRequest
from scrapy.http import Request
from scrapy.selector import HtmlXPathSelector
from scrapy.spider import BaseSpider

from fresh.YelpMasterDB import YelpMasterDB
from fresh.YelpUtils import YelpUtils
from fresh.items import RestaurantItem
from fresh.items import RestaurantReview
from fresh.utils import Utils

class YelpMasterSpider(BaseSpider):
    """Spider that crawls restaurant listings on yelp.es.

    Start URLs and crawl limits are loaded from the YelpMasterDB database
    at construction time.  Depending on the group type stored in the DB,
    the crawl either stops after ``maxCrawl`` restaurants or starts
    counting from ``startCrawl``.
    """

    name = "yelpmaster"
    allowed_domains = ["yelp.es"]
    start_urls = []
    db = YelpMasterDB()
    logLocalName = "YELP_MAST_SCRAPPY"
    numRestFound = 0  # running total of restaurant pages parsed

    utils = Utils()
    yelpUtils = YelpUtils()

    # The start/stop crawling limits come from the same DB row that
    # supplies the start URLs (see __init__).
    maxCrawl = None
    startCrawl = None
    crawlType = None
    crawlGroup = None

    def __init__(self, *args, **kwargs):
        """Initialise logging, connect to the DB and load start URLs/limits.

        Raises CloseSpider when the URLs cannot be read from the DB.
        """
        # Forward spider arguments to the base class; the original dropped
        # them, which silently broke "-a name=value" spider arguments.
        super(YelpMasterSpider, self).__init__(*args, **kwargs)
        reload(sys)
        sys.setdefaultencoding('utf-8')
        # Renamed from "time": the original local shadowed the imported
        # time module.
        timestamp = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
        filename = os.path.join('logYelpMaster/', timestamp + '.log')
        self.utils.logProcessInit(filename)

        self.utils.logProcessBegin(self.logLocalName)

        self.db.utils = self.utils
        self.yelpUtils.utils = self.utils
        if self.db.connectDB():
            # typeNumber is (crawlType, number, crawlGroup); the meaning of
            # "number" depends on crawlType.
            typeNumber = self.db.readURLs(self.start_urls)
            if typeNumber is not None:
                self.utils.log("DEBUG","-----typenumber: "+str(typeNumber),self.utils.DEBUG)
                self.crawlType = typeNumber[0]
                self.utils.log("DEBUG","-----crawlType: "+str(self.crawlType),self.utils.DEBUG)
                self.crawlGroup = typeNumber[2]
                if self.crawlType == self.db.GROUP_TYPE_MAX:
                    # Stop crawling once this many restaurants were queued.
                    self.maxCrawl = typeNumber[1]
                    self.utils.log("DEBUG","-----maxCrawl: "+str(self.maxCrawl),self.utils.DEBUG)
                    self.utils.logProcessDetail("Robot set to stop on max: "+str(self.maxCrawl), self.utils.INFO)
                if self.crawlType == self.db.GROUP_TYPE_START:
                    # Resume counting from the restaurant after the stored one.
                    self.startCrawl = typeNumber[1] + 1
                    self.utils.log("DEBUG","-----startCrawl: "+str(self.startCrawl),self.utils.DEBUG)
                    self.utils.logProcessDetail("Robot set to start at: "+str(self.startCrawl), self.utils.INFO)
            else:
                self.utils.logProcessDetail("Critical Error, can not read URLs from DB in order to crawl YELP", self.utils.ERROR)
                # CloseSpider is imported from scrapy.exceptions; the
                # original raised it without importing it (NameError).
                raise CloseSpider(reason='Critical Error, can not read URLs from DB in order to crawl YELP')

    def parse(self, response):
        """Parse a listing page: queue one request per restaurant and follow
        the "next page" link while the crawl limit allows it."""
        hxs = HtmlXPathSelector(response)

        # The running restaurant counter travels between listing pages in
        # request meta; on the first page fall back to the configured start.
        count = response.meta.get('count')
        if count is None:
            count = 0
            if self.startCrawl is not None:
                count = self.startCrawl
                self.utils.logProcessDetail("Robot de Yelp starts at restaurant: "+str(count)+"for group: "+str(self.crawlGroup), self.utils.INFO)

        # Renamed from "list": the original loop variable shadowed the builtin.
        for biz in hxs.select('//span[@class="indexed-biz-name"]'):
            restaurant = RestaurantItem()

            href = biz.select('a/@href').extract()
            urls = self.utils.convertListToString(href)
            if len(urls) > 0:
                urls = urlparse.urljoin(response.url, urls)
                restaurant['url_scrapy'] = urls
                yield Request(urls, callback=self.parse_restaurant, meta={'restaurant': restaurant})
                count = count + 1

        # Stop following pagination once the configured maximum is reached.
        if self.maxCrawl is not None:
            if count > self.maxCrawl:
                self.utils.logProcessDetail("Stopping YELP robot. Max crawling detected: "+str(count)+"for group: " +str(self.crawlGroup), self.utils.INFO)
                return

        # The second prev-next anchor is "next"; on the first page there is
        # no "previous" link, so the only anchor is "next".
        url = hxs.select('(//a[@class="page-option prev-next"])[2]/@href').extract()
        if len(url) == 0:
            url = hxs.select('(//a[@class="page-option prev-next"])[1]/@href').extract()
        if len(url) != 0:
            urlF = urlparse.urljoin(response.url, url[0])
            self.utils.logProcessDetail("going to next page start at count: " + str(count) + "url: " + str(urlF), self.utils.INFO)
            yield Request(urlF, callback=self.parse, meta={'count': count})

    def parse_restaurant(self, response):
        """Parse one restaurant detail page and return the filled item.

        Returns None (after logging) when anything in the extraction fails.
        """
        # Pre-bind so the except handler can reference it even when the
        # failure happens before the meta lookup (the original raised a
        # NameError inside its own error handler in that case).
        restaurant = None
        try:
            hxs = HtmlXPathSelector(response)
            restaurant = response.meta['restaurant']

            self.numRestFound = self.numRestFound + 1
            # NOTE(review): level tag "debug" is lowercase here but "DEBUG"
            # elsewhere — confirm Utils.log treats them the same.
            self.utils.log("debug","num restaurants found: "+str(self.numRestFound),self.utils.DEBUG)

            self.yelpUtils.fillRestaurant(response, hxs, restaurant)

            return restaurant
        except Exception as e:
            self.utils.logProcessDetail("Error parsing restaurant en YELP: " + str(restaurant) + "detail: " +str(e) ,self.utils.ERROR)

    def close_spider(self):
        """Log the final restaurant count.

        NOTE(review): nothing in this file connects this method to a Scrapy
        signal, and Scrapy itself calls ``closed(reason)`` — confirm this
        hook is actually invoked (e.g. by a pipeline) or wire it up.
        """
        self.utils.logProcessEnd("Robot YELP found: "+str(self.numRestFound) + " restaurants for group: "+str(self.crawlGroup), self.utils.DEBUG)
