#coding: utf8
import urlparse
import time
import os.path

from decimal import *
from scrapy.http import Request
from scrapy.http import FormRequest
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from fresh.TwitterFbDB import TwitterFbDB
import datetime
import MySQLdb
import sys
from fresh.utils import Utils

class TwitterFbSearchMain(BaseSpider):
    """Scrapy spider that crawls each restaurant's main page and records any
    Twitter / Facebook account links discovered there into the database.

    start_urls and idRest are populated from the DB in __init__ via
    db.readURLMain.
    """

    name = "twfbSearchMain"
    db = TwitterFbDB()

    utils = Utils()
    logLocalName = "TWITTER_FB_MAIN_SCRAPPY"

    # Single-element list so db.readURLMain can update the id in place
    # (pass-by-reference effect, as the original comment explained).
    idRest = ['0']

    def __init__(self, *args, **kwargs):
        # BUG FIX: forward *args/**kwargs to the base spider; the original
        # accepted them but silently dropped them, so Scrapy-supplied
        # spider arguments never reached BaseSpider.__init__.
        super(TwitterFbSearchMain, self).__init__(*args, **kwargs)

        # Legacy Python 2 hack to force utf-8 as the default encoding;
        # kept because downstream string handling relies on it.
        reload(sys)
        sys.setdefaultencoding('utf-8')

        # Renamed local from `time` to `timestamp` so it no longer shadows
        # the imported `time` module.
        timestamp = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
        filename = os.path.join('logTwitterFbMain/', timestamp + '.log')
        self.utils.logProcessInit(filename)

        # Close-spider thresholds (errors / pages / exceptions).
        self.closespider_errorcount = 100
        self.closespider_pagecount = 100
        self.closespider_exceptioncount = 100
        self.exception_type_count = 100

        self.db.utils = self.utils

        if self.db.connectDB():
            # Read list of restaurants: fills start_urls and idRest in place.
            self.db.readURLMain(self.start_urls, self.idRest)

    def parse(self, response):
        """Extract all hrefs from the response, keep the distinct Twitter and
        Facebook links, resolve them to account names and store the result
        for the current restaurant. Errors are logged, never raised."""
        try:
            hxs = HtmlXPathSelector(response)
            links = hxs.select('//@href').extract()

            # Dicts used as sets to de-duplicate the links (DISTINCT).
            dictTW = dict()
            dictFB = dict()
            for link in links:
                slink = str(link)
                if 'twitter.com' in slink or 'twitter.es' in slink:
                    dictTW[slink] = 0
                if 'facebook.com' in slink or 'facebook.es' in slink:
                    dictFB[slink] = 0

            user_tw = self.utils.getTwitterAccountFromList(dictTW.keys())
            if user_tw is not None:
                self.utils.log(self.logLocalName, "twitter found: " + user_tw, self.utils.DEBUG)

            user_fb = self.utils.getFaceBookAccountFromList(dictFB.keys())
            if user_fb is not None:
                self.utils.log(self.logLocalName, "facebook found: " + user_fb, self.utils.DEBUG)

            # user_tw / user_fb may be None; insertTwFb receives them as-is.
            self.db.insertTwFb(self.idRest[0], user_tw, user_fb)

            return

        except Exception as e:
            # `as e` replaces the Py2-only `Exception, e` comma syntax.
            self.utils.log(self.logLocalName, "Error en parse web: " + str(response.url) + " para twitter y FB: " + str(e), self.utils.ERROR)

    def close_spider(self):
        # No-op cleanup hook; DB/log teardown is handled elsewhere.
        return
            