#coding: utf8
from scrapy.selector import HtmlXPathSelector
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from scrapy.contrib.spiders import CrawlSpider, Rule
import scrapy.signals
import urlparse
import time
import os.path
from scrapy import log
from decimal import *
from fresh.TwitterFbDB import TwitterFbDB
import datetime
import MySQLdb
import sys
from fresh.utils import Utils

class TwitterFbSearchDeep(CrawlSpider):
    """Deep crawler that scans a restaurant's website for social links.

    Starting from a URL read out of the database (``readURLDeep``), it
    follows every internal link (``SgmlLinkExtractor`` with ``follow=True``)
    and inspects each page's ``href`` attributes for Twitter / Facebook
    account URLs. The first accounts found are stored back into the DB
    via ``insertTwFb``.
    """

    name = 'twfbSearchDeep'
    # NOTE(review): class-level mutable state -- allowed_domains, idRest,
    # db and utils are shared by ALL instances of this spider. Fine while
    # one process runs one spider, but not safe for concurrent instances.
    allowed_domains = list()

    db = TwitterFbDB()
    # Single-element list used as an out-parameter: readURLDeep() writes
    # the current restaurant id into idRest[0].
    idRest = ['0']

    rules = (
        # Follow every extracted link and hand each page to parse_item.
        Rule(SgmlLinkExtractor(), callback='parse_item', follow=True),
    )

    utils = Utils()
    logLocalName = "TWITTER_FB_DEEP_SCRAPPY"

    def __init__(self, *args, **kwargs):
        """Set up logging, stop-limits, the DB connection and start URL.

        Side effects: creates a timestamped log file under
        ``logTwitterFbDeep/``, opens the DB connection, and populates
        ``start_urls`` / ``allowed_domains`` from the database.
        """
        # BUGFIX: forward *args/**kwargs to the base class -- the original
        # dropped them, so Scrapy-supplied spider arguments (e.g. "-a"
        # options or crawler-injected kwargs) never reached CrawlSpider.
        super(TwitterFbSearchDeep, self).__init__(*args, **kwargs)

        # Python 2 only: force utf-8 as the process default encoding so
        # implicit str/unicode conversions below do not raise.
        reload(sys)
        sys.setdefaultencoding('utf-8')

        # BUGFIX: renamed local from 'time' to 'timestamp' -- the original
        # shadowed the module-level 'import time'.
        timestamp = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
        filename = os.path.join('logTwitterFbDeep/', timestamp + '.log')
        self.utils.logProcessInit(filename)

        # Safety limits so a runaway crawl stops itself.
        self.closespider_errorcount = 100
        self.closespider_pagecount = 100
        self.closespider_exceptioncount = 100
        self.exception_type_count = 100

        # Share the logger with the DB helper.
        self.db.utils = self.utils

        if self.db.connectDB():
            # readURLDeep fills start_urls and writes the restaurant id
            # into idRest[0] (out-parameter).
            self.db.readURLDeep(self.start_urls, self.idRest)
            domain = self.utils.getDomain(self.start_urls[0])
            self.utils.log(self.logLocalName, "DOMAIN: " + domain, self.utils.DEBUG)
            self.allowed_domains.append(domain)

    def parse_item(self, response):
        """Scan one page for Twitter/Facebook links and persist matches.

        :param response: Scrapy response for the crawled page.
        :returns: None (results go straight to the DB, no items yielded).
        """
        try:
            hxs = HtmlXPathSelector(response)
            links = hxs.select('//@href').extract()

            # Dicts used as ordered-insertion sets to de-duplicate links
            # (DISTINCT) before pattern matching.
            dictTW = dict()
            dictFB = dict()
            for link in links:
                slink = str(link)
                if slink.find('twitter.com') != -1 or slink.find('twitter.es') != -1:
                    dictTW[slink] = 0
                if slink.find('facebook.com') != -1 or slink.find('facebook.es') != -1:
                    dictFB[slink] = 0

            bUpdate = False
            user_tw = self.utils.getTwitterAccountFromList(dictTW.keys())
            if user_tw is not None:
                bUpdate = True
                self.utils.log(self.logLocalName, "twitter found: " + user_tw, self.utils.DEBUG)

            user_fb = self.utils.getFaceBookAccountFromList(dictFB.keys())
            if user_fb is not None:
                bUpdate = True
                self.utils.log(self.logLocalName, "facebook found: " + user_fb, self.utils.DEBUG)

            # Only hit the DB when at least one account was found;
            # insertTwFb accepts None for the missing network.
            if bUpdate:
                self.db.insertTwFb(self.idRest[0], user_tw, user_fb)

            return

        except Exception as e:
            # Best-effort: log and swallow so one bad page does not kill
            # the crawl (closespider_errorcount still bounds total errors).
            self.utils.log(self.logLocalName, "Error en parse web:" + str(response.url) + "para twitter y FB: " + str(e), self.utils.ERROR)

    def close_spider(self):
        # NOTE(review): Scrapy's shutdown hook is closed(self, reason) or
        # the spider_closed signal; this method name is likely never
        # called by the framework -- confirm before relying on it.
        return
