#!/usr/bin/python
# -*- coding: utf-8 -*-
import traceback, os
from cStringIO import StringIO
try:
    import cPickle as pickle
except:
    import pickle
from datetime import datetime
from threading import Semaphore, Thread
import re

# from local
from lib.utils import timeago, sanitizestring, transcode5
from lib.network import clean_url, fetch_url3
from lib.database_qqshuqian import Database, tt_get, tt_put, tt_init, tt_close
from lib.html_extractor import extract_text

def redirect_filter(txt):
    """Return True when *txt* looks like a redirect / link-farm page.

    Heuristic: count occurrences of the Chinese phrase
    u'\u8bf7\u70b9\u6b64\u8fdb\u5165\uff01' ("please click here to enter!")
    in the UTF-8-decoded text; three or more hits mark the page as a
    redirect page rather than real content.

    Always returns a bool (the old version returned None for empty or
    undecodable input, which only ever reached boolean contexts anyway).
    """
    if not txt:
        return False
    try:
        decoded = txt.decode('utf-8')
    except (UnicodeError, AttributeError):
        # Not valid UTF-8 (or not a byte string at all): treat as
        # "not a redirect page" instead of silently swallowing everything.
        return False
    # The marker is a plain literal, so str.count is equivalent to the
    # old re.findall-and-len approach.
    marker = u'\u8bf7\u70b9\u6b64\u8fdb\u5165\uff01'
    return decoded.count(marker) >= 3


def make_scrap(linkid, url, title, debug = False):
    def scrap():
        global scrapers_semaphore
        #tcinstance = tt_init('192.168.1.103')
        tcinstance = tt_init('localhost')
        try:
            if debug:
                print 'FETCHING ', linkid, url
            content = fetch_url3(clean_url(url), retries = 2)                
            if content:
                content = transcode5(content)
                plaintext = extract_text(content, use_ann = True)
                if plaintext:
                    if redirect_filter(plaintext):
                        tt_put(tcinstance, linkid, '@@REDIRECT@@')
                    if len(plaintext) < 20:
                        tt_put(tcinstance, linkid, '@@TINY@@')
                    tt_put(tcinstance, linkid, plaintext)
                else:
                    tt_put(tcinstance, linkid, '@@EMPTY@@')
            else:
                tt_put(tcinstance, linkid, '@@EMPTY@@')
        except:
            print 'ERROR in FETCHING %s %s %s' % (linkid, url, title)
            #print content
            tt_put(tcinstance, linkid, '@@EMPTY@@')
            sio = StringIO()
            traceback.print_exc(file=sio)
            print sio.getvalue()
            sio.close()
        tt_put(tcinstance, 'date_' + linkid, pickle.dumps(datetime.now()))
        tt_close(tcinstance)
        scrapers_semaphore.release()
    return scrap

# Global throttle on concurrency: at most 32 scraper threads at once.
# process_new_links acquire()s a permit before starting each worker and
# each worker release()s it when finished.
scrapers_semaphore = Semaphore(32)

def process_new_links(recrawl_period, force = False):
    """Fetches links from the last period and sets their media
    properities. If force is True, it will fetch properities for links
    even if the properties already exist"""
    dal = Database()
    last_seq = 0

    tcinstance = tt_init('192.168.1.103')
    tt_put(tcinstance, "EMPTY", "@@EMPTY@@")
    while True:
        print 'getting links'
        ret = dal._queryLink(last_seq)
        print 'new batch'
        if ret:
            for x in ret:
                linkid = x[0]
                title = x[1]
                url = x[2]
                last_seq = x[7]
                sth = tt_get(tcinstance, 'date_' + linkid)
                content = tt_get(tcinstance, linkid)
                if sth:
                    latest_try = pickle.loads(sth)
                    if latest_try > timeago(recrawl_period) and content:
                        print ('ALREADY FETCHED %s %s %s ' % (linkid, url, title)) + str(latest_try)
                        continue
                worker_thread = Thread(target = make_scrap(linkid, url, title, debug=True))
                scrapers_semaphore.acquire()
                worker_thread.start()
        else:
            break
    tt_close(tcinstance)

if __name__ == '__main__':
    # Run as a script: recrawl anything older than one month.  The guard
    # keeps importing this module from kicking off a full crawl.
    process_new_links('1 month')