# -*- coding: utf-8 -*-
# TODO: better logger
import sys, time, urllib, random, subprocess, os
from django.core.management.base import BaseCommand
from django.utils.encoding import smart_str, smart_unicode
from optparse import make_option
from options.models import Source, Category, Language
from BeautifulSoup import BeautifulSoup
from globalresources.models import Resource
from utils import folder_size

# THIS IS AN EXAMPLE/MODEL/BEST PRACTICE OF HOW TO WRITE A RESOURCES PLUGIN.
# The pattern is: declare the source constants, a category mapping, a
# downloader helper, an item wrapper class, and a management Command.

# Identity of this content source in the local database/catalogue.
SOURCE_ID = 999
SOURCE_URL = "http://opengoodcontents.org/"
SOURCE_SLUG = "opengoodcontents.org"
SOURCE_NAME = "Open Good Contents (OGC)"
# Total number of items the source reports, and its listing page size.
SOURCE_KNOWN_TOTAL = 11126
SOURCE_ITEMS_PER_PAGE = 100
# Ceiling division: the trailing partial page must be counted too,
# otherwise the last SOURCE_KNOWN_TOTAL % SOURCE_ITEMS_PER_PAGE items
# (26 here) would never be synced. Plain "/" truncated this to 111
# pages (and would produce a float under Python 3).
SOURCE_TOTAL_PAGES = -(-SOURCE_KNOWN_TOTAL // SOURCE_ITEMS_PER_PAGE)


# Maps the category slug scraped from the source's listing page to the
# primary key of the corresponding Category row (handle() does
# Category.objects.get(pk=CATEGORY_DICT[cat])). Keys are the source's
# own slugs (Portuguese); values are local Category pks.
CATEGORY_DICT = {
    "audio": 9,
    "imagem": 2,
    "mapa": 7,
    "experimento": 3,
    "software": 8,
    "animacao": 5,
    "ebook": 10,
    "hipertexto": 6,
    "video-class": 4,
    "video": 1,
}

def downloader(urltodownload, basetarget, filename):
    """Fetch ``urltodownload`` into ``basetarget``.

    If the remote file is a packed archive it goes to
    ``basetarget``/downloaded_source and is unpacked into ``basetarget``.
    Template stub: concrete plugins implement the actual transfer here.
    """
    pass

class GoodContentItem(object):
    """Template wrapper around one Resource row of this source.

    Receives the Resource whose ``resource_url`` points at the page to
    scrape. Concrete plugins fetch that page, parse it, download the
    content and persist the collected metadata.
    """

    def __init__(self, resource, created=False):
        # Keep the Resource so parse()/download()/save() can use it.
        # The original template never stored it, which made download()
        # crash on undefined names.
        print("fetching individual... %s" % resource.resource_url)
        print("parsing url... %s" % resource.resource_url)
        # Concrete plugins would fetch the page and build a soup here, e.g.:
        # self.soup = BeautifulSoup(urllib.urlopen(resource.resource_url).read())
        self.resource = resource
        self.created = created

    def parse(self):
        """Collect as much information as possible from the scraped page.

        Wrap every extraction in try/except and mark the resource with a
        distinct status on failure, so broken items can be re-checked later.
        """
        # Template examples of what a real plugin extracts:
        # ficha = self.soup.find('div', id="ficha_recurso")
        # link = self.soup.find('a', id="link_visualizar_recurso")
        # trigger = link.attrs[2][1].split("/")[-1].split("?")[0]
        # TODO: needs improvement
        pass

    def download(self):
        """Create the work folder and fetch this resource's file into it."""
        r = self.resource
        work_folder = os.path.dirname(smart_str(r.content_root()))
        print("- CREATING DIR...")
        # Argument-list form (no shell=True) avoids shell injection via
        # characters in the folder name.
        subprocess.call(["mkdir", "-vp", work_folder])
        print("- RUNNING DOWNLOADER")
        urltodownload = str(r.resource_download_url)
        basetarget = str(work_folder)
        downloader(urltodownload, basetarget, str(r.resource_downloaded_file))

    def save(self):
        """Copy the parsed attributes onto the Resource and persist it."""
        # Template: a real plugin prepopulates the object, e.g.
        # self.resource.title = self.title
        # self.resource.structure = self.structure
        # self.resource.save()
        pass

class Command(BaseCommand):
    help = "Sync Resources from Resource N: SOME GOOD CONTENT"
    # Usage summary printed by the management framework; option names
    # here must match the make_option declarations below (the original
    # said "--nowdownload", a typo for --nodownload, and omitted
    # --force-download).
    args = "--sync, --get GRID,GRID,GRID, --nodownload, --force-download"
    option_list = BaseCommand.option_list + (
        make_option('--sync',
            action='store_true',
            dest='sync',
            help='Sync Resources with Source.'),
        make_option('--nodownload',
            action='store_true',
            dest='nodownload',
            help='Only get informations. Do not download'),
        make_option('--get',
            action='append',
            dest='get',
            help='Get specific GRIDS for debug.'),
        make_option('--force-download',
            action='store_true',
            dest='force_download',
            help='Force download even if already downloaded.'),
    )

    def handle(self, *args, **options):
        # pagesets as arguments
        if args:
            try:
                start,finish = args[0].split(',')
            except:
                pass
        else:
            start,finish = 1,SOURCE_TOTAL_PAGES
        sync = options.get('sync')
        get = options.get('get')
        nodownload = options.get('nodownload')
        force_download = options.get('force_download')
        range_values = options.get('range_values')
        # GET SPECIFIC ITEMS, AND DO SOMETHING
        if get:
            grids = get[0].split(",")
            for grid in grids:
                print "GRID:",grid
                try:
                    resource = Resource.objects.get(pk=grid)
                    # do something with the resource, like:
                    #resourceitem = PortalDoProfessorItem(resource, False)
                    #print "GRID TITLE: %s" % resourceitem.title
                    #resourceitem.download()
                except DoesNotExist:
                    raise
                
        if sync:
            # try to get the source from database. this will create
            source, created = Source.objects.get_or_create(pk=SOURCE_ID, url=SOURCE_URL, slug=SOURCE_SLUG, name=SOURCE_NAME)
            print "Source created?",created
            print "TOTAL PAGES: %s" % SOURCE_TOTAL_PAGES
            all_pages = range(SOURCE_TOTAL_PAGES)
            all_pages.reverse()
            # at this point, you have the created content source above, and have found a API or a pattern to do some
            # webscrapping.
            for page in all_pages[int(start):int(finish)]:
                print "PAGE %s" % page
                url = "http://portaldoprofessor.mec.gov.br/resultadoBusca.html?pagina=%s&tamanhoPagina=%s&ordem=0" % (page, SOURCE_ITEMS_PER_PAGE)
                print "hitting",url
                f = urllib.urlopen(url)
                s = f.read()
                f.close()
                soup = BeautifulSoup(s)
                in_page_items = len(soup.findAll('tr'))
                # for each individual resource
                i = 0
                for resource_item in range(1,in_page_items):
                    print "#######"*4
                    try:
                        id = soup('tr')[resource_item].first('a').attrs[0][1].split('=')[1]
                        cat = soup('tr')[resource_item].findAll('img')[0].attrs[0][1].split("/")[1].split("_")[1].split(".")[0]
                    except:
                        id = "error%s" % i
                        i += 1
                        cat = ''
                    print "RESOURCE_REFERENCE_STRING: %s" % id
                    print "CATEGORY: %s" % cat
                    # resource informations
                    resource_url = "%sfichaTecnica.html?id=%s" % (SOURCE_URL, id)
                    resource,created = Resource.objects.get_or_create(
                        resource_reference_string=id, source=source, resource_url=resource_url
                    )
                    first_status = resource.status
                    print "Created?",created
                    print "DB ITEM: %d" % resource.pk
                    try:
                        category_object = Category.objects.get(pk=CATEGORY_DICT[cat])
                    except:
                        category_object = ""
                    if resource.status != 'downloaded':
                        resource.status = "processing"
                    # START CLASS
                    r = PortalDoProfessorItem(resource, created)
                    r.parse()
                    print "DB TITLE:", r.title
                    r.resource.category = category_object
                    r.resource.save()
                    try:
                        r.save()
                    except:
                        print "ERROR PARSING"
                        r.resource.status = 'error'
                        r.resource.save()
                    if nodownload:
                        print "NOT DOWNLOADING!"
                        print "STATUS",r.resource.status
                    else:
                        if force_download or first_status != 'downloaded':
                            try:
                                print "--DOWNLOADING...FORCED?",force_download
                                print "--FIRST STATUS:",first_status
                                r.download()
                                r.resource.status = 'downloaded'
                                r.resource.save()
                            except:
                                print "ERROR DOWNLOADING"
                                r.resource.status = 'error'
                        else:
                            print "-- CONTENT ALREADY MARKED AS DOWNLOADED"
                            r.size = folder_size(r.resource.content_root())
                            r.save()
                    # generate thumbs