import boto
import boto.exception
import gdata.spreadsheets.client
import glp.util
import os
import re
from google.appengine.api import users
from google.appengine.ext import db

from glp.library import DocumentMetadata
from glp.library import LabelInfo
from glp.library import LabelInfos
from glp.library import LabelUtils
from glp.library import LabelUtils2

class Stats(db.Model):
    """Counters for allocating document ids.

    Expected to exist as a single entity (see Librarian.EnsureStats, which
    creates it on first use); each field holds the next unused id in its
    namespace.
    """
    # Next id for imported files.
    next_file_id = db.IntegerProperty(required=True)
    # Next id for bookmark documents.
    next_bookmark_id = db.IntegerProperty(required=True)
    # Next id for staged (pre-import) metadata records.
    next_staging_id = db.IntegerProperty(required=True)

class Librarian:
    """Pipeline for staging, importing and indexing library documents."""

    # Translation table applied to raw label text coming from the ingestion
    # spreadsheet before it is converted to a label id (see QualifyLabels).
    # Maps the human-entered spreadsheet form to the canonical label text/id.
    ingestion_lookup = {
        'Primary 1' : 'p1',
        'Primary 2' : 'p2',
        'Primary 3' : 'p3',
        'Primary 4' : 'p4',
        'Primary 5' : 'p5',
        'Primary 6' : 'p6',
        'Primary (All)' : 'primary',
        'Secondary 1' : 's1',
        'Secondary 2' : 's2',
        'Secondary 3' : 's3',
        'Secondary 4' : 's4',
        'Secondary 5' : 's5',
        'Secondary 6' : 's6',
        'Secondary (All)' : 'secondary',
        'Teaching Methods & Pedagogy (Professional Development)' : 'Teaching Methods and Pedagogy',
        'Teaching Method & Practice (Academic Subject)' : 'Teaching Methods and Practice',
        }

    @staticmethod
    def EnsureStats():
        """Fetch the Stats entity, creating it with default ids if absent."""
        existing = Stats.all().get()
        if existing is not None:
            return existing
        # First run: seed the three id counters and persist the new entity.
        created = Stats(next_file_id=100000,
                        next_bookmark_id=200000,
                        next_staging_id=400000)
        created.put()
        return created

    @staticmethod
    def GetSpreadsheetKey(client, name):
        key = None
        for i, entry in enumerate(client.GetSpreadsheets().entry):
            if entry.title.text == name:
                return entry.id.text.rsplit('/', 1)[1]
        return None

    @staticmethod
    def GetWorksheetInfo(client, spreadsheet_key, name):
        worksheet_id = None
        worksheet_rows = None
        for i, entry in enumerate(client.GetWorksheets(spreadsheet_key).entry):
            if entry.title.text == name:
                return entry.id.text.rsplit('/', 1)[1], int(entry.row_count.text)
        return None, None

    @staticmethod
    def GetTableInfo(client, spreadsheet_key, name):
        for i, entry in enumerate(client.GetTables(spreadsheet_key).entry):
            if entry.title.text == name:
                return entry.get_table_id(), int(entry.data.num_rows)
        return None, None

    @staticmethod
    def DeleteTable(client, spreadsheet_key, table_id):
        client.delete('https://spreadsheets.google.com/feeds/%s/tables/%s' % (spreadsheet_key, table_id), force=True)

    @staticmethod
    def DeleteAllTables(client, spreadsheet_key):
        for i, entry in enumerate(client.GetTables(spreadsheet_key).entry):
            Librarian.DeleteTable(client, spreadsheet_key, entry.get_table_id())

    @staticmethod
    def EnsureTableId(client, spreadsheet_key, worksheet_id, worksheet_name, worksheet_rows, name, column_map):
        table_id, table_rows = Librarian.GetTableInfo(client, spreadsheet_key, name)
        if table_id and (table_rows < worksheet_rows):
            Librarian.DeleteTable(client, spreadsheet_key, table_id)
            table_id = None
        if not table_id:
            client.AddTable(spreadsheet_key, name, "Table containing metadata to be staged",
                            worksheet_name, 1, worksheet_rows, 2, 'overwrite', column_map)
        return Librarian.GetTableInfo(client, spreadsheet_key, name)[0]

    @staticmethod
    def QualifyLabels(category, record, field):
        """Parse a comma-separated label field into fully-qualified label ids.

        Each raw label is normalized (ingestion aliases resolved, a trailing
        parenthetical stripped for topics, ' - ' collapsed), converted to an
        id and qualified with `category`.  'N/A' entries are skipped; unknown
        labels raise LookupError.  Returns [] when the field is empty.
        (Fixes: no longer shadows the builtin `list`; the debug print now
        uses the parenthesized form, valid in both Python 2 and 3.)
        """
        raw = record.value_for_name(field)
        if raw is None:
            return []
        labels = []
        category = LabelUtils2.TextToId(category)
        for label in raw.split(', '):
            clean = label.strip()
            print(clean)  # debug trace of each raw label
            if clean in Librarian.ingestion_lookup:
                clean = Librarian.ingestion_lookup[clean]
            # For topics, drop a trailing parenthetical qualifier,
            # e.g. 'Algebra (Advanced)' -> 'Algebra'.
            paren_index = clean.find(' (')
            if category == 'topic' and paren_index > -1:
                clean = clean[0:paren_index]
            clean = clean.replace(' - ', ' ')
            clean = LabelUtils2.TextToId(clean)
            if clean == 'n-a':
                continue
            qualified = (category, clean)
            full_id = LabelUtils2.LabelToId(qualified)
            if not LabelUtils2.LabelToText(qualified):
                raise LookupError('unknown label: %s' % full_id)
            labels.append(full_id)
        return labels

    @staticmethod
    def ExtractDocumentMetadata(record, next_staging_id, printer):
        """Build a staging DocumentMetadata from one spreadsheet record.

        Returns None (after logging via `printer`) for empty records and
        unknown entry types.  The id/gs_uri/content_type fields use 'XXX'
        placeholders that are resolved later by PrepareImportableDocuments.

        Fixes: removed a stray no-op string literal that held commented-out
        branches; an empty/missing PublicationYear no longer crashes int();
        removed a leftover StatusUpdate('') debug call.
        """
        entry_type = record.value_for_name('DocumentType')
        if not entry_type:
            printer.StatusUpdate('Ignoring empty record.')
            return None
        entry_type = entry_type.strip()
        id = "rec-%07d-XXX" % next_staging_id
        source_uri = None
        storage_uri = 'XXX'
        storage_type = None
        if entry_type == 'Bookmark for an external site':
            storage_uri = Librarian.GetCleanValue(record, 'ExternalBookmarkUrl')
            storage_type = 'site-youtube' if storage_uri.startswith('http://www.youtube.com') else 'site-external'
        # NOTE: 'internal REC page' and 'Liferay file' entry types were once
        # handled here and are currently disabled.
        elif entry_type == 'File to be uploaded to REC':
            source_uri = "file://%s" % Librarian.GetCleanValue(record, 'Filename')
            storage_type = 'google-storage'
        elif entry_type == 'REC video uploaded to YouTube':
            storage_uri = Librarian.GetCleanValue(record, 'YouTubeUrl')
            storage_type = 'site-youtube'
        else:
            printer.StatusUpdate('Ignoring unknown entry type: %s' % entry_type)
            return None
        title = Librarian.GetCleanValue(record, 'Title')
        if not title:
            title = '[no title entered]'
        # Author ingestion is currently disabled; keep an empty list so the
        # DocumentMetadata schema stays consistent.
        authors = []
        sources = Librarian.GetCleanValue(record, 'Source')
        if sources is None:
            sources = []
        else:
            sources = [source.strip() for source in sources.split('\n') if source]
        description = Librarian.GetCleanValue(record, 'Description')
        if not description:
            description = '[no description entered]'
        year = record.value_for_name('PublicationYear')
        # value_for_name returns None for an empty cell; treat that (and the
        # explicit 'Unknown'/'N/A' markers) as "no year" instead of crashing.
        if year is None or year == 'Unknown' or year == 'N/A':
            year = None
        else:
            year = int(year)
        labels = ['storage:%s' % storage_type, 'status:live']
        labels.extend(Librarian.QualifyLabels("Type", record, "MaterialType"))
        labels.extend(Librarian.QualifyLabels("Topic", record, "Topics"))
        labels.extend(Librarian.QualifyLabels("Grade", record, "GradeLevels"))
        #labels.extend(Librarian.QualifyLabels("Language", record, "Languages"))
        # TODO(sdd): Process audience and sub-audience
        #labels.extend(Librarian.QualifyLabels("Audience", record, "Audiences"))
        #labels.extend(Librarian.QualifyLabels("Origin", record, "SourceCountries"))
        #labels.extend(Librarian.QualifyLabels("Origin", record, "SourceRegions"))
        return DocumentMetadata(
            id=id, # finalized by PrepareImportableDocuments
            gs_uri='XXX', # finalized by PrepareImportableDocuments, ImportDocuments
            storage_uri=storage_uri,
            source_uri=source_uri,
            content_type='XXX', # finalized by PrepareImportableDocuments
            title=title,
            authors=authors,
            sources=sources,
            description=description,
            year=year,
            labels=labels)

    @staticmethod
    def FilteredFileURIs(bucket, filter):
        """Return storage URIs for bucket keys whose names match `filter`.

        `filter` is a regex fragment; a match additionally requires a
        lowercase file-extension suffix.
        """
        name_re = re.compile("(%s)\.[a-z]+$" % filter)
        return [boto.storage_uri('gs://%s/%s' % (key.bucket.name, key.name))
                for key in bucket.list()
                if name_re.match(key.name)]

    @staticmethod
    def LiveFileURIs(bucket):
        """URIs of imported files: names of the form rec-<digits>-<digits>."""
        live_pattern = "rec-\d+-\d+"
        return Librarian.FilteredFileURIs(bucket, live_pattern)

    @staticmethod
    def StagedFileURIs(bucket):
        """URIs of staged files: names of the form rec-<digits>-XXX."""
        staged_pattern = "rec-\d+-XXX"
        return Librarian.FilteredFileURIs(bucket, staged_pattern)

    @staticmethod
    def GetBucket():
        """Return the Google Storage bucket backing the REC library."""
        glp.util.EnsureBotoConfig()
        library_uri = boto.storage_uri('gs://glp-rec-library-beta')
        return library_uri.get_bucket()


    @staticmethod
    def FilteredMetadata(filter):
        """Return all DocumentMetadata whose id matches the regex `filter`."""
        id_re = re.compile(filter)
        return [metadata for metadata in DocumentMetadata.all()
                if id_re.search(metadata.id)]

    @staticmethod
    def StagedMetadata():
        """All metadata records still carrying the staged 'XXX' revision."""
        staged_pattern = "rec-\d+-XXX"
        return Librarian.FilteredMetadata(staged_pattern)

    @staticmethod
    def ClearStagedMetadata():
        """Delete every staged (rec-<digits>-XXX) DocumentMetadata record.

        (Fix: the debug prints now use the parenthesized form, which is
        valid in both Python 2 and 3.)
        """
        staged_re = re.compile("rec-\d+-XXX")
        to_delete = []
        # Collect first, then delete, so the query results are never mutated
        # while being iterated.
        for metadata in DocumentMetadata.all():
            print('Considering %s...' % metadata.id)
            if staged_re.match(metadata.id):
                to_delete.append(metadata)
        for metadata in to_delete:
            print('Deleting %s...' % metadata.id)
            metadata.delete()

    @staticmethod
    def ClearMetadata():
        """Delete all DocumentMetadata entities, in batches of 500."""
        query = DocumentMetadata.all()
        while True:
            batch = query.fetch(500)
            if not batch:
                break
            db.delete(batch)

    @staticmethod
    def ClearIndex():
        """Delete all LabelInfo entities plus the '#indexed' metadata copies."""
        query = LabelInfo.all()
        while True:
            batch = query.fetch(1000)
            if not batch:
                break
            db.delete(batch)
        indexed_copies = Librarian.FilteredMetadata('#indexed$')
        db.delete(indexed_copies)

    @staticmethod
    def StageMetadataSheet(client, spreadsheet_key, worksheet_name, table_name, column_map, printer):
        """Stage every record of one worksheet's table as DocumentMetadata.

        Ensures the table exists (rebuilding it when stale), clears any
        previously staged metadata, then stages each record with a fresh
        consecutive staging id.  Returns False (after a final report) when
        the worksheet or table cannot be located, True otherwise.

        (Fix: the record loop no longer wraps the entries in enumerate() and
        indexes the tuple — the index was unused and obscured the loop.)
        """
        worksheet_id, worksheet_rows = Librarian.GetWorksheetInfo(client, spreadsheet_key, worksheet_name)
        if worksheet_id is None:
            printer.FinalReport("Unable to locate '%s' worksheet" % worksheet_name)
            return False
        table_id = Librarian.EnsureTableId(client, spreadsheet_key, worksheet_id, worksheet_name, worksheet_rows, table_name, column_map)
        if table_id is None:
            printer.FinalReport("Unable to locate '%s' table" % table_name)
            return False
        printer.StatusUpdate("Clearing existing staged metadata...")
        Librarian.ClearStagedMetadata()
        stats = Librarian.EnsureStats()
        printer.StatusUpdate("Retrieving spreadsheet records...")
        for record in client.GetRecords(spreadsheet_key, table_id).entry:
            metadata = Librarian.ExtractDocumentMetadata(record, stats.next_staging_id, printer)
            if metadata is None:
                continue
            printer.StatusUpdate('Staging %s for import...' % metadata.title)
            metadata.WriteToPrinter(printer)
            metadata.put()
            stats.next_staging_id = stats.next_staging_id + 1
        stats.put()
        return True

    @staticmethod
    def StageMetadata(printer, authenticator):
        """Authenticate to the spreadsheets service and stage all metadata.

        Returns False when authentication requires a redirect or when the
        ingestion spreadsheet cannot be found; otherwise returns the result
        of staging the 'Archived' worksheet.
        """
        # TODO(sdd): check for illegal characters in all spreadsheet fields
        printer.StatusUpdate("Authenticating to spreadsheet service...")
        # NOTE(review): domain and account name are hard-coded here —
        # confirm this is intentional before reusing this code elsewhere.
        client = authenticator.CreateClient(
            gdata.spreadsheets.client.SpreadsheetsClient,
            'https://spreadsheets.google.com/feeds',
            'google.com', 'alyssad')
        if client == None: return False # Need to redirect for authentication
        spreadsheet_name = 'REC Library Ingestion Pipeline v1.99'
        spreadsheet_key = Librarian.GetSpreadsheetKey(client, spreadsheet_name)
        if spreadsheet_key == None:
            printer.FinalReport("Unable to locate '%s' spreadsheet" % spreadsheet_name)
            return False
        # Column-letter -> logical-field-name mapping for the 'Archived'
        # worksheet's staging table (consumed by ExtractDocumentMetadata
        # via record.value_for_name).
        result1 = Librarian.StageMetadataSheet(
            client, spreadsheet_key, 'Archived', 'Unstaged Metadata Table', {
                'A' : 'Timestamp',
                'B' : 'Username',
                'C' : 'FileType',
                'D' : 'DocumentType',
                'E' : 'InternalBookmarkUrl',
                'F' : 'ExternalBookmarkUrl',
                'G' : 'Filename',
                'H' : 'YouTubeUrl',
                'I' : 'Title',
                'J' : 'Description',
                'K' : 'MaterialType',
                'L' : 'GradeLevels',
                'M' : 'AudienceAndSub',
                'N' : 'Topics',
                'O' : 'Source',
                'P' : 'PublicationYear',
                },
                                 printer)
        result2 = True
        # TODO(sdd): Why can't we put a table into a form-based sheet?
        # Until we do that, we have to copy everything over each time
        # result2 = Librarian.StageMetadataSheet(
        # client, spreadsheet_key, 'Form Responses', 'Form Responses Table', {
        # 'M' : 'Title',
        # 'O' : 'Types',
        # 'V' : 'Authors',
        # 'N' : 'Description',
        # 'S' : 'Topics',
        # 'P' : 'Grades',
        # 'R' : 'Languages',
        # 'Q' : 'Audiences',
        # 'W' : 'PublicationYear',
        # 'X' : 'SourceOrgs',
        # 'T' : 'SourceCountries',
        # 'U' : 'SourceRegions',
        # 'D' : 'EntryType',
        # 'C' : 'FileType',
        # 'G' : 'ExternalBookmarkURL',
        # 'E' : 'InternalBookmarkURL',
        # 'I' : 'LiferayFileURL',
        # 'K' : 'Filename'
        # },
        # printer)
        return result1 and result2

    @staticmethod
    def PrepareImportableDocuments(printer):
        """Assign final ids/URIs to staged metadata and queue it for import.

        Matches staged metadata against files already uploaded to the staging
        bucket, allocates permanent file/bookmark ids from Stats, and returns
        the list of metadata key names that are ready for ImportDocument.

        Fixes: the two 'unknown storage type' raises referenced an undefined
        name `storage` (a NameError masked the intended KeyError) — they now
        report `storage_type`; removed the unused `count` and `content_type`
        locals.
        """
        glp.util.EnsureBotoConfig()
        importable = []
        staged_keys = {}
        printer.StatusUpdate("Retrieving list of staged files...")
        bucket = Librarian.GetBucket()
        # Index the already-uploaded staged files by their recorded source
        # URI so staged metadata can be matched to them below.
        for uri in Librarian.StagedFileURIs(bucket):
            key = bucket.get_key(uri.object_name)
            filename = key.get_metadata('glp-source-uri')
            staged_keys[filename] = key
            printer.StatusUpdate("Processing staged file: %s" % filename)
        stats = Librarian.EnsureStats()
        printer.StatusUpdate("Retrieving staged metadata...")
        # TODO(sdd): don't assign a new id if id != key
        for metadata in Librarian.StagedMetadata():
            document_type = None
            is_liferay = metadata.source_uri and metadata.source_uri.startswith('http://rec.glp.net/')
            storage_type = metadata.storage_type()
            if storage_type.startswith('site-'):
                # Bookmarks become small placeholder .txt objects in GS.
                metadata.id = 'rec-%07d-XXX' % stats.next_bookmark_id
                metadata.content_type = 'text/html'
                metadata.gs_uri = 'gs://%s/%s.txt' % (bucket.name, metadata.id)
                stats.next_bookmark_id = stats.next_bookmark_id + 1
                if storage_type == 'site-youtube':
                    document_type = 'YouTube bookmark'
                elif storage_type == 'site-external':
                    document_type = 'external bookmark'
                elif storage_type == 'site-rec':
                    document_type = 'REC bookmark'
                else:
                    raise KeyError('unknown storage type: %s' % storage_type)
            elif storage_type != 'google-storage':
                raise KeyError("unknown storage type: %s" % storage_type)
            elif not metadata.source_uri in staged_keys:
                printer.StatusUpdate('warning: ignoring %s (%s not staged)' % (metadata.title, metadata.source_uri))
                continue
            else:
                staged_key = staged_keys[metadata.source_uri]
                metadata.id = 'rec-%07d-XXX' % stats.next_file_id
                metadata.content_type = staged_key.content_type
                staged_ext = re.match('rec-\d+-XXX\.(.*)', staged_key.name).group(1)
                metadata.storage_uri = 'gs://%s/%s' % (staged_key.bucket.name, staged_key.name)
                metadata.gs_uri = 'gs://%s/%s.%s' % (bucket.name, metadata.id, staged_ext)
                stats.next_file_id = stats.next_file_id + 1
                if is_liferay:
                    document_type = 'Liferay file'
                else:
                    document_type = 'staged file'
            printer.StatusUpdate('Queueing %s %s (%s) for import as %s.' % (document_type, metadata.title, metadata.key().name(), metadata.id))
            importable.append(metadata.key().name())
            metadata.put()
        stats.put()
        return importable

    @staticmethod
    def GetCleanValue(record, field):
        """Return the field's value HTML-escaped, ASCII-only and trimmed.

        Returns '' when the field is missing or empty.
        """
        value = record.value_for_name(field)
        if not value:
            return ''
        ascii_value = value.encode('ascii', 'ignore')
        return glp.util.EscapeHTML(ascii_value).rstrip().lstrip()

    @staticmethod
    def ImportDocument(staging_id, printer):
        """Import one staged document into Google Storage.

        Rewrites the staged 'XXX' revision placeholder to '001', copies the
        staged file (or writes an empty placeholder object for bookmarks)
        with the document metadata attached as headers, then deletes the
        staging record and enqueues a metadata-update task.  Returns False
        when the upload fails, True otherwise.

        Fixes: `except ... as e` replaces the Python-2-only comma form
        (valid from 2.6 onward); `items()` replaces `iteritems()`; removed
        the unused `imported_id` local.
        """
        glp.util.EnsureBotoConfig()
        metadata = DocumentMetadata.get_by_key_name(staging_id)
        metadata.gs_uri = metadata.gs_uri.replace('XXX', '001')
        imported_uri = boto.storage_uri(metadata.gs_uri)
        if metadata.storage_type() == 'google-storage':
            staged_uri = boto.storage_uri(metadata.storage_uri)
            # The public download URL becomes the document's storage URI.
            metadata.storage_uri = 'http://commondatastorage.googleapis.com/%s/%s' % (imported_uri.bucket_name, imported_uri.object_name)
        headers = {
            'Content-Type' : metadata.content_type,
            'GLP-Storage-URI' : metadata.storage_uri,
            'GLP-Title': metadata.title,
            'GLP-Description': metadata.description,
            'GLP-Labels': ' '.join(metadata.labels)
            }
        if metadata.source_uri: headers['GLP-Source-URI'] = metadata.source_uri
        if metadata.authors: headers['GLP-Authors'] = '; '.join(metadata.authors)
        if metadata.sources: headers['GLP-Sources'] = '; '.join(metadata.sources)
        if metadata.year: headers['GLP-Year'] = str(metadata.year)
        if metadata.storage_type() == 'google-storage':
            printer.StatusUpdate("Importing file %s as %s..." % (metadata.source_uri, imported_uri.uri))
            staged_key = staged_uri.get_key()
            headers['x-amz-acl'] = 'public-read'
            # TODO(sdd): shouldn't this copy the ACL and content type?
            try:
                staged_key.copy(imported_uri.bucket_name, imported_uri.object_name, headers)
                # staged_key.delete()
            except boto.exception.S3ResponseError as e:
                printer.StatusUpdate("UNABLE TO UPLOAD FILE %s: %s" % (imported_uri.object_name, e.body))
                return False
        else:
            printer.StatusUpdate('Importing bookmark %s as %s...' % (metadata.storage_uri, imported_uri.uri))
            # TODO(sdd): File bug about header prefix business.
            bookmark_headers = {}
            for field, value in headers.items():
                clean_field = 'x-amz-meta-%s' % field if field.startswith('GLP-') else field
                bookmark_headers[clean_field] = value
            try:
                imported_uri.new_key().set_contents_from_string('', bookmark_headers, True, None, 10, 'public-read')
            except boto.exception.S3ResponseError as e:
                printer.StatusUpdate("UNABLE TO UPLOAD BOOKMARK %s: %s" % (imported_uri.object_name, e.body))
                return False
        printer.StatusUpdate('Queueing %s for metadata update...' % imported_uri.uri)
        metadata.delete()
        glp.util.EnqueueTask('library', 'update-metadata', imported_uri.uri, 'librarian')
        return True

    @staticmethod
    def gs_uri_to_id(uri):
        return re.match('.*://.*/(rec-\d+-\d+)\.[a-z]+', uri).group(1)

    @staticmethod
    def UpdateMetadata(gs_uris, printer):
        """Rebuild DocumentMetadata records from the GS objects' headers.

        For each imported GS object, reads the glp-* metadata headers back
        and stores a fresh DocumentMetadata entity, then enqueues an index
        update.  Objects labeled status:replaced have their stored metadata
        record removed instead.

        Fix: the replaced-document branch referenced `document` before it
        was assigned (NameError); it now looks up the stored record by key
        name and deletes it when present.
        """
        glp.util.EnsureBotoConfig()
        printer.StatusUpdate('Starting to update metadata for %s' % ', '.join(gs_uris))
        if len(gs_uris) == 0: return
        for gs_uri in [boto.storage_uri(gs_uri) for gs_uri in gs_uris]:
            printer.StatusUpdate('Updating metadata for %s...' % gs_uri.uri)
            key = gs_uri.get_key()
            id = Librarian.gs_uri_to_id(gs_uri.uri)
            source_uri = key.get_metadata('glp-source-uri') if 'glp-source-uri' in key.metadata else None
            authors = key.get_metadata('glp-authors').split('; ') if 'glp-authors' in key.metadata else []
            sources = key.get_metadata('glp-sources').split('; ') if 'glp-sources' in key.metadata else []
            year = int(key.get_metadata('glp-year')) if 'glp-year' in key.metadata else None
            labels = [LabelUtils2.TextToId(label) for label in key.get_metadata('glp-labels').split(' ')]
            if 'status:replaced' in labels:
                printer.StatusUpdate('  Replaced.')
                existing = DocumentMetadata.get_by_key_name(id)
                if existing:
                    existing.delete()
                continue
            document = DocumentMetadata(
                id=id,
                gs_uri=gs_uri.uri,
                storage_uri=key.get_metadata('glp-storage-uri'),
                source_uri=source_uri,
                content_type=key.content_type,
                title=key.get_metadata('glp-title'),
                authors=authors,
                sources=sources,
                description=key.get_metadata('glp-description'),
                year=year,
                labels=labels)
            document.put()
            document.WriteToPrinter(printer)
            glp.util.EnqueueTask('library', 'update-index', id, 'librarian')

    @staticmethod
    def UpdateIndex(ids, printer):
        """Incrementally update LabelInfo index entries for the given ids.

        Compares each document's current labels/topics against the snapshot
        stored under '<id>#indexed' and applies only the delta to the
        LabelInfo records.  Empty LabelInfo records are deleted at the end.
        """
        # Load every existing LabelInfo into the LabelInfos mapping, keyed
        # by label.  (Presumably LabelInfos creates entries on demand for
        # labels not seen before — TODO confirm.)
        label_infos = LabelInfos()
        for label_info in LabelInfo.all():
            label_infos[label_info.label] = label_info
        for id in ids:
            printer.StatusUpdate('Updating index entries for %s' % id)
            metadata = DocumentMetadata.get_by_key_name(id)
            labels = set(metadata.labels)
            label_topics = set(LabelUtils.ComputeLabelTopics(metadata))
            # The '#indexed' clone stores the state used for the previous
            # index update, so only the delta needs to be applied below.
            indexed_id = '%s#indexed' % id
            last_metadata = DocumentMetadata.get_by_key_name(indexed_id)
            if (last_metadata == None):
                last_label_topics = set()
                last_labels = set()
            else:
                last_label_topics = set(LabelUtils.ComputeLabelTopics(last_metadata))
                last_labels = set(last_metadata.labels)
            if metadata.status() == 'replaced':
                # A replaced document is removed from the index entirely:
                # nothing added, all previously indexed labels removed.
                added_labels = set()
                removed_labels = last_labels
                added_label_topics = set()
                removed_label_topics = set(last_label_topics)
            else:
                added_labels = labels - last_labels
                removed_labels = last_labels - labels
                added_label_topics = label_topics - last_label_topics
                removed_label_topics = last_label_topics - label_topics
            for label in added_labels:
                label_infos[label].add_document(metadata.key())
            for label_topic in added_label_topics:
                label_infos[label_topic[0]].add_topic(label_topic[1])
            for label in removed_labels:
                label_infos[label].remove_document(metadata.id)
            for label_topic in removed_label_topics:
                label_infos[label_topic[0]].remove_topic(label_topic[1])
            # '-' appears to act as a catch-all entry listing every document.
            label_infos['-'].add_document(metadata.key())
            # Snapshot the freshly indexed state for the next delta pass.
            metadata.clone(indexed_id).put()
        for info in label_infos.values():
            if (info.is_empty()):
                info.delete()
            else:
                info.put()
