from __future__ import division
import re, bisect, fnmatch
from dataio import FileManager
from math import sqrt,ceil,log10,pow
from util import *
from bisect import bisect_left, bisect_right, insort
from collections import deque, defaultdict
from array import array
from heapq import nlargest
from itertools import chain


class Dictionary:
    '''Dictionary of terms.

    Maps term strings to integer term IDs, and keeps prefix/suffix indexes of
    the original (unstemmed) words so that wildcard queries can be answered by
    range scans (see lookup()).
    '''
    def __init__(self, filename=''):
        '''Constructor of Dictionary.
        @param filename: common file-name prefix used to derive the dictionary file name.'''
        # Next term's ID.
        self.tid = 0
        # Term -> termid dict, e.g., {'apple':1, 'pear':2}; constant-time lookup by term string.
        self.termdict = dict()
        # Words (terms before stemming) grouped by first character,
        # e.g. {'a': set(['apple'])}; used for prefix wildcard matching.
        self.prefixes = defaultdict(set)
        # Reversed words grouped by their first character (the word's LAST
        # character); used for suffix wildcard matching.
        self.suffixes = defaultdict(set)
        # Dictionary file name.
        self._file = filename + '_dictionary.txt'

    def __getstate__(self):
        """Return state values to be pickled (prefix/suffix indexes are not pickled)."""
        return (self.tid, self.termdict, self._file)

    def __setstate__(self, state):
        """Restore state from the unpickled state values.
        The prefix/suffix indexes are reloaded lazily from the dictionary file
        by lookup() via FileManager.readprefix/readsuffix."""
        self.tid, self.termdict, self._file = state
        self.prefixes = dict()
        self.suffixes = dict()

    def getTermID(self, term):
        '''Look for a term's ID with term string.
        @param term: The term's string.
        @return: If the term is found, return its term ID, else return None.'''
        return self.termdict.get(term)

    def getWordID(self, word):
        '''Look for a word's ID with the word's string.
        @param word: The word's string.
        @return: If the word is found, return its word ID, else return None.
        NOTE(review): self.worddict is never initialized in this file, so this
        raises AttributeError unless a collaborator assigns it externally --
        confirm against FileManager.'''
        return self.worddict.get(word)

    def append(self, term):
        '''Appends a new term into the dictionary.
        @param term: The term string to be appended.
        @return: (stemmed term, term ID); the ID is -1 for stopwords.'''
        t = stem(term, 1)
        if t not in STOPWORDS:
            # `has_key` was removed in Python 3; membership test is equivalent.
            if t not in self.termdict:
                self.termdict[t] = self.tid
                self.tid += 1
            tid = self.termdict[t]
        else:
            tid = -1
        # Index the raw word (and its reversal) for wildcard lookups,
        # even for stopwords.
        self.prefixes[term[0]].add(term)
        self.suffixes[term[-1]].add(term[-1::-1])
        return t, tid

    def _prefix_range(self, prefix):
        '''Return the sorted list of indexed words starting with `prefix`.
        Loads the bucket from the dictionary file on first use.'''
        if prefix[0] not in self.prefixes:
            self.prefixes[prefix[0]] = FileManager.readprefix(self._file, prefix[0])
        bucket = self.prefixes[prefix[0]]
        if not isinstance(bucket, list):
            # In-memory buckets are sets; bisect requires a sorted sequence.
            bucket = sorted(bucket)
        return bucket[bisect_left(bucket, prefix):bisect_right(bucket, stradv(prefix))]

    def _suffix_range(self, rsuffix):
        '''Return the sorted list of REVERSED indexed words starting with
        `rsuffix` (the reversed suffix). Loads the bucket on first use.'''
        if rsuffix[0] not in self.suffixes:
            self.suffixes[rsuffix[0]] = FileManager.readsuffix(self._file, rsuffix[0])
        bucket = self.suffixes[rsuffix[0]]
        if not isinstance(bucket, list):
            # In-memory buckets are sets; bisect requires a sorted sequence.
            bucket = sorted(bucket)
        return bucket[bisect_left(bucket, rsuffix):bisect_right(bucket, stradv(rsuffix))]

    def lookup(self, s):
        ''' Used to perform a dictionary lookup with wildcard expressions. For non-wildcard lookups, use getTermID.
        Example:
        >>> lookup('no*er')
        ['november']
        >>> lookup('*u*e*r')
        ['summer']
        @param s: wildcard expression (must contain at least one '*')
        @return: A list of matching words. Notice here we don't return terms, instead we return words, which are terms before stemming
        '''
        prefix = s[:s.index('*')]
        suffix = s[s.rindex('*')+1:]
        pattern = re.compile(fnmatch.translate(s))
        if s.count('*') > 1:
            # Inner wildcards: range candidates must be post-filtered
            # against the full pattern.
            if prefix:
                for cand in self._prefix_range(prefix):
                    m = pattern.match(cand)
                    if m:
                        yield m.group()
            elif suffix:    # no prefix, only suffix
                for rcand in self._suffix_range(suffix[-1::-1]):
                    m = pattern.match(rcand[-1::-1])
                    if m:
                        yield m.group()
        else:
            # Exactly one '*': the prefix/suffix ranges are exact matches,
            # output results directly (intersect when both sides constrain).
            pterms = self._prefix_range(prefix) if prefix else []
            sterms = [r[-1::-1] for r in self._suffix_range(suffix[-1::-1])] if suffix else []
            if pterms and sterms:
                for word in set(pterms) & set(sterms):
                    yield word
            else:
                # Whichever side is non-empty (or nothing at all).
                for word in set(pterms) or set(sterms):
                    yield word

class Posting:
    '''Data structure for a term's positional posting list.'''

    def __init__(self, id, freq=0, postings=None):
        '''Constructor of Posting.
        @param id: Term's ID.
        @param freq: Term's initial frequency. default 0. (Was silently
            discarded before; now honoured.)
        @param postings: Optional initial posting dict {docid: [positions]}.
            default None, meaning empty. A shared mutable default ({}) was
            replaced by None to avoid cross-instance aliasing.'''
        # Term ID
        self.id = id
        # Frequency of this term, sum of all occurrences in all documents
        self.freq = freq
        # dict of list. e.g. {2:[3,67,88], 4:[45]}, which means this term appeared
        # in doc 2 and 4, in position 3,67,88 and 45 respectively.
        self.postings = defaultdict(list, postings or {})
        # per-document term counts ("document frequencies")
        self.df = defaultdict(int)
        # Skip pointers for positional indices. e.g., {5:{0:3,3:7}, 9:{}, 100:{46: 90}}
        self.skip = {}
        # Skip pointers for docIDs. e.g., {0:3,3:7}
        self.docskip = {}
        # The max docID so far
        self._maxdocid = -1
        # Total number of distinct documents seen so far
        self._docnum = 0
        # token string -> sorted list of docIDs where that raw token occurred
        self.tokens = defaultdict(list)

    @property
    def docnum(self):
        '''Number of distinct documents this term occurs in.'''
        return self._docnum

    def __getstate__(self):
        """Return state values to be pickled."""
        return (self.id, self.freq, dict(self.postings), self.skip, self.docskip, self._maxdocid, self._docnum, dict(self.df), dict(self.tokens))

    def __setstate__(self, state):
        """Restore state from the unpickled state values."""
        self.id, self.freq, self.postings, self.skip, self.docskip, self._maxdocid, self._docnum, self.df, self.tokens = state

    def __getitem__(self, docid):
        ''' Implemented to enable posting[n] operation. '''
        return self.postings.get(docid)

    def merge(self, p):
        '''Merge another Posting for the same term into this one.
        @param p: the Posting to merge in; ignored if its term id differs.'''
        if self.id != p.id:
            return
        self.freq += p.freq
        for docid, positions in p.postings.items():
            # `has_key` was removed in Python 3; membership test is equivalent.
            if docid in self.postings:
                # union of position lists, kept sorted
                self.postings[docid] = sorted(set(self.postings[docid]) | set(positions))
            else:
                self.postings[docid] = positions
        self._maxdocid = max(self._maxdocid, p._maxdocid)
        self._docnum = max(self._docnum, p._docnum)

    def append(self, docid, positionid, token):
        '''Append a positional index to this term's posting list.
        @param docid: The ID of the document where the term was found.
        @param positionid: Positional index in the document where the term occurred.
        @param token: the raw (unstemmed) token string.'''
        if docid > self._maxdocid:
            # assumes docids arrive in non-decreasing order, so a new max
            # means a new document
            self._maxdocid = docid
            self._docnum += 1
        insort(self.postings[docid], positionid)
        self.df[docid] += 1
        self.freq += 1
        insort(self.tokens[token], docid)

    def getDocIDs(self):
        '''Return the sorted list of docIDs this term occurs in.'''
        return sorted(self.postings.keys())

    def getDocIDsForTokens(self, tokens):
        '''Yield, in ascending order, the docIDs where any of `tokens`
        occurred as a raw (unstemmed) token.'''
        for docid in sorted(set(chain(*[self.tokens[i] for i in filter(lambda x: x in tokens, self.tokens.keys())]))):
            yield docid

    def getPostings(self, docid):
        '''Return the position list for `docid` (empty list if absent;
        note this materializes the entry in the defaultdict).'''
        return self.postings[docid]

    def buildSkipPointer(self):
        '''Build skip pointers for postings. if length of id is P,
        use sqrt(P) evenly-spaced skip pointers.
        @return: a dict of skip points (input's index)
        input list:[2,4,8,16,19,23,28,43]
        output skip points:{0:3,3:7}'''
        skip = {}
        for docid, positions in self.postings.items():
            n = len(positions)
            if n == 0:
                # guard: empty position list would divide by zero below
                continue
            # ~sqrt(n) spacing between skip pointers
            dx = int(ceil(n // sqrt(n)))
            if dx < 2:
                continue
            pointers = {}
            i = 0
            while i < n:
                if i + dx < n:
                    pointers[i] = i + dx
                i += dx
            if pointers:
                skip[docid] = pointers
        self.skip = skip
        # Same scheme over the docID sequence itself.
        dl = len(self.postings)
        if dl:  # guard: sqrt(0) would divide by zero
            dx = int(ceil(dl // sqrt(dl)))
            if dx >= 2:
                i = 0
                while i < dl:
                    if i + dx < dl:
                        self.docskip[i] = i + dx
                    i += dx
        return

class Index:
    '''Inverted positional index over a collection of XML documents.

    Building an instance parses the collection, computes tf-idf statistics,
    builds skip pointers and writes the dictionary/postings files.
    '''

    # number of postings buffered before _flush() writes them to disk
    FLUSH = 1000

    def __init__(self, file=''):
        '''Constructor of Collection.
        @param file: file name of the xml file, e.g., 'collections/seasons.xml'
        '''
        # Common file prefix, e.g., 'collections/seasons'
        self._file = file[:-4]
        # Filename of dictionary file
        self._dictfile = self._file + "_dictionary.txt"
        # Filename of postings file
        self._postingfile = self._file + "_postings.txt"
        # Filename of pagerank file
        self._pagerankfile = self._file + '_pagerank.txt'
        # Filename of labMNB file
        self._labmnbfile = self._file + '_labmnb.txt'
        # Filename of labSVM file
        self._labsvmfile = self._file + '_labsvm.txt'
        # dict of Posting instances, e.g., {0: <Posting #0>, 1: <Posting #1>, ...}. Constant time seek.
        self.postings = {}
        # dict of Posting lists for words
        self.wordpostings = {}
        # per-document sqrt of the sum of squared term frequencies (cosine norms)
        self.normTFs = []
        # An instance of Dictionary.
        self.dictionary = Dictionary(self._file)
        # termID -> document frequency (number of docs containing the term);
        # filled by storetfidfs()
        self.idfs = {}
        # number of all documents
        self._docnum = 0
        # Record the max. term id so far.
        self._maxtid = -1
        # titles of docs
        self.titles = []

        # Parses the xml document and extract terms and positional indices.
        FileManager.parse(file, self)
        self.storetfidfs()
        # Build skip pointers.
        self.buildSkipPointers()
        # Write the results back to dictionary file and postings file.
        FileManager.write(self, self._file)

    def __getstate__(self):
        """Return state values to be pickled."""
        return (self._file, self._dictfile, self._postingfile, self._pagerankfile, self._labmnbfile, self._labsvmfile, self._docnum, self._maxtid)

    def __setstate__(self, state):
        """Restore state from the unpickled state values; bulky structures are
        reloaded from disk on demand rather than pickled."""
        self._file, self._dictfile, self._postingfile, self._pagerankfile, self._labmnbfile, self._labsvmfile, self._docnum, self._maxtid = state
        self.postings = {}
        self.wordpostings = {}
        self.documents = {}
        self.dictionary = None
        self.normTFs = []
        self.idfs = []
        self.pagerank = []
        self.labmnb = []
        self.labsvm = []

    @property
    def docnum(self):
        '''Total number of documents in the collection.'''
        return self._docnum

    @property
    def filename(self):
        '''Common file prefix of the collection.'''
        return self._file

    @property
    def dictfile(self):
        '''Filename of the dictionary file.'''
        return self._dictfile

    @property
    def postingfile(self):
        '''Filename of the postings file.'''
        return self._postingfile

    @classmethod
    def fromFile(cls, dictfile, postingfile, pagerankfile=None, labmnb=None, labsvm=None, advanced=False):
        '''Create an index from dictionary file and postings file.
        @return: an index instance.'''
        idx = FileManager.read(dictfile, postingfile, pagerankfile, labmnb, labsvm, advanced)
        idx._dictfile = dictfile
        idx._postingfile = postingfile
        return idx

    def _flush(self):
        '''Write buffered postings to disk and drop them from memory.'''
        FileManager.writeposting(self, self._postingfile)
        self.postings = {}

    def buildSkipPointers(self):
        '''Build skip pointers for every in-memory posting list.'''
        for posting in self.postings.values():
            posting.buildSkipPointer()

    def storetfidfs(self):
        '''Cache each term's document frequency for idf().
        Previously this stored a lazily-bound generator, which is not
        subscriptable and would be exhausted after one pass; a dict keyed by
        termID fixes idf() lookups.'''
        self.idfs = dict((tid, len(p.postings)) for tid, p in self.postings.items())

    def byTerm(self, term):
        '''Look up the Posting instance of a term with the term's string.
        @param term: The term string.
        @return: if the Posting instance is found, return it, else return None.'''
        termID = self.dictionary.getTermID(term)
        if termID is None:
            return None
        return self.byTermID(termID)

    def byTermID(self, termID):
        '''Look up the Posting instance of a term with the term's ID.
        @param termID: The term ID.
        @return: if the Posting instance is found, return it, else return None.'''
        if termID > self._maxtid:
            return None
        try:
            return self.postings[termID]
        except KeyError:
            # Not in memory (e.g. flushed); try reloading from the postings file.
            p = FileManager.readposting(self._postingfile, termID)
            if p:
                self.addPosting(p)
                return p
            return None

    def tf(self, termID, docID):
        '''Returns the term frequency in the given document.
        @param termID: the term's ID
        @param docID: id of the document
        @return: tf, or 0 when the term is unknown
        '''
        posting = self.byTermID(termID)
        return posting.df.get(docID, 0) if posting else 0

    def idf(self, termID):
        '''Returns the term's inverse document frequency.
        @param termID: the term's ID.
        @return: idf of the term (true division via __future__ import).
        '''
        return log10(self.docnum / self.idfs[termID])

    def append(self, term, docid, positionid, token=''):
        '''Append a positional index of a term. If the term already exists, append to its posting, else a new Posting instance is created.
        @param term: The term's string.
        @param docid: The document ID that the term occurred in.
        @param positionid: Position in the document where the term occurred.
        @param token: the raw (unstemmed) token.
        @return: the Posting instance, or None when the term is unknown
            to the dictionary.'''
        p = self.byTerm(term)
        if p is None:
            tid = self.dictionary.getTermID(term)
            if tid is None:
                # Term was never registered; nothing to index.
                return None
            # Create a fresh posting (also recovers the case where a flushed
            # posting could not be read back, which used to crash with
            # AttributeError on None).
            p = Posting(tid)
            if tid > self._maxtid:
                self._maxtid = tid
            self.postings[tid] = p
        p.append(docid, positionid, token)
        return p

    def addPosting(self, posting):
        '''Adds a new Posting instance into self.postings.
        @param posting: the instance to be added.'''
        if posting.id > self._maxtid:
            self._maxtid = posting.id
        self.postings[posting.id] = posting

    def appendDocument(self, docid, title, text):
        '''Append a new document. Should only be called by RawXMLDocParser.
        @param docid: document ID.
        @param title: document's title.
        @param text: document's text.'''
        self._docnum += 1
        self.titles.append(title)
        # Tokenize on runs of non-letters over the lowercased title + text.
        tokens = re.split('[^A-Za-z]+', title.lower() + ' ' + text.lower())
        index = -1
        counts = defaultdict(int)
        for token in tokens:
            if not token:
                continue
            stemmedterm, termid = self.dictionary.append(token)
            if termid != -1:  # skip stopwords
                index += 1
                counts[termid] += 1
                self.append(stemmedterm, docid, index, token)
        # Euclidean norm of the term-frequency vector (cosine normalization).
        self.normTFs.append(sqrt(sum(cnt * cnt for cnt in counts.values())))
        








#deprecated
class Document:
    '''Plain data holder for a single document (deprecated).'''

    def __init__(self, id, title='', text=''):
        '''Constructor of Document.
        @param id: docID
        @param title: document's title (default empty)
        @param text: document's text (default empty)'''
        self.id, self.title, self.text = id, title, text

    