# -*- coding: utf-8 -*-
import re
import os
import hashlib
import cPickle
from tokenizer import Tokenizer 
import stemmer

class Index():
    """Builds and queries a disk-backed inverted index over a corpus file.

    Articles are delimited by "##TITLE##" lines.  Postings are buffered in
    memory (self.index) and periodically flushed to one pickle file per
    token under ./index/, sharded one hex digit per directory level using
    the token's SHA-256.  Python 2 module (cPickle, str-based hashing).
    """

    def __init__(self):
        self.number = 0   # articles processed since the last flush
        self.index = {}   # in-memory buffer: token -> [doc_id, ...]
        self.tys = 0      # progress counter printed every 1000 articles
        self.s = stemmer.Stemmer()
        script_path = os.path.dirname(os.path.abspath(__file__))
        self.index_path = script_path + '/index'
        if not os.path.exists(self.index_path):
            os.makedirs(self.index_path)

        # Restore the doc_id -> title map from a previous run, if present.
        # Binary mode: the file holds pickle data, not text.
        if os.path.isfile('docs.whizzy'):
            docs = open('docs.whizzy', 'rb')
            try:
                self.docs = cPickle.load(docs)
            finally:
                docs.close()
        else:
            self.docs = {}

    def parse(self, file):
        """Index every article found in the corpus file *file*."""
        self.articles = open(file, 'r')
        try:
            for i, title, article in self.articles_generator():
                self.postings_generator(i, article)
                self.docs[i] = title
                self.saveCachedIndex()
            # Force a final flush of whatever is still buffered in memory.
            self.number = 1000000
            self.saveCachedIndex()
        finally:
            self.articles.close()

    def articles_generator(self):
        """Yield (doc_id, title, lines) for each article in self.articles.

        Fixes two defects of the original: the doc_id yielded now matches
        the article it belongs to (it used to be one ahead), and the last
        article in the file is no longer silently dropped.
        """
        i = 0
        title = None
        article = []
        for line in self.articles:
            if self.is_start(line):
                if title is not None:
                    yield (i, title, article)
                i = i + 1
                title = self.get_title(line)
                article = []
            else:
                article.append(line)
        if title is not None:
            yield (i, title, article)

    def postings_generator(self, i, article):
        """Tokenize one article and add its tokens to the in-memory index."""
        tokenizer = Tokenizer(article, word_processor=self.s)
        tokens = {}

        # NOTE(review): iterating over tokenizer.next() assumes it returns
        # an iterable of (position, token) pairs -- confirm Tokenizer API.
        for j, token in tokenizer.next():
            if token not in tokens:
                tokens[token] = []
            tokens[token].append(j)
        self.addToIndex(i, tokens)

    def addToIndex(self, i, tokens):
        """Record doc *i* in the postings list of every token it contains.

        The in-document positions are currently ignored: only the doc id
        is stored, once per distinct token.
        """
        for token in tokens:
            if token not in self.index:
                self.index[token] = []
            self.index[token].append(i)

    def saveCachedIndex(self):
        """Flush the in-memory index to disk every ~25000 articles."""
        if self.number % 1000 == 0:
            print(self.tys)   # progress heartbeat
            self.tys = self.tys + 1

        if self.number > 25000:
            self.number = 0
            self.save()
            self.index = {}
        else:
            self.number = self.number + 1

    def get_path_for_token(self, token):
        """Return (and create directories for) *token*'s postings file.

        The file lives 8 directories deep, one hex digit of the token's
        SHA-256 per level, with the full hash as the file name.
        """
        name = hashlib.sha256(token).hexdigest()
        path = self.index_path
        for level in range(8):
            path = path + '/' + name[level]
            if not os.path.exists(path):
                os.makedirs(path)
        return path + '/' + name

    def save(self):
        """Merge the in-memory postings into the per-token pickle files."""
        for token, postings in self.index.iteritems():
            path = self.get_path_for_token(token)
            if os.path.exists(path):
                old_file = open(path, 'rb')
                try:
                    old_postings = cPickle.load(old_file)
                finally:
                    old_file.close()
            else:
                old_postings = []

            old_postings.extend(postings)
            out = open(path, 'wb')
            try:
                cPickle.dump(old_postings, out)
            finally:
                out.close()

    def getIndex(self, token):
        """Load the postings list for *token* from disk; [] when absent.

        Rewritten to mirror get_path_for_token() exactly: the original
        referenced undefined names (script_path, napis, sciezka) and read
        a 4-characters-per-level layout that save() never wrote.
        """
        name = hashlib.sha256(token).hexdigest()
        path = self.index_path
        for level in range(8):
            path = path + '/' + name[level]
            if not os.path.exists(path):
                return []

        path = path + '/' + name
        if not os.path.isfile(path):
            return []
        f = open(path, 'rb')
        try:
            result = cPickle.load(f)
        finally:
            f.close()
        return result

    def is_start(self, line):
        """True when *line* marks the beginning of a new article."""
        return re.match(r"##TITLE##", line) is not None

    def get_title(self, line):
        """Strip the article marker from a title line."""
        return re.sub(r"##TITLE## ", '', line)

    def get_or(self, first_token, second_token):
        """Postings of documents containing either token (OR query)."""
        first_list = self.getIndex(first_token)
        second_list = self.getIndex(second_token)
        return mergePosting(first_list, second_list)

    def get_and(self, first_word, second_word):
        """Postings of documents containing both words (AND query).

        The original body referenced first_token/second_token, which are
        not this method's parameters (NameError on every call).
        """
        first_list = self.getIndex(first_word)
        second_list = self.getIndex(second_word)
        return mergeANDPosting(first_list, second_list)


def mergePosting(a, b):
    """Union-merge two sorted postings lists of doc ids (OR query).

    Returns a sorted list containing every doc id present in *a* or *b*,
    without duplicating ids that appear in both.  The original accessed
    `a.doc_id` on a plain list (AttributeError), spliced the whole
    remaining list with `result += a` instead of one element, and copied
    the tail on every step (`a = a[1:]`, quadratic).  Rewritten as a
    linear two-pointer merge.
    """
    if not a:
        return b
    if not b:
        return a

    result = []
    ia = 0
    ib = 0
    while ia < len(a) and ib < len(b):
        if a[ia] < b[ib]:
            result.append(a[ia])
            ia = ia + 1
        elif b[ib] < a[ia]:
            result.append(b[ib])
            ib = ib + 1
        else:
            # Same doc id in both lists: emit it once.
            result.append(a[ia])
            ia = ia + 1
            ib = ib + 1

    # One of the two lists is exhausted; append the other's remainder.
    result.extend(a[ia:])
    result.extend(b[ib:])
    return result

def mergeANDPosting(a, b):
    """Intersect two sorted postings lists of doc ids (AND query).

    Returns the sorted list of doc ids present in both *a* and *b*.  The
    original accessed `a.doc_id` on a plain list (AttributeError), fell
    through from the equality branch into an index on a possibly-empty
    list, spliced whole lists with `result += a`, and wrongly appended the
    leftover tail of one list -- an intersection must discard it.
    """
    if not a or not b:
        return []

    result = []
    ia = 0
    ib = 0
    while ia < len(a) and ib < len(b):
        if a[ia] == b[ib]:
            result.append(a[ia])
            ia = ia + 1
            ib = ib + 1
        elif a[ia] < b[ib]:
            ia = ia + 1
        else:
            ib = ib + 1

    return result

