'''A tool for more efficient deduplication when merging bibliographies.

When merging bibliographies, it is not uncommon to find that multiple
entries in the sources represent the same item. These duplicates must be
detected so that the duplicate entries may be merged. A naive approach
to deduplication might involve many repetitive calculations. This module
contains the L{Fingerprint} class which implements one strategy for
simplifying the process. A Fingerprint object is calculated for each
entry in each of the source bibliographies. Deduplication then proceeds
by comparing fingerprints instead of by comparing the entries directly.

The Fingerprint class provided here is relatively simplistic. Users are
encouraged to create subclasses to meet their needs.

A well implemented Fingerprint subclass will allow quick comparisons
between fingerprints. Any expensive or potentially repetitive
calculations should be performed during fingerprint creation.

'''

import curses
import json
import locale
import os
import re
import string
import textwrap
import time
import warnings
from difflib import SequenceMatcher

from bkn.bibtex.bibliography import *
from bkn.bibtex.error import BKNBibtexWarning
from bkn.bibtex.string_tools import dehyphenate_words
from bkn.bibtex.string_tools import strip_punctuation
from bkn.bibtex.string_tools import tex2ascii

locale.setlocale(locale.LC_ALL, '')
PREFERRED_ENCODING = locale.getpreferredencoding()


class Fingerprint:
    '''A comparable summary of a bibliography entry for duplicate detection.

    All expensive normalization work is done at construction time so that
    L{compare} is cheap to call over many fingerprint pairs.

    @param entry: The entry to fingerprint
    @type entry: L{BibEntry}
    '''

    STOPWORDS = ['','with','the','a','an','and']
    '''These strings will be ignored when creating the frequency table.'''

    def __init__(self, entry):
        '''Creates a new fingerprint from the supplied L{BibEntry}.'''
        try:
            self.year = self._extract_first_consecutive_digits(entry[u'year'])
        except Exception:
            # a missing or digit-free year field is expected for some
            # entries; fall back to a sentinel instead of failing
            self.year = 0
        self.title = self._extract_and_normalize_title(entry[u'title'])
        self.token_freq = self._create_token_frequency_table(entry)

    def _create_token_frequency_table(self, record):
        '''Builds a word -> occurrence-count map over all BibTeX fields.'''
        result = {}
        for attribute_name in record.attribute_names():
            if attribute_name not in BIBTEX_FIELDS:
                continue # skip non-BibTeX fields
            normalized_text = strip_punctuation(
                tex2ascii(record[attribute_name]).lower())
            for word in normalized_text.split():
                if word in Fingerprint.STOPWORDS:
                    continue # skip stopwords
                result[word] = result.get(word, 0) + 1
        return result

    def _extract_and_normalize_title(self, title):
        '''Converts a TeX title to plain lower-case dehyphenated text.'''
        return dehyphenate_words(tex2ascii(title)).lower()

    def _extract_first_consecutive_digits(self, text):
        '''Returns the first run of consecutive digits in C{text} as an int.

        @raise BKNBibtexError: if C{text} contains no digits
        '''
        match_obj = re.search(r'\d+', text)
        if match_obj is None:
            raise BKNBibtexError('No digits found in text: "' + text + '".')
        return int(match_obj.group(0))

    def compare(self, other):
        '''Compares this fingerprint to another; the higher the value
        returned, the greater the chance that the two fingerprinted
        entries represent the same item.

        @param other:
         the fingerprint to which this fingerprint should be compared
        @type other: L{Fingerprint}
        @return: a score between 0 and 100
        @warning:
        distinct subclasses of Fingerprint cannot necessarily be compared!

        '''
        if self.__class__ is not other.__class__:
            warnings.warn(BKNBibtexWarning(
                'comparing fingerprints of different types may lead to unexpected behavior.'),
                stacklevel=2
                )

        score = 0

        # we weigh several factors in computing the score

        # the year has the highest weight. two entries with different years
        # are very unlikely to represent the same item
        if self.year != other.year:
            return 0

        # next we look at titles
        if self.title == other.title:
            score += 50
        elif self.title in other.title or other.title in self.title:
            score += 40
        # it might be good to put something about similarity here

        # finally we should look at word frequency
        differences = 0.0 # total difference in token frequency
        count = 0.0 # total number of tokens
        # set(dict) iterates keys on both Python 2 and 3; the previous
        # keys() + keys() concatenation only worked on Python 2
        for key in set(self.token_freq) | set(other.token_freq):
            count += self.token_freq.get(key, 0) + other.token_freq.get(key, 0)
            differences += abs(self.token_freq.get(key, 0) -
                               other.token_freq.get(key, 0))
        if count == 0:
            # no tokens on either side: skip the frequency factor rather
            # than discarding the title/year score (also avoids dividing
            # by zero). The old code returned 0 here, throwing away a
            # title match.
            return score
        score += (1 - differences / count) * 50
        return score

def suggest_duplicates(
        bib_list,
        filename,
        fp_class=Fingerprint,
        threshold=50):
    '''Suggests, for every entry in every bib, which entries of the other
    bibs may be duplicates of it.

    @param bib_list:
        a list of L{Bibliographies<Bibliography>} which may contain
        duplicate entries. Duplicates are assumed to exist only between
        bibliographies; entries within any single C{Bibliography} are
        assumed to be distinct.
    @param filename:
        the filename or path to use when saving suggestions. If a file
        already exists at the given location it will be opened and any
        existing suggestions in the file will be used as a starting point.
    @param fp_class:
        a C{Fingerprint} class which will be used for comparing entries.
        Defaults to the base C{Fingerprint} class.
    @type fp_class: L{Fingerprint}
    @param threshold:
        a number between 0 and 100. Two records are suggested to be the
        same when their fingerprints compare with a confidence greater
        than this value.
    @rtype: C{None}
    '''
    # all of the work, including saving the results, happens inside the
    # DuplicateDetector constructor
    DuplicateDetector(bib_list, filename, fp_class, threshold)

class DuplicateDetector():
    def __init__(self, 
        bib_list,
        filename,
        fp_class=Fingerprint,
        threshold=50):
        self.bib_list = bib_list
        self.filename = filename
        self.fp_class = fp_class
        self.threshold = threshold
        
        print "Auto deduplication in progress..."
        self._load_existing_suggestions()
        self._display_statistics()
        self._suggest_duplicates()
        self._save_suggestions()
    
    def _load_existing_suggestions(self):
        if os.path.exists(self.filename):
            print 'Opening existing suggestion file, "{0}".'.format(self.filename)
            with open(self.filename, 'r') as f:
                self.suggestions = json.load(f)
            print 'Loaded {0} suggestions from existing suggestion file.'.format(len(self.suggestions))
        else:
            self.suggestions = []
    
    def _save_suggestions(self):
        with open(self.filename, 'w') as f:
           json.dump(self.suggestions, f)
    
    def _display_statistics(self):
        stats = {
            'num_of_files' : len(self.bib_list),
            'len_product' : 'x'.join([str(len(bib.record_list)) for bib in self.bib_list])
        }
        print 'Performing duplicate detection for {num_of_files} files. Will ' \
              'check {len_product} records in all.'.format(**stats)
    
    def _suggest_duplicates(self):
        same_as_index = SameAsIndex()        
        same_as_index.build_index_from_suggestions(self.suggestions, use_only_accepted_suggestion=False)
        
        # create fingerprint index
        fingerprint_index = {}
        for bib in self.bib_list:
            for record in bib.record_list:
                fingerprint = self.fp_class(record)
                fingerprint_index[fingerprint] = record
        
        for bib in self.bib_list:
            for record in bib.record_list:
                # fingerprint is expensive to compute and worth
                # calculating before the loop begins
                fingerprint = self.fp_class(record)
                for candidate_fingerprint in fingerprint_index.keys():
                    confidence = fingerprint.compare(candidate_fingerprint)
                    if confidence > self.threshold:
                        candidate = fingerprint_index[candidate_fingerprint]
                        if record.parent_bib_id == candidate.parent_bib_id:
                            # we are assuming that records in the same bib are distinct
                            continue
                        if candidate.global_id() in same_as_index.get_equivalent_records(record):
                            # this assertion is already implied so we don't need to do anything
                            continue
                        suggestion = {
                            u'confidence' : confidence,
                            u'subject' : record.global_id(),
                            u'object' : candidate.global_id(),
                            u'suggestion' : u'same as',
                            u'reviewed' : False,
                            u'accepted' : False
                            }
                        self.suggestions.append(suggestion)
                        same_as_index.add_equivalent_records(record, candidate)
       
class SimpleConfirmationUI():
    '''A curses prompt loop that lets the user accept or reject the
    unreviewed "same as" suggestions stored in C{filename}.

    The review runs as a side effect of construction; decisions are
    written back to the same file.
    '''
    def __init__(self, filename):
        self.filename = filename
        curses.wrapper(self)
        # this should restore normal terminal settings automatically,
        # but may not do so. This seems to be the fault of starting
        # multiple curses sessions in one script
        # https://bugs.launchpad.net/ubuntu/+source/python2.6/+bug/492140
    
    def __call__(self, stdscr):
        '''Entry point invoked by curses.wrapper with the live screen.'''
        self.stdscr = stdscr
        self.height, self.width = stdscr.getmaxyx()
        self._simple_confirmation_ui()    
    
    def display(self, text):
        '''Writes text to the screen, encoded and wrapped to fit.'''
        encoded_text = text.encode(PREFERRED_ENCODING)
        wrapped_text = textwrap.fill(
             encoded_text,
             self.width - 1,
             replace_whitespace=False,
             drop_whitespace=False)
        self.stdscr.addstr(wrapped_text)
    
    def get_user_response(self, options):
        '''Waits for the user to respond by entering one of the characters
        specified in C{options} and returns the user's response.
        '''
        options = list(options) # converts string to list
        response = -1
        while response not in options:
            if response != -1:
                curses.flash() # signal an invalid keypress
            try:
                response = chr(self.stdscr.getch()).lower()
            except ValueError:
                pass # getch() can return codes outside chr()'s range
        return response
    
    def clear_screen(self):
        self.stdscr.clear()
        self.stdscr.refresh()
        time.sleep(0.1) # a slight pause makes for a better user experience
    
    def _simple_confirmation_ui(self):
        '''The actual work of simple_confirmation_ui() is done here.'''
        with open(self.filename, 'r') as f:
            suggestions = json.load(f)
        # a list comprehension and a key-based sort behave identically to
        # the old filter()/cmp() pair and remain valid on Python 3, where
        # cmp-style sort was removed
        same_as_suggestions = [
            s for s in suggestions
            if s['suggestion'] == 'same as' and s['reviewed'] == False
            ]
        if len(same_as_suggestions) == 0:
            self.display("No same-as suggestions require confirmation.")
            time.sleep(1)
            return
        same_as_suggestions.sort(key=lambda s: s['confidence'])
        total = len(same_as_suggestions)
        for num, suggestion in enumerate(same_as_suggestions):
            self.clear_screen()
            self.display('Same as suggestion {0} of {1}\n\n'.format(num + 1, total))
            self.display('Confidence: {0}\n\n'.format(str(suggestion['confidence'])))
            entry1 = record_from_id(suggestion[u'subject'])
            entry2 = record_from_id(suggestion[u'object'])
            temp_bib = Bibliography()
            # it is important that the added entries receive unique ids!
            temp_bib.add_record(entry1, id=temp_bib.get_unique_id())
            temp_bib.add_record(entry2, id=temp_bib.get_unique_id())
            with warnings.catch_warnings():
                warnings.simplefilter("ignore")
                temp_bib.add_bibtex_output()
            # prefer a hand-written confirmation text when an entry has
            # one; otherwise fall back to its generated BibTeX
            if not entry1.attribute_is_blank(u'confirmation-text'):
                display1 = entry1[u'confirmation-text']
            else:
                display1 = tex2ascii(temp_bib.record_list[0].bibtex_out)
            if not entry2.attribute_is_blank(u'confirmation-text'):
                display2 = entry2[u'confirmation-text']
            else:
                display2 = tex2ascii(temp_bib.record_list[1].bibtex_out)
            self.display(u'{0} [{1}]\n\n'.format(display1, entry1.global_id()))
            self.display(u'{0} [{1}]\n\n'.format(display2, entry2.global_id()))
            self.display('Do these entries represent the same item? [ynq]')
            response = self.get_user_response('ynq')
            if response == 'n':
                suggestion['accepted'] = False
                suggestion['reviewed'] = True
            elif response == 'y':
                suggestion['accepted'] = True
                suggestion['reviewed'] = True
            elif response == 'q':
                break
        self.clear_screen()
        self.display('Saving your work...')
        # the reviewed dicts are shared with the full suggestion list, so
        # the edits made above are persisted here
        with open(self.filename, 'w') as f:
            json.dump(suggestions, f)

def merge_bibs(bibs, suggestion_file):
    '''Merges the listed bibliographies using any accepted same as
    assertions from the supplied suggestion file.
    
    @todo:
        This method needs to be revised so as to assign ids in a
        consistent manner, i.e. the same id should be assigned to the
        same record on multiple passes.
    '''    
    result = Bibliography()
    print 'Merging bibs...'
    # load suggestion file
    with open(suggestion_file, 'r') as f:
        suggestions = json.load(f)
        
    same_as_index = SameAsIndex()
    same_as_index.build_index_from_suggestions(suggestions)

    bibs.reverse() # this ensures that records listed first have precedence

    # those records which have not yet been merged, i.e. all of them
    remaining_records = reduce(lambda x, y: x + y.record_list, bibs, [])
        
    # remove suppressed records
    for suggestion in suggestions:
        if not suggestion['suggestion'] == 'suppress':
            continue
        if not (suggestion['reviewed'] and suggestion['accepted']):
            continue
        print "supressing record",
        for r in remaining_records:
            if r.global_id() == suggestion[u'id']:
                try:
                    remaining_records.remove(r)
                except ValueError:
                    pass
        
    # add records to result, merging entries as needed
    while len(remaining_records) > 0:
        record = remaining_records.pop()
        equivalent_records_ids = same_as_index.get_equivalent_records(record)
        equivalent_records = [record_from_id(id) for id in equivalent_records_ids]
        new_record = result.add_record(record, id=result.get_unique_id())
        for other_record in equivalent_records:
            merge_records(new_record, other_record)
            try:
                for r in remaining_records:
                    if other_record.global_id() == r.global_id():
                        remaining_records.remove(r)
            except ValueError:
                pass # we already removed it.
        if new_record.attribute_is_blank(u'same-as'):
            new_record[u'same-as'] = [u'@@' + id for id in equivalent_records_ids]
        else:
            new_record[u'same-as'] += [u'@@' + id for id in equivalent_records_ids]
        new_record[u'same-as'] = list(set(new_record[u'same-as'])) # remove duplicates
    
    result.assign_numerical_ids()
    return result        

def merge_records(destination, source):
    '''Modifies destination in place by copying over any non-standard
    fields of source that destination does not already have.

    The special u'same-as' field is treated as a list: source's value is
    appended to destination's existing value rather than ignored.
    '''
    for attribute in source.attribute_map:
        if attribute == u'same-as':
            # accumulate same-as assertions instead of overwriting them
            if not destination.attribute_is_blank(attribute):
                 destination[attribute] += source[attribute]
            else:
                 destination[attribute] = source[attribute]
        elif attribute not in BIBTEX_FIELDS and attribute not in destination.attribute_map:
            destination[attribute] = source[attribute]

def merge_bibs_by_id(bibs):
    '''Merges the given bibliographies, combining entries that share an id.

    Field values from earlier-listed bibs take precedence.
    '''
    merged = Bibliography()
    # gather every entry to be merged; each is consumed exactly once below
    pending = []
    for bib in bibs:
        pending.extend(bib.record_list)
    # we pop entries off the end of the list, so reverse it to guarantee
    # that values from the earliest listed bib are used first
    pending.reverse()
    while pending:
        candidate = pending.pop()
        try:
            # an entry with this id may already exist; if so, fold the
            # candidate's extra fields into it
            existing = merged.entry_from_id(candidate[u'id'])
            merge_records(existing, candidate)
        except BKNBibtexError:
            merged.add_record(candidate)
    return merged

class SameAsIndex():
    '''Tracks which records have been asserted to represent the same item.

    Internally maps each record's global id to the set of ids (always
    including its own) known to be equivalent to it.

    NOTE(review): equivalence is not made fully transitive across separate
    calls -- after add(a, b) then add(b, c), a's set lacks c; confirm this
    is intended.
    '''
    def __init__(self):
        self._same_as_index = {}

    def build_index_from_suggestions(self, suggestions, use_only_accepted_suggestion=True):
        '''Populates the index from "same as" suggestion dictionaries.

        @param use_only_accepted_suggestion:
            when True (the default), only suggestions that have been both
            reviewed and accepted are indexed.
        '''
        for suggestion in suggestions:
            if suggestion['suggestion'] != 'same as':
                continue
            if use_only_accepted_suggestion:
                if not (suggestion['reviewed'] and suggestion['accepted']):
                    continue
            entry1 = record_from_id(suggestion['subject'])
            entry2 = record_from_id(suggestion['object'])
            self.add_equivalent_records(entry1, entry2)

    def add_equivalent_records(self, *records):
        '''Records that all of the supplied records represent one item.'''
        ids = [record.global_id() for record in records]
        for id in ids:
            self._same_as_index.setdefault(id, set()).update(ids)

    def get_equivalent_records(self, record):
        '''Returns the set of global ids equivalent to C{record}.'''
        try:
            return self._same_as_index[record.global_id()]
        except KeyError:
            return set([record.global_id()]) # a record is always the same as itself!

    def __contains__(self, record):
        # dict.has_key() is deprecated and was removed in Python 3;
        # the `in` operator is the equivalent membership test
        return record.global_id() in self._same_as_index
