#!/usr/bin/python

"""
Low-Level Features Computation (for multiple files)
Python 3
"""

import os
import sys
import string
import math
from collections import defaultdict

# Canonical feature-name keys. These strings are shared by the config file
# (config/features.txt), the self.features switch map, the self.result dict
# and the header row of the output TSV — keep them in sync with the config.
_avg_sent_in_words = "avg_sent_in_words"
_avg_sent_in_chars = "avg_sent_in_chars"
_long_words_portion = "long_words_portion"
_avg_punct_per_sent = "avg_punct_per_sent"
_readability = "readability"
_entropy = "entropy"

class FeatureCollector(object):
    """
    Low-level stylometric feature collector.

    Reads the set of enabled features from config/features.txt, then
    processes tokenized corpora (one token per line, sentences separated
    by blank lines) and writes one tab-separated feature row per file.
    """
    def __init__(self):
        self.get_featconfig()

        # Resolve the per-feature switches once from the config mapping.
        self.useAvgSentW = self.features.get(_avg_sent_in_words, False)
        self.useAvgSentC = self.features.get(_avg_sent_in_chars, False)
        self.useLongWords = self.features.get(_long_words_portion, False)
        self.usePunct = self.features.get(_avg_punct_per_sent, False)
        self.useReadability = self.features.get(_readability, False)
        self.useEntropy = self.features.get(_entropy, False)

        if not any((self.useAvgSentW, self.useAvgSentC,
                   self.useLongWords, self.usePunct,
                   self.useReadability, self.useEntropy)):
            raise ValueError("At least one feature should be used!")

        self.set_counters()
        self.result = {}

    def get_featconfig(self):
        """
        Retrieve the list of features to be used from the configuration file.

        Each non-empty line of config/features.txt is expected to be
        "<feature_name> <0|1>"; the flag is stored as a bool in
        self.features. Returns True.
        """
        self.features = {}
        with open("config/features.txt", "r", encoding="UTF8") as fin:
            for line in fin:
                line = line.strip()
                if not line:
                    continue
                items = line.split()
                self.features[items[0]] = bool(int(items[1]))
        return True

    def set_counters(self):
        """
        Reset all per-corpus counters; call once before each file.
        Returns True.
        """
        self.result = {}
        self.ctr_s = 0  # Sentences counter
        self.ctr_w = 0  # Words counter
        self.ctr_c = 0  # Chars counter
        self.ctr_l = 0  # Long words counter
        self.m_len = 6  # A word is "long" when strictly longer than this
        self.ctr_t = 0  # Tokens counter
        self.ctr_p = 0  # Punctuation marks counter
        self.ctr_v = 0  # Vowels counter
        # Russian vowels, used as a cheap syllable proxy for readability
        # (duplicate "я" removed from the original literal; same set).
        self.vowels = set("ёуеыаоэяию")
        if self.useEntropy:
            self.words = defaultdict(float)
        return True

    def process_sent(self, tokens):
        """
        Update counters for one sentence given as a list of tokens.

        A token consisting solely of punctuation counts as a punctuation
        mark; everything else counts as a word. Empty token lists (e.g.
        produced by consecutive blank separator lines) are ignored so the
        sentence counter is not inflated. Returns True.
        """
        if not tokens:
            # Fix: an empty "sentence" used to bump ctr_s and skew averages.
            return True
        words = [x for x in tokens if x.strip(string.punctuation)]
        self.ctr_s += 1
        self.ctr_w += len(words)
        self.ctr_c += len("".join(tokens))
        self.ctr_l += len([x for x in words if len(x) > self.m_len])
        self.ctr_t += len(tokens)
        self.ctr_p += len(tokens) - len(words)
        self.ctr_v += len([x for x in "".join(words) if x in self.vowels])
        if self.useEntropy:
            for word in words:
                self.words[word] += 1
        return True

    def normalize_freqs(self):
        """
        Convert raw word counts in self.words to relative frequencies.
        A no-op on an empty vocabulary (avoids division by zero).
        Returns True.
        """
        total = sum(self.words.values())
        if not total:
            return True
        for word in self.words:
            self.words[word] /= total
        return True

    def compute_features(self):
        """
        Compute the enabled features from the counters into self.result.

        Raises ZeroDivisionError when the input contained no sentences
        or no words. Returns True.
        """
        if self.useAvgSentW:
            self.result[_avg_sent_in_words] = float(self.ctr_w) / self.ctr_s
        if self.useAvgSentC:
            self.result[_avg_sent_in_chars] = float(self.ctr_c) / self.ctr_s
        if self.useLongWords:
            self.result[_long_words_portion] = float(self.ctr_l) / self.ctr_w
        if self.usePunct:
            self.result[_avg_punct_per_sent] = float(self.ctr_p) / self.ctr_s
        if self.useReadability:
            # Flesch-Kincaid-style grade: 0.39 * words/sentence +
            # vowels/word - 15.59, with vowels approximating syllables.
            # NOTE(review): the classic formula weights syllables-per-word
            # by 11.8 — confirm the missing coefficient is intentional.
            self.result[_readability] = 0.39 * float(self.ctr_w) / self.ctr_s + float(self.ctr_v) / self.ctr_w - 15.59
        if self.useEntropy:
            # Shannon entropy (natural log) of the word distribution;
            # assumes normalize_freqs() has already been called.
            self.result[_entropy] = -sum(freq * math.log(freq) for freq in self.words.values())
        return True

    def parse_file(self, filename):
        """
        Process one tokenized file: one token per line, sentences
        separated by blank lines.

        Fix: a trailing sentence not followed by a blank line is now
        flushed instead of being silently dropped. Returns True.
        """
        tokens = []
        with open(filename, "r", encoding="UTF8") as fin:
            for line in fin:
                line = line.strip()
                if not line:
                    self.process_sent(tokens)
                    tokens = []
                    continue
                tokens.append(line)

        # Flush the last sentence if the file does not end with a blank line.
        self.process_sent(tokens)

        if self.useEntropy:
            self.normalize_freqs()

        return True

    def calc_mult(self, folder, output):
        """
        Process multiple files and write the results to data/<output>.
        Folder should contain ONLY tokenized files to be processed.

        The header row lists the enabled feature names sorted
        alphabetically; each data row lists the feature values in the
        same (sorted-by-key) order. Returns True.
        """
        base = os.path.dirname(sys.argv[0])
        # List the directory once so paths and corpus labels cannot
        # desynchronize (the original called os.listdir twice and zipped).
        names = os.listdir(folder)

        with open(os.path.join("data/", output), "w", encoding="UTF8") as fout:
            header = sorted(name for name, used in self.features.items() if used)
            fout.write("corpus\t" + "\t".join(header) + "\n")  # Write header

            for fileshort in names:
                filename = os.path.join(base, folder, fileshort)
                self.set_counters()
                # Corpus label = filename up to the first dot. Fix: the old
                # slice fileshort[:fileshort.find(".")] chopped the last
                # character when the name contained no dot.
                label = fileshort.split(".", 1)[0]
                fout.write(label + "\t")
                self.parse_file(filename)
                self.compute_features()
                fout.write("\t".join(str(v) for _, v in sorted(self.result.items())) + "\n")
                print(filename, "done!")
        return True

if __name__ == "__main__":
    usage = "python collect_features.py folder_name output_file_name"
    # Explicit argument check instead of relying on the unpacking
    # ValueError falling into the broad except below.
    if len(sys.argv) < 3:
        print(usage)
        sys.exit(1)
    folder, output = sys.argv[1:3]
    try:
        collector = FeatureCollector()
        collector.calc_mult(folder, output)
    except Exception as e:
        # Top-level boundary: report the error and remind the user of the
        # expected invocation rather than dumping a traceback.
        print(e)
        print(usage)