"""   Copyright 2011 Bart Enkelaar

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
"""

import re

# Sentence-ending characters mapped to whether the character is kept on the
# sentence after splitting ('.' is dropped, '!' and '?' are kept).
CHARS = {".": False, "!": True, "?": True}
# Words that, immediately before a hashtag, imply the tag is being used as a
# real word in the sentence (so only the '#' sign is stripped, not the word).
LEAD_WORDS = ["the", "a", "at", "along", "in", "over", "to", "for", "from", "with"]

# Matches one non-nested parenthesised group; the text is in group 'content'.
BRACKETS_REGEX = r"\((?P<content>[^()]+)\)"
# I know the regex isn't airtight. It won't catch nasa.gov/fun,
# and doesn't take ports (this I may need to change) or authorization into account.
URI_REGEX = r"(?i)(?:(?P<protocol>\w+)://)?(?P<host>[\w\d]+(?(protocol)(?:\.[\w\d]+)*|(?:\.[\w\d]+){2,}))(?P<rest>(?:/\S+)*)"
# Common ASCII smileys (e.g. ":-)", ";D", "^_^"), with lookarounds so the
# match is not embedded inside a longer word or number.
SMILEY_REGEX = r"(?<!\w\d)(?:[;:B8][-0]?[\(\)Dpb\[\]Oo]|[\^\$Oo0]_[\^\$Oo0])(?![\w\d])"

class TextExtractor:
    """Class that can be used to 'detweet' tweets.

    Usage: ``TextExtractor(subject).extract_english(tweet)``.

    Attributes:
        subject: free-form label for the text being processed (stored for
            callers; not consulted by the extraction logic itself).
        line_limit: maximum sentence length; longer lines are split near
            their center by _control_length/_split_center.
    """

    def __init__(self, subject, line_limit=75):
        self.subject = subject
        self.line_limit = line_limit

    def extract_english(self, line):
        """Takes a tweet and returns a list of human readable sentences
            Currently:
                - Removes URIs (imperfectly)
                - Removes hashtags that are not preceded by one of a list of
                    words that imply the tag is being used in the sentence,
                    otherwise removes the hash sign.
                - Removes @references and RT notes at the beginning of a tweet,
                    removes the @ sign from @references at other places.
                - Removes smileys
                - Splits lines on full stops, exclamation marks and question marks
                - Extracts sentences between brackets
        """
        line = self._remove_uris(line)
        line = self._remove_tags(line)
        line = self._remove_at_names(line)
        line = self._remove_smileys(line)
        return self._split_sentences(line)

    def _remove_uris(self, line):
        """Strip anything URI_REGEX recognises as a URI (imperfect, see the
        caveats next to the pattern) and trim surrounding whitespace."""
        return re.sub(URI_REGEX, "", line).strip()

    def _remove_tags(self, line):
        """Drop hashtags entirely, unless a LEAD_WORDS word directly precedes
        the tag (the tag is part of the sentence) — then only drop the '#'."""
        # Build one negative lookbehind per lead word, e.g. (?<!\bthe\s).
        neg_behinds = "".join("(?<!\\b%s\\s)" % w for w in LEAD_WORDS)
        hash_stripped = re.sub("(?i)%s#\\w+" % neg_behinds, "", line)
        return self._remove_char("#", hash_stripped)

    def _remove_at_names(self, line):
        """Recursively peel leading 'RT'/'@name' noise off the tweet; for
        @references elsewhere in the line, just drop the '@' sign."""
        if line.startswith(("RT", "@")):
            # Drop the RT/@ prefix plus the first word after it, then re-check
            # in case several @names/RTs are chained at the start.
            remainder = re.sub("\\w+", "", line.lstrip("RT@ "), count=1).strip()
            line = self._remove_at_names(remainder)
        return self._remove_char("@", line)

    def _remove_smileys(self, line):
        """Strip common ASCII smileys matched by SMILEY_REGEX."""
        return re.sub(SMILEY_REGEX, "", line)

    def _remove_char(self, character, string):
        """Remove every occurrence of character and trim whitespace."""
        return string.replace(character, "").strip()

    def _split_sentences(self, text):
        """Break cleaned text into bracketed asides plus sentence fragments,
        then enforce the configured line-length limit."""
        result = self._extract_brackets(text)
        result = self._split_line_endings(result)
        return self._control_length(result)

    def _extract_brackets(self, text):
        """Pull multi-word parenthesised spans out as their own sentences;
        shorter bracketed bits (one or two words) are discarded.

        Bug fix: remove exactly one bracket group per iteration (count=1).
        Previously a single unbounded re.sub deleted ALL remaining groups
        after only the first one had been inspected, silently losing every
        later parenthesised span.
        """
        extracted = []
        work = text
        match = re.search(BRACKETS_REGEX, work)
        while match:
            content = match.group("content")
            if content.count(" ") > 1:  # keep spans of three or more words
                extracted.append(content)
            work = re.sub(BRACKETS_REGEX, "", work, count=1)
            match = re.search(BRACKETS_REGEX, work)
        return extracted + [work]

    def _split_line_endings(self, result):
        """Split every line on each CHARS key; '!' and '?' are kept on the
        sentence they end, '.' is dropped."""
        # .items() instead of the Python-2-only .iteritems() so this runs on
        # both Python 2 and Python 3.
        for char, keep in CHARS.items():
            split_result = []
            for line in result:
                split_result += self._split_on(char, line, keep)
            result = split_result
        return result

    def _split_on(self, character, text, keep_char=False):
        """Split text on character (optionally re-appending the character to
        each piece) and trim decoration from every non-empty piece."""
        sentences = text.split(character)
        if keep_char:
            sentences = [part + character for part in sentences[:-1]] + [sentences[-1]]
        return [part.strip(":-\" '()") for part in sentences if part.strip()]

    def _control_length(self, lines):
        """Split any line longer than line_limit roughly at its middle."""
        result = []
        for line in lines:
            if len(line) > self.line_limit:
                result += self._split_center(line)
            else:
                result.append(line)
        return result

    def _split_center(self, line):
        """Split an over-long line near its center: prefer a lone comma in a
        small window around the midpoint, then any comma, then a space."""
        assert "." not in line  # full stops were consumed by _split_line_endings
        assert len(line) > self.line_limit
        # Floor division so the slice indices stay ints on Python 3 as well.
        tenth = self.line_limit // 10
        center_pos = len(line) // 2
        center_start = center_pos - tenth
        center_end = center_start + 2 * tenth
        center_text = line[center_start:center_end]

        if center_text.count(",") == 1:
            left, right = center_text.split(",")
            return [line[:center_start] + left,
                    right + line[center_end:]]
        if "," in line:
            # '.' cannot occur here (asserted above), so it is safe to use it
            # as a throwaway delimiter to split after every comma.
            return line.replace(", ", ",.").split(".")

        half_space = line[center_pos:].find(" ")
        if half_space >= 0:
            return [line[:center_pos + half_space],
                    line[center_pos + half_space:]]
        return [line]
