#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
Croquette -- by Michael Lee and David Gomes

Please see /README_Croquette.txt
Licensed under the MIT License

Written in Python 2.7
"""

import os.path
import meta
import data

class UnknownFileType(Exception):
    """Raised when an input file's extension is neither 8xp nor txt."""
    pass

class Parser():
    def __init__(self,
                 colons=False,
                 tab_length=4,
                 encoding="ascii",
                 detokenize_type=None):
        """Creates a parser and applies the initial settings.

        Arguments:
         - colons: If True, a colon is prefixed to every output line.
         - tab_length: How many spaces a single tab expands to.
         - encoding: "ascii" for plain output; "special" makes the
                     .txt output use special characters as much as
                     possible (for example, '->' becomes '→').
         - detokenize_type: "basic" forces every 8xp file to convert
                            as TI-Basic, "axe" forces Axe, and None
                            lets the parser autodetect per program.
        """
        # Route everything through the setters so their validation
        # (notably set_detokenize_type's sanity check) is applied.
        self.set_colons(colons)
        self.set_tab_length(tab_length)
        self.set_encoding(encoding)
        self.set_detokenize_type(detokenize_type)

        self._program_name = ""
        self._comment = self._pad_or_truncate(
            "Tokenized using TIConvert v{0}".format(meta.version), 42)

        self._whitespace_chars = ["\t", "\n", " "]


    ########################################
    ### TWEAKING THE PARSER
    ###

    def set_options(self, colons=False,
                    tab_length=4,
                    encoding="ascii",
                    detokenize_type=None):
        """Sets the various options of the parser all at once.

        Arguments:
         - colons: If True, appends a colon to the start of each line.
         - tab_length: The amount of spaces a single tab equals.
         - encoding: If encoding=="ascii", then the .txt output will be
                     normal.  If encoding=="special", then the .txt output will
                     use special characters as much as possible.  For example,
                     if encoding=="special", then '->' will equal '→'.
         - detokenize_type: If detokenize_type=="basic", all 8xp files
                            will be forced to convert to TI-Basic files.
                            If detokenize_type=="axe", all 8xp files
                            will be forced to convert to Axe files.
        """
        self._colons = colons
        self._tab_length = tab_length
        self._encoding = encoding
        self._detokenize_type = detokenize_type
        return

    def set_colons(self, colons):
        """Sets whether or not colons will be appended to the start of each line.

        Arguments:
         - colons: A bool"""
        self._colons = colons
        return

    def set_tab_length(self, tab_length):
        """Sets the number of spaces per tab.

        Arguments:
         - tab_length: An int."""
        self._tab_length = tab_length
        return
    
    def set_encoding(self, encoding):
        """Sets the encoding; if output will use special ascii chars or not.

        Arguments:
         - encoding: Either "ascii" or "special".
        """
        self._encoding = encoding
        return

    def set_detokenize_type(self, detokenize_type=None):
        """Sets the detokenizing type to either Axe, Basic, or autodetect.

        Arguments:
         - detokenize_type: "axe"   forces any 8xp to convert using Axe tokens.
                            "basic" forces any 8xp to convert using Basic tokens.
                            None    will make the parser automatically detect the type.
                                    (axe programs start with a period.)
        """
        sanity = ["axe", "basic", None]
        if detokenize_type not in sanity:
            self._detokenize_type = None
        else:
            self._detokenize_type = detokenize_type
        return


    ########################################
    ### READING THE STATE OF THE PARSER
    ###

    def get_colons(self):
        """Returns if appending colons is enabled."""
        return self._colons

    def get_tab_length(self):
        """Returns the number of spaces per tab."""
        return self._tab_length

    def get_encoding(self):
        """Returns which type of output ("ascii" or "special") is used."""
        return self._encoding

    def get_detokenize_type(self):
        """Returns the detokenize type"""
        return self._detokenize_type

    def detect(self, input_path):
        """Returns the file extention type."""
        return input_path[-3:].lower()


    ########################################
    ### CONVERSION
    ###

    def just_do_it(self, input_path, output_path, program_name=None):
        """Detects the filetype and compiles or decompiles accordingly.

        Arguments:
         - input_path: A filepath to the 8xp or txt file.
         - output_path: Where the converted file should be written to.
         - program_name: The name of your 8xp file.  If left blank,
                         the program will figure it out on its own.

        Returns:
         - Nothing

        Raises:
         - UnknownFileType: if the extension is neither 8xp nor txt.
        """
        input_type = self.detect(input_path)
        if input_type == "8xp":
            self.convert_8xp_to_txt(input_path, output_path)
        elif input_type == "txt":
            self.convert_txt_to_8xp(input_path, output_path, program_name)
        else:
            # Fixed misspelling in the user-facing message
            # ("extention" -> "extension").
            raise UnknownFileType("Unknown file extension")
        return

    def convert_8xp_to_txt(self, input_path, output_path):
        """Decompiles an 8xp file and writes the result as text.

        Arguments:
         - input_path: A filepath to the 8xp file.
         - output_path: Where the txt file should be written to.

        Returns:
         - Nothing
        """
        text = self.convert_8xp_to_string(input_path)
        self.write_to_file(text, output_path)

    def convert_txt_to_8xp(self, input_path, output_path, program_name):
        """Compiles a txt file into an 8xp file.

        Arguments:
         - input_path: A filepath to the txt file.
         - output_path: Where the 8xp file should be written to.
         - program_name: Name for the program (derived from
                         output_path when falsy).

        Returns:
         - Nothing
        """
        source_text = self.get_raw_string(input_path)
        self.convert_string_to_8xp(source_text, output_path, program_name)

    def convert_8xp_to_string(self, input_path, encoding="ascii"):
        """Reads an 8xp file and returns its decompiled source text.

        NOTE(review): every call overwrites the parser's configured
        encoding with the `encoding` argument (default "ascii"), so a
        prior set_encoding() is silently discarded unless the caller
        passes the encoding explicitly -- confirm this is intended.

        Arguments:
         - input_path: A filepath to the 8xp file.
         - encoding: "ascii" for plain output; "special" makes the
                     output use special characters as much as possible
                     (for example, '->' will equal '→').

        Returns:
         - A string containing the decompiled program text.
        """
        self.set_encoding(encoding)
        raw_bytes = self.get_raw_hex(input_path)
        program_tokens = self.remove_metadata(self.clean_hex(raw_bytes))
        return self.detokenize(program_tokens)

    def convert_string_to_8xp(self, string, output_path, program_name):
        """Turns a string into an 8xp file on disk.

        Arguments:
         - string: A string containing an Axe program.  It doesn't matter if
                   string has newlines, tabs, extraneous whitespace, or colons
                   at the start of each line.
         - output_path: Where the 8xp file should be written to.
         - program_name: The name of the program.  If left blank, the
                         program will figure it out on its own, based on the
                         output_path.

        Returns:
         - Nothing
        """
        if not program_name:
            # Derive the name from the output filename (up to the first
            # dot).  Guarding find(".") == -1 fixes the old slice,
            # which silently dropped the final character of dotless
            # filenames (name[:-1]).
            base = os.path.split(output_path)[1]
            dot = base.find(".")
            if dot != -1:
                base = base[:dot]
            program_name = base[:8].upper()
        string = self.clean_string(string)
        tokens = self.tokenize(string)
        output = self.wrap_metadata(tokens, program_name)
        self.write_to_file(output, output_path, "wb")
        return

    def convert_string_to_token(self, string):
        """Converts string into a list of tokens (hex, represented by strings).

        Notes:
         - Two-byte vars will be be one item in the list, not two.

        Arguments:
         - string: The human-readable program text.
        """
        if string:
            string = self.clean_string(string)
            return self.tokenize(string)
        return []

    def convert_string_to_bytestring(self, string):
        """Converts string into a bytestream (a bytearray of token bytes).

        Notes:
         - Two-byte vars will be two items in the stream, not one.

        Arguments:
         - string: The human-readable program text.

        Returns:
         - A bytearray.
        """
        tokens = self.tokenize(self.clean_string(string))
        byte_pairs = self._simplify_tokens(tokens)
        return bytearray(int(pair, 16) for pair in byte_pairs)

    def refresh_string(self, string, indents):
        """Retokenizes a string so it conforms to the current settings.

        Defect fixed: stray debug `print` statements leaked the
        intermediate string and the final output to stdout.

        Arguments:
         - string: the string to refresh
         - indents: If True, cleans up indentation as well.

        Returns:
         - The refreshed string ("" for falsy input).
        """
        if not string:
            return ""
        if indents:
            string = self.clean_string(string)
        else:
            string = string.replace("\t", " " * self._tab_length)
        # Round-trip through the tokenizer so the text picks up the
        # parser's current colon/tab/encoding settings.
        tokens = "".join(self.tokenize(string))
        clean_tokens = [tokens[x:x + 2] for x in range(0, len(tokens), 2)]
        return self.detokenize(clean_tokens, indents)
    

    ########################################
    ### CONVERTING 8XP TO TXT
    ###
        
    def get_raw_hex(self, input_path):
        """Opens the 8xp, and returns the raw contents inside.

        Arguments:
         - input_path: A filepath to the 8xp file.

        Returns:
         - A bytearray
        """
        try:
            source = open(input_path, "rb")
        except IOError:
            raise IOError("Filepath does not exist.")
        raw = bytearray(source.read())
        source.close()
        return raw

    def clean_hex(self, bytearray_input):
        """Takes raw, open hex, and returns the clean program hex.

        Arguments:
         - A bytearray containing a bunch of hex.

        Returns:
         - A list containing strings 2 chars in length(letters capitalized)
        >>> parser.clean_hex(bytearray(b'Hello'))
        ['48', '65', '6C', '6C', '6F']
        """
        tokens = []
        for i, byte in enumerate(bytearray_input):
            byte = hex(byte)[2:].upper()
            if len(byte) == 1:
                byte = "0" + byte
            tokens.append(byte)
        return tokens
    
    def remove_metadata(self, tokens):
        """Removes all the metadata from a just-opened file.

        Arguments:
         - tokens: Input must be in this format:
        ['48', '65', '6C', '6C', '6F']

        (See Parser.clean_hex and Parser.axe_to_txt)
        """
        return tokens[74:-2]

    def detokenize(self, tokens, is_indenting_enabled=True):
        """8xp source to txt.

        Defect fixed: two debug `print` statements leaked the detected
        token type and the indent flag to stdout on every call.

        Note: this may append a terminator ("3F") to the caller's
        `tokens` list (in-place mutation).

        Arguments:
         - tokens: Input must be stripped of all metadata and
                   in this format (list of strings):
                   ['48', '65', '6C', '6C', '6F']
         - is_indenting_enabled: If True (default), non-blank lines are
                   prefixed with tabs according to the token set's
                   block-structure rules.

        Returns
         - A string containing the fully decompiled text (with indents)
        """
        # Select the token table: honor a forced type, else autodetect
        # (Axe programs start with a period).
        if self._detokenize_type is None:
            if tokens[0] == "3A":   # A period (.)
                force = "axe"
            else:
                force = "basic"
        else:
            force = self._detokenize_type

        resource = data.tokens_type[force]

        one_byte = True     # False while expecting the 2nd byte of a token
        history = [] # Just in case somebody implements 3+ byte vars.
        line = []           # text pieces of the line being built
        result = []         # finished output pieces
        indents = [0, 0, 0] # rolling indent levels
        is_end = False      # the previous token ended a line
        offset = 0

        # When colons are prepended, ignore the trailing colon while
        # deciding whether a line is blank.
        if self._colons:
            tabcheck = -1
        else:
            tabcheck = None

        # Otherwise, the last line won't be tokenized.
        if tokens[-1] not in resource.end:
            tokens.append("3F")

        for i, byte in enumerate(tokens):
            if one_byte:
                if byte in resource.dictionary:
                    replacement = resource.dictionary[byte]
                    # A list holds [ascii, special] variants; pick the
                    # one matching the configured encoding.
                    if type(replacement) == list:
                        if self._encoding == "ascii":
                            replacement = replacement[0]
                        else:
                            replacement = replacement[-1]
                    if type(replacement) == str:
                        line.append(replacement)
                        if is_end:
                            # First token of a new line: block tokens
                            # (If/End/...) adjust the indent levels.
                            if byte in resource.specials:
                                for j in range(len(indents) - offset):
                                    indents[j+offset] += resource.specials[byte][j]
                            is_end = False
                        if byte in resource.end:
                            is_end = True
                            if resource.end[byte]:
                                offset = 0
                            else:
                                offset = 1
                    elif type(replacement) == dict:
                        # First byte of a two-byte token.
                        history.append(byte)
                        one_byte = False
                        if is_end:
                            is_end = False
            else:
                # Second byte of a two-byte token.
                replacement = resource.dictionary[history.pop()][byte]
                if type(replacement) == list:
                    if self._encoding == "ascii":
                        line.append(replacement[0])
                    else:
                        line.append(replacement[-1])
                else:
                    line.append(replacement)
                one_byte = True
            if is_end:
                if resource.end[byte]:
                    if self._colons:
                        line.append(":")
                    if is_indenting_enabled:
                        # Only indent lines with something besides
                        # whitespace (and the just-added colon).
                        if [x for x in line[:tabcheck] if x not in self._whitespace_chars]:
                            result.append("\t" * indents[0])

                    result.extend(line)
                    line = []
                    indents[0] = indents[1]
                    indents[1] = indents[2]

        final = "".join(result)
        final = final.replace("\t", " " * self._tab_length)

        # Add shortcuts
        for phrase, shortcut in resource.shortcuts.items():
            final = final.replace(phrase, shortcut)

        return final


    ########################################
    ### CONVERTING TXT TO 8XP
    ###

    def get_raw_string(self, input_path):
        """Opens the txt, and returns the raw contents inside.

        Arguments:
         - input_path: A filepath to the 8xp file.

        Returns:
         - A string
        """
        try:
            source = open(input_path, "r")
        except IOError:
            raise IOError("Filepath does not exist.")
        raw = "".join(source.read())
        source.close()
        return raw
    
    def clean_string(self, raw):
        """Removes extraneous whitespace and colons.

        Note: will remove colons from start of line only if
        ALL the lines start with a colon.

        Arguments:
         - raw: A string from the txt.

        Returns:
         - A string
        """
        temp = raw.split('\n')
        
        all_colons = True
        for line in temp:
            if not(line.startswith(':')):
                all_colons = False
                break
        
        new = []
        for line in temp:
            if all_colons:
                line = line[1:]
            new.append(line.strip())
        return '\n'.join(new)

    def tokenize(self, string):
        """Tokenizes a string.

        Defect fixed: `string[0] == "."` raised IndexError when
        autodetecting the type of an empty string; startswith() is
        safe for any length.

        Arguments:
         - string: A string of Axe source

        Returns:
         - tokens: A list of hex-string tokens, e.g.:
                   ['48', '65', '6C', '6C', '6F']
        """
        # Select the token table: honor a forced type, else autodetect.
        if self._detokenize_type is None:
            if string[:18] == ".0:Asm(prgmGRAMMER":
                # As far as I can tell, Grammer uses Basic tokens and
                # indentation rules.  I might make this more
                # special in the future.
                force = "basic"
            elif string.startswith("."):
                # Axe programs start with a period.
                force = "axe"
            else:
                force = "basic"
        else:
            force = self._detokenize_type
        resource = data.tokens_type[force]

        # Greedy longest-match: try the longest candidate word first,
        # shrinking until a known token matches.
        parse_size = resource.max_word_size

        output = []
        is_single = False   # inside a region tokenized char-by-char
        start_single = ""

        while string:
            for i in range(parse_size):
                attempt = string[:parse_size - i]
                if attempt in resource.inverted_dict:
                    output.append(resource.inverted_dict[attempt])
                    string = string[parse_size - i:]
                    if is_single:
                        # Leave single-char mode on its closing token.
                        if attempt in resource.singles[start_single]:
                            is_single = False
                            parse_size = resource.max_word_size
                    else:
                        if attempt in resource.singles:
                            is_single = True
                            start_single = attempt
                            parse_size = 1
                    break
                # Skips unidentifiable chars.
                # TODO: add a line number counter and something to display warnings.
                if (i == (parse_size - 1)) and (attempt not in resource.inverted_dict):
                    string = string[parse_size - i:]
                    break
        return output

    def wrap_metadata(self, tokens, program_name):
        """Takes tokens and turns into the full 8xp.

        Arguments:
         - tokens: A list of hex values.  Must be in a similar format:
                   ["A1", "43", "C6"...]
         - program_name: The 8-char name of the program.  Must not start
                         with a number, and must be all uppercase.  No
                         special characters allowed.
                         
        Returns:
         - A bytearray
        """
        self._set_program_name(program_name)
        ### Constructing Variable Data Section ###
        tokens = "".join(tokens)
        num_tokens = self._make_two_bytes(len(tokens)/2)
        program = [tokens[x:x+2] for x in range(0, len(tokens), 2)]
        program = [int(program[x], 16) for x in range(0, len(program))]
        program = bytearray(program)
        variable_data = num_tokens + program

        ### Constructing Data Section ###
        weird_thing = bytearray([b'\x0b', 0]) # Either 0x0B00 or 0x0D00
        variable_data_length = self._make_two_bytes(len(variable_data))
        variable_type_id = bytearray([5]) #0x05 == program
        # Program name already set.
        version = bytearray(b'\x00')
        archive_flag = bytearray(b'\x00') # 0x80 if archived, 0x00 if not.
        data_ = weird_thing + variable_data_length + variable_type_id + self._program_name + version + archive_flag + variable_data_length + variable_data

        ### Constructing everything else.
        signature = bytearray(b'**TI83F*\x1a\x0a\x00')
        # Comment already set
        data_length = self._make_two_bytes(len(data_))
        checksum = self._make_two_bytes(sum(data_))
        packet = signature + self._comment + data_length + data_ + checksum

        # Hooray!
        return packet

    # METADATA:
    #
    # Sizes:
    #       Offset    Length  Description
    # 
    # Packet
    # |
    # | Header
    # | |   0         8       **TI83F*
    # | |   8         3       $1A $0A $00
    # | |   11        42      Comment added by detokenizer - padded with zeros or spaces.
    # | |   53        2       Length of data section.  Should be equal to filesize - 57
    # | End Header
    # |
    # | Data
    # | |   55        2       Always has a value of $0B or $0D (prefer $0B)
    # | |   57        2       Length of Variable Data Section
    # | |   59        1       Variable type ID ($05 for programs)
    # | |   60        8       Variable name (padded with zeros)
    # | |   68        1       Version, usually $00.  (Present if byte @ 55 == $0D)
    # | |   69        1       Archive flag.  $80 archived, $00 if not. (Present if byte @ 55 == $0D)
    # | |   70        2       Length of Variable Data Section (repeat of bytes @ 57)
    # | |
    # | | Variable Data
    # | | | 72        2       Number of tokens
    # | | | 74        n       Actual Data
    # | | End Variable Data
    # | |
    # | End Data
    # |
    # |    74 + n    2       Checksum (sum of Data section)
    # |
    # End Packet
    #

        
    ########################################
    ### INPUT/OUTPUT
    ###

    def write_to_file(self, string, path, out="w"):
        """Writes to file (with error checking)

        Note:
        The error checking isn't particularily rigorous here.

        Arguments:
         - string: The contents of the new file.
         - path: The location of the new file.
         - out [OPTIONAL]: A kludge (so I can write using bytes)
        """
        try:
            output = open(path, mode=out)
        except IOError:
            raise IOError("Could not create or write to output file")
        output.write(string)
        output.close()
        return

    ########################################
    ### BYTE MANIPULATION
    ###

    def _simplify_tokens(self, tokens):
        """Transforms any single-entry two-byte tokens into two entries.

        Arguments:
         - tokens: A list of strings (hex values)
        """
        tokens = "".join(tokens)
        clean_tokens = [tokens[x:x+2] for x in range(0, len(tokens), 2)]
        return clean_tokens
    
    def _make_valid_name(self, path):
        valid_chars = "ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
        front = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
        # No theta, sorry.

        name = []
        for char in self._program_name:
            if char in valid_chars:
                name.append(char)
        if name[0] not in front:
            name[0] = "A"
        return "".join(name)

    def _make_two_bytes(self, integer):
        """Returns a bytearray.  Least significant first.

        Arguments:
         - integer: Any number

        Returns:
         - A bytearray (least significant digit goes first)"""
        temp = "000" + hex(integer)[2:]
        temp = temp[-4:]
        temp = [int(temp[2:], 16), int(temp[:2], 16)]
        return bytearray(temp)

    def _pad_or_truncate(self, string, length):
        """Takes a string, and cuts its size down to length, and pads.

        Arguments:
         - length: the length you want returned.

        Returns:
         - A bytearray (zero-padded if necessary)
        """
        temp = bytearray(string.encode("ascii"))
        temp.extend([0] * length)
        return temp[:length]

    ########################################
    ### INTERNAL GETTERS AND SETTERS
    ###
        
    def _set_comment(self, string):
        """Changes the 42-char comment at the start of each 8xp file."""
        self._comment = self._pad_or_truncate(string, 42)
        return self._comment

    def _set_program_name(self, string):
        """Changes the program name when creating an 8xp"""
        
        valid_chars = "ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
        front = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
        # No theta, sorry.

        program_name = string.encode("ascii").upper()
        name = []
        for char in program_name:
            if char in valid_chars:
                name.append(char)
        if name[0] not in front:
            name[0] = "A"
        self._program_name = self._pad_or_truncate(program_name, 8)
        return self._program_name
    

        
        
        
