import csv
import os
from concurrent.futures import as_completed
from concurrent.futures.process import ProcessPoolExecutor
import subprocess
from typing import List

import r2pipe as r2pipe
from tqdm import tqdm
from elftools.elf.elffile import ELFFile
import logging
import json

# Setup logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

# Lookup table indexed by an instruction byte value. A True entry marks a byte
# that get_opcode() must NOT accept as a standalone one-byte opcode: it is
# skipped as a "prefix that should be discarded" while scanning for the real
# opcode (see the table lookup in get_opcode below).
# NOTE(review): hand-written table, presumably following the x86 one-byte
# opcode map — confirm against the Intel SDM before editing any entry.
invalid_opcodes_table = [False, False, False, False, False, False, True, True,
                         False, False, False, False, False, False, True, False,
                         False, False, False, False, False, False, True, True,
                         False, False, False, False, False, False, True, True,
                         False, False, False, False, False, False, True, True,
                         False, False, False, False, False, False, True, True,
                         False, False, False, False, False, False, True, True,
                         False, False, False, False, False, False, True, True,
                         True, True, True, True, True, True, True, True, True,
                         True, True, True, True, True, True, True, False,
                         False, False, False, False, False, False, False,
                         False, False, False, False, False, False, False,
                         False, True, True, True, False, True, True, True,
                         True, False, False, False, False, False, False, False,
                         False, False, False, False, False, False, False,
                         False, False, False, False, False, False, False,
                         False, False, False, False, False, True, False, False,
                         False, False, False, False, False, False, False,
                         False, False, False, False, False, False, False,
                         False, False, False, False, False, False, False, True,
                         True, False, False, False, False, False, False, False,
                         False, False, False, False, False, False, False,
                         False, False, False, False, False, False, False,
                         False, False, False, False, False, False, False,
                         False, False, False, False, False, False, False,
                         False, False, False, False, False, True, True, False,
                         False, False, False, False, False, False, False,
                         False, False, False, False, False, False, True, True,
                         True, False, False, False, False, False, False, False,
                         False, False, False, False, False, False, False,
                         False, False, False, False, False, False, False,
                         False, False, False, False, False, False, False,
                         False, False, False, False, False, False, False,
                         False, False, False, False, False, False]


def run_extractor(input_files: List[str], outdir: str, openc: bool,
                  jobs: int) -> None:
    """
    Extracts the data from binary files, either as a list of function
    opcodes or just the raw .text section.
    :param input_files: A list of strings, each representing a path to a
    binary file.
    :param outdir: The directory where the extracted data should be written.
    The same filename of the input_files will be used, with a .csv appended
    in case of opcode encoded analysis or .bin otherwise.
    :param openc: true if opcode encoded analysis is requested. This particular
    type of analysis uses the output of disassembly instead of plain raw bytes.
    :param jobs: maximum number of jobs that will be spawned concurrently
    :raises IOError: if outdir does not exist, is not a directory, or is not
    writable.
    """
    # Guard clauses instead of the original nested if-pyramid.
    if not os.path.exists(outdir):
        raise IOError(f"The folder {outdir} does not exist")
    if not os.path.isdir(outdir):
        raise IOError(f"{outdir} is not a folder")
    if not os.access(outdir, os.W_OK):
        raise IOError(f"Folder {outdir} is not writable")

    if openc:
        extension = ".csv"
        worker = extract_function_to_file
    else:
        extension = ".bin"
        worker = extract_dot_text_to_file

    # Use context managers so both the progress bar and the pool are closed
    # deterministically (the original leaked the tqdm bar).
    with tqdm(total=len(input_files), ncols=60) as progress, \
            ProcessPoolExecutor(max_workers=jobs) as executor:
        futures = {
            executor.submit(
                worker,
                file=file,
                out_file=os.path.join(outdir,
                                      os.path.basename(file) + extension))
            for file in input_files
        }
        # 86400s = 24h upper bound on the whole batch.
        for _ in as_completed(futures, timeout=86400):
            progress.update(1)


def dot_text_name(r2) -> str:
    """
    Returns the name of the .text section for the binary opened in r2.
    The name of this section differs between executable formats/OSes.
    :param r2: an opened r2pipe handle (result of r2pipe.open), used to
    gather info about the binary file. The original annotation used the
    r2pipe *module* as a type, which is meaningless; it has been dropped.
    :return: A string with the .text name inside the binary.
    :raises ValueError: if the binary format is not mach0, elf or pe.
    """
    info = r2.cmdj("ij")
    bint = info["bin"]["bintype"]
    if bint == "mach0":
        return "0.__TEXT.__text"
    elif bint in ("elf", "pe"):
        return ".text"
    else:
        raise ValueError(f"Unknown file format {bint}")


def extract_dot_text_to_file(file: str, out_file: str) -> None:
    """
    Extracts the raw .text section from a binary file and saves it to another
    file.
    :param file: path to the input file.
    :param out_file: The file where the dump will be saved.
    """
    r2 = None  # defined up-front so the finally clause never hits a NameError
    try:
        # Initialize r2pipe with more robust settings.
        r2 = r2pipe.open(file, [
            "-2",
            "-e bin.relocs.apply=true",
            "-e bin.cache=true",
            "-e io.cache=true",
            "-e anal.timeout=300",
            "-e bin.types=true"
        ])

        # Use the module-level logging setup instead of raw print() calls,
        # consistent with the rest of this file.
        logging.debug(f"Opening file: {file}")

        # First verify that radare2 can parse the file at all.
        info = r2.cmdj("ij")
        if not info:
            logging.debug(f"Failed to get binary info for {file}")
            return None

        logging.debug(f"Binary type: {info.get('bin', {}).get('bintype')}")

        # Force full analysis (anal.timeout above bounds the run time).
        logging.debug("Starting analysis...")
        r2.cmd("aaa")

        # Get sections info with error checking.
        logging.debug("Getting sections info...")
        sections = r2.cmdj("iSj")
        if not sections:
            logging.debug(f"No sections found in {file}")
            return None

        # Resolve the format-specific name of the .text section.
        try:
            expected_name = dot_text_name(r2)
            logging.debug(f"Looking for section: {expected_name}")
        except ValueError as e:
            logging.debug(f"Error determining section name: {str(e)}")
            return None

        # Find the .text section among the reported sections.
        text_section = next(
            (s for s in sections if s["name"] == expected_name), None)
        if not text_section:
            logging.debug(f"Could not find {expected_name} section")
            logging.debug(
                f"Available sections: {[s['name'] for s in sections]}")
            return None

        # Extract the section data in 1MB chunks to bound memory usage.
        try:
            address = text_section["vaddr"]
            total_size = text_section["size"]
            chunk_size = 1024 * 1024  # 1MB chunks

            logging.debug(f"Extracting {expected_name} section: "
                          f"addr={hex(address)}, size={total_size}")

            with open(out_file, "wb") as f:
                for offset in range(0, total_size, chunk_size):
                    size = min(chunk_size, total_size - offset)
                    current_addr = address + offset

                    try:
                        # Method 1: pxj returns the bytes as a JSON int list.
                        r2.cmd(f"s {current_addr}")
                        data = r2.cmdj(f"pxj {size}")
                        if data:
                            f.write(bytes(data))
                            continue

                        # Method 2: parse the textual hex dump produced by px.
                        raw_bytes = r2.cmd(f"px {size} @ {current_addr}")
                        if raw_bytes:
                            # Skip the header line of the dump.
                            byte_lines = raw_bytes.strip().split('\n')[1:]
                            for line in byte_lines:
                                hex_part = line.split('  ')[1].strip().split(' ')
                                f.write(bytes.fromhex(''.join(hex_part)))
                    except Exception as chunk_error:
                        logging.debug(
                            f"Error extracting chunk at {hex(current_addr)}: "
                            f"{str(chunk_error)}")
                        continue

                    logging.debug(
                        f"Progress: {offset}/{total_size} bytes processed")

            logging.info(f"Successfully extracted to {out_file}")
        except Exception as extract_error:
            logging.debug(f"Error during extraction: {str(extract_error)}")
            return None

    except Exception as e:
        logging.error(f"Critical error: {str(e)} (type {type(e)})")
    finally:
        # Guarded: r2 stays None when r2pipe.open itself raised. Narrow the
        # original bare except to Exception so KeyboardInterrupt propagates.
        if r2 is not None:
            try:
                r2.quit()
            except Exception:
                pass

def extract_dot_text_fallback(file: str) -> bytes:
    """
    Fallback methods to extract executable code when r2 fails.
    Tries, in order: executable LOAD segments (readelf -l), the .text
    section via pyelftools, the .text section via readelf -S, and finally
    the entire file contents.
    :param file: path to the input binary.
    :return: the extracted bytes, or None if even reading the file failed.
    """
    logging.info(f"Attempting fallback extraction for {file}")

    def _read_slice(offset: int, size: int) -> bytes:
        # Read directly instead of shelling out to `dd if=... skip=...`:
        # avoids one subprocess per byte-range and removes the shell-injection
        # risk of interpolating the file path into a shell=True command.
        with open(file, 'rb') as fh:
            fh.seek(offset)
            return fh.read(size)

    # Try executable LOAD segments first
    try:
        cmd = ['readelf', '-l', file]
        output = subprocess.check_output(cmd, stderr=subprocess.PIPE).decode()
        logging.debug(f"readelf -l output: \n {output}")

        # Look for executable LOAD segments (wide output: Type Offset
        # VirtAddr PhysAddr FileSiz ... — NOTE(review): two-line narrow
        # readelf output never matches because "R E" sits on the second line).
        for line in output.split('\n'):
            if "LOAD" in line and "R E" in line:
                parts = line.split()
                for i, part in enumerate(parts):
                    if part == "LOAD":
                        offset = int(parts[i + 1], 16)  # Offset field
                        size = int(parts[i + 4], 16)    # FileSiz field
                        logging.info(f"Found executable LOAD segment at offset {hex(offset)} size {hex(size)}")
                        data = _read_slice(offset, size)
                        logging.info("Successfully extracted LOAD segment data")
                        return data

        logging.debug("No executable LOAD segments found")
    except Exception as e:
        logging.debug(f"LOAD segment extraction failed: {str(e)}")

    # Then try elftools
    try:
        with open(file, 'rb') as f:
            elf = ELFFile(f)
            section = elf.get_section_by_name('.text')
            if section:
                logging.info("Successfully extracted using elftools")
                return section.data()
            logging.debug("No .text section found with elftools")
    except Exception as e:
        logging.debug(f"elftools extraction failed: {str(e)}")

    # Try readelf -S to locate .text, then read the range directly
    try:
        cmd = ['readelf', '-S', file]
        output = subprocess.check_output(cmd, stderr=subprocess.PIPE).decode()
        logging.debug(f"readelf -S output:\n{output}")

        for line in output.split('\n'):
            if '.text' in line:
                parts = line.split()
                for i, part in enumerate(parts):
                    if part == '.text':
                        offset = int(parts[i + 4], 16)  # section file offset
                        size = int(parts[i + 5], 16)    # section size
                        logging.info(f"Found .text section at offset {hex(offset)} size {hex(size)}")
                        data = _read_slice(offset, size)
                        logging.info("Successfully extracted .text section data")
                        return data

        logging.debug("No .text section found in readelf output")
    except Exception as e:
        logging.debug(f"readelf+dd extraction failed: {str(e)}")

    # Final fallback: return the whole binary
    try:
        logging.warning(f"All extraction methods failed for {file} - using entire binary as fallback")
        with open(file, 'rb') as f:
            return f.read()
    except Exception as e:
        logging.error(f"Failed to read binary file: {str(e)}")
        return None

def extract_dot_text(file: str) -> List[int]:
    """
    Extracts and returns the raw .text section from a binary file.
    Tries r2pipe first, then the readelf/elftools fallbacks.
    :param file: path to the input file.
    :return: a list of byte values (ints, as produced by r2's pxj command —
    the original List[bytes] annotation was wrong), or None if every
    extraction method failed.
    """
    data = None
    r2 = None
    try:
        # First try r2pipe
        r2 = r2pipe.open(file, ["-2"])
        sections = r2.cmdj("iSj")
        expected_name = dot_text_name(r2)
        for section in sections:
            if section["name"] == expected_name:
                address = section["vaddr"]
                length = section["size"]
                r2.cmd("s " + str(address))
                data = r2.cmdj("pxj " + str(length))
                break
        if data:
            logging.info("Successfully extracted using r2pipe")
            return data
    except Exception as e:
        logging.debug(f"r2pipe extraction failed: {str(e)}")
    finally:
        # Always close the spawned r2 process; the original only called
        # quit() on the success path and leaked it on any exception.
        if r2 is not None:
            try:
                r2.quit()
            except Exception:
                pass

    # If r2pipe failed, try fallback methods
    try:
        data = extract_dot_text_fallback(file)
        if data:
            return list(data)
    except Exception as e:
        logging.error(f"All extraction methods failed: {str(e)}")

    return data


def get_opcode(bytes: bytearray) -> bytearray:
    """
    Extracts the opcode from a statement, discarding prefixes and suffixes.
    :param bytes: a bytearray containing the input statement
    :return: a bytearray with the opcode; [0x0F, x] for two-byte opcodes,
    [0xFF, 0xFF] when no opcode byte was found in the input.
    """
    # TODO: add ARM support. (x86 opcode encoding sucks, I won't study
    #  in-depth the ARM specification when naive approach is better)
    saw_0f = False
    # Remembers a pending 0xF2/0xF3 byte so that the sequence 0xF2 0x0F XX
    # resolves to the two-byte opcode 0x0F XX instead of the prefix.
    pending_f23 = 0x00
    for current in bytes:
        if saw_0f:
            # Previous byte was 0x0F: this byte completes a two-byte opcode.
            return bytearray([0x0F, current])
        if current == 0x0F:
            saw_0f = True
            pending_f23 = 0x00
        elif current in (0xF2, 0xF3):
            pending_f23 = current
        elif invalid_opcodes_table[current]:
            # Prefix byte: skip it and keep scanning.
            continue
        elif pending_f23:
            # A lone 0xF2/0xF3 not followed by 0x0F is itself the opcode.
            return bytearray([pending_f23])
        else:
            return bytearray([current])
    # Ran out of bytes without finding an opcode.
    return bytearray([0xFF, 0xFF])


def extract_function_to_file(file: str, out_file: str) -> None:
    """
    Opens a file and extracts every function's opcodes. Saves the result in a
    .csv containing a function for each row with the following fields:
    - virtual address offset (decimal) of the current function in the binary
    - function name
    - function length (in bytes)
    - function bytes
    - function bytes without prefixes or suffixes
    :param file: The input binary file
    :param out_file: The output csv containing the extracted data
    """
    r2 = None  # defined up-front so the finally clause never hits a NameError
    try:
        r2 = r2pipe.open(file, ["-2"])
        r2.cmd("aaa")   # Analyze all

        # Get imports with error handling
        try:
            raw_imports = r2.cmd("iij")
            imports = json.loads(raw_imports) if raw_imports else []
        except json.JSONDecodeError:
            logging.warning(f"Failed to parse imports JSON for {file}")
            imports = []

        # PLT addresses of imported symbols, used to skip import thunks below.
        import_set = {imp["plt"] for imp in imports
                      if isinstance(imp, dict) and "plt" in imp}

        # Get functions with error handling
        try:
            raw_functions = r2.cmd("aflj")
            functions = json.loads(raw_functions) if raw_functions else []
        except json.JSONDecodeError:
            # Fixed copy-paste: this message wrongly said "imports".
            logging.warning(f"Failed to parse functions JSON for {file}")
            functions = []

        rows = []
        if functions is not None:  # some files contain 0 functions
            for function in functions:
                if function["offset"] not in import_set:
                    try:
                        r2.cmd(f"s {function['offset']}")
                        raw_func = r2.cmd("pdrj")
                        func = json.loads(raw_func) if raw_func else None
                        if func is None:
                            continue
                        opcodes = bytearray()
                        raw_opcodes = []
                        for stmt in func:
                            # invalid opcodes do not have "bytes"
                            if isinstance(stmt, dict) and "bytes" in stmt:
                                opcodes.extend(get_opcode(
                                    bytearray.fromhex(stmt["bytes"])))
                                raw_opcodes.append(stmt["bytes"])
                        rows.append(
                            [function["offset"], function["name"],
                             function["size"],
                             ''.join(raw_opcodes),   # join the list directly
                             opcodes.hex()])         # same as per-byte %02x
                    except (json.JSONDecodeError, KeyError) as e:
                        logging.warning(f"Failed to process function at offset {function.get('offset')} in {file}: {str(e)}")
        # newline="" is required by the csv module to avoid blank lines on
        # platforms with \r\n line endings.
        with open(out_file, "w", newline="") as fp:
            writer = csv.writer(fp, delimiter=",", quotechar='"',
                                quoting=csv.QUOTE_NONNUMERIC)
            writer.writerow(["offset", "name", "size", "raw", "opcodes"])
            writer.writerows(rows)
    except Exception as e:
        logging.error(f"Error processing {file}: {str(e)}")
    finally:
        # Guarded quit; Exception instead of a bare except so Ctrl-C works.
        if r2 is not None:
            try:
                r2.quit()
            except Exception:
                pass

def extract_features(binary_path, sample_size=10*1024*1024):  # 10MB sample
    """
    Reads a bounded sample of the .text section of a binary via r2.
    :param binary_path: path to the binary to analyze.
    :param sample_size: maximum number of bytes to sample from the section
    (default 10MB).
    :return: a list of byte values (ints from r2's pxj), or None when the
    binary has no .text section or extraction fails. The original version
    discarded the sampled data and always returned None.
    """
    r2 = None
    try:
        r2 = r2pipe.open(binary_path)
        r2.cmd('e io.cache=true')
        r2.cmd('aa')

        text_section = next(
            (s for s in r2.cmdj('iSj') if s['name'] == '.text'), None)
        if text_section is None:
            return None
        # Only analyze a sample of the .text section.
        start = text_section['vaddr']
        size = min(sample_size, text_section['vsize'])
        return r2.cmdj(f'pxj {size} @ {start}')
    except Exception as e:
        logging.debug(f"Sampling error: {str(e)}")
        return None
    finally:
        # Always close the spawned r2 process; the original leaked it.
        if r2 is not None:
            try:
                r2.quit()
            except Exception:
                pass