from pathlib import Path
import json
# NOTE(review): this module-level `db` is never referenced again in this file —
# M.__init__ re-reads the same JSON from db_path; candidate for removal (TODO confirm
# no other module imports it).
db = json.loads((Path.cwd() / '.tmp' / 'db.json').read_text())
import time
import logging
import sys
# `P` is the root logger, used throughout this module; everything at DEBUG+
# is streamed to stdout (no formatter, raw messages only).
P = logging.getLogger()
P.setLevel(logging.DEBUG)
handler = logging.StreamHandler(sys.stdout)
# to send to file
# handler = logging.FileHandler('hi.log')
handler.setLevel(logging.DEBUG)
# formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# handler.setFormatter(formatter)
P.addHandler(handler)
# logging.basicConfig(filename='hi.log', level=logging.INFO)

class S:                        # the Screen UI class
    """Console helper: ANSI color codes plus colorized logging and sleep.

    Fix: `print` and `sleep` were plain functions inside the class (only
    usable as `S.print(...)` / `S.sleep(...)`; calling them on an instance
    would have passed the instance as the format string / duration).
    They are now proper ``@staticmethod``s — all existing class-level call
    sites keep working unchanged.
    """
    BL = '\033[94m'  # blue
    CY = '\033[96m'  # cyan
    GR = '\033[92m'  # green
    RD = '\033[91m'  # red
    NR = '\033[0m'   # reset

    @staticmethod
    def print(s, *args):
        """Log `s` at INFO, pre-binding {0}=blue-on and {1}=reset; *args fill {2}..."""
        P.info(s.format(S.BL, S.NR, *args))

    @staticmethod
    def sleep(n):
        """Sleep for `n` seconds (thin wrapper kept for UI-pacing call sites)."""
        time.sleep(n)

class M():                      # the Main program class body
    """Main driver: loads the .m/.h/.c databases, then runs the
    prompt → LM → parse → improve → judge retry loop (`m1`)."""

    def __init__(self, nm: Path, # name of the .m file
                 hm: str, # hash of the .m file
                 db_path: Path,  # json of o[nm][hm][0]
                 template_path : Path, # template for the prompt
                 header_path : Path,
                 m_api_doc_path : Path,  # json of o[<func>]
                 c_api_doc_path : Path,   # json of c[<abstract-c-filename>]
                 dry_run : bool = False
                 ):
        """Load the three JSON databases, the prompt template, resolve the
        .m path from db[nm][hm][0], and render all .h files under header_path.

        Raises ValueError when (nm, hm) is not present in the database.
        """
        self.dry_run = dry_run  # whether to run in dry-run mode
        self.db = json.loads(db_path.read_text())
        self.db_m_doc = json.loads(m_api_doc_path.read_text())
        self.db_c_doc = json.loads(c_api_doc_path.read_text())
        self.template = template_path.read_text()  # template for the prompt
        S.print('0.1 🦜 : db loaded from {0}{2}{1}: size = {0}{3}{1}', db_path, len(self.db))
        S.print('0.2 🦜 : .m doc db loaded from {0}{2}{1}: size = {0}{3}{1}', m_api_doc_path, len(self.db_m_doc))
        S.print('0.3 🦜 : .c doc db loaded from {0}{2}{1}: size = {0}{3}{1}', c_api_doc_path, len(self.db_c_doc))

        if nm not in self.db or hm not in self.db[nm]:
            raise ValueError(f".m name={nm} with hash={hm} not found in the database.")
        p : str = self.db[nm][hm][0] # the database, db[nm][hm] = list[path]
        self.p : Path = Path(p)  # path-to-.m-file
        S.print('1.1 🦜 : path-to-.m-file loaded from db: {0}{2}{1}', self.p)
        S.print('1.2 🦜 : rendering .h files from {0}{2}{1}', header_path)
        self.h : str = U.render_headers_in_folder(header_path)  # rendered_headers
        P.debug(f'Rendered .h prompt: {self.h}')

    async def m1(self, model: str, base_url: str, api_key: str,
                 max_attempts: int = 1,  # max attempts to retry
                 mock_lm: bool = False,  # whether to mock the LM response
                 o1 = None):  # a module loop <2025-05-27 Tue>
        """One module loop: build prompt, ask the LM (or mock), extract the
        last code block, improve and judge it; retry up to `max_attempts`."""
        S.print('3.1 🦜 : Trying to fix the path-to-.m file')
        po = A.f2(self.p)       # mangle: po = path-to-mangled-.m-file
        P.info(f'\tBefore mangling: {S.BL}{self.p}{S.NR}\n\tAfter mangling:  {S.GR}{po}{S.NR}')

        P.info('4. 🦜: Trying to get .c and .h from .m')
        p1, p2 = A.f1(self.p)  # get_ch_path_from_m_path: p1, p2 = path-to-.h-file, path-to-.c-file
        P.info(f'\t.c file: {S.GR}{p2}{S.NR}')
        P.info(f'\t.h file: {S.BL}{p1}{S.NR}')
        s, s1, s2 = self.p.read_text(), p1.read_text(), p2.read_text()
        S.print('Read {0}{2},{3},{4}{1} bytes from .m, .h, and .c file', len(s), len(s1), len(s2))

        P.info('\t4.1. : Modifying header content')  # f7
        # 🐛-fix: log the original content BEFORE mutating s1 — previously
        # A.f7 ran first, so "Before"/"Modified" printed the same (new) text.
        P.debug(f'\n\t\tBefore header content:\n{S.CY}{s1}{S.NR}')
        s1 = A.f7(s1)  # modify header content
        P.debug(f'\n\t\tModified header content:\n{S.GR}{s1}{S.NR}')

        P.info('\t4.2. : Checking t in .c file')  # f6
        has_t = A.f6(p2, s2)  # check whether the code has `_t` symbol
        P.debug(f'\t\tHas `_t` symbol in .c file: {S.GR}{has_t}{S.NR}')

        P.info('\t4.3. : Getting d9: matlab functions')  # f9
        m_fs : set[str] = A.f9(s)  # extract_m_functions
        P.debug(f'\t\tExtracted m functions: {S.CY}{m_fs}{S.NR}')
        P.info('\t4.4. : Getting d8: matlab-api-doc-prompt')
        m_doc : str = A.p2_get_m_doc_prompt(m_fs, self.db_m_doc)  # make_m_doc_prompt
        P.info(f'\t4.5. : Getting d10: c-api-doc-prompt')
        c_doc : str = A.p1_get_c_doc_prompt(m_fs, self.db_c_doc)  # make_c_doc_prompt

        n, p0, o0, m0 = 0, '', '', ''  # n-try, previous prompt, previous lm output, previous judgement result
        while True:
            n += 1

            x = f'{S.CY}[Retry-{n}/{max_attempts}]{S.NR} '  # prefix
            P.info(f'{x}5. Calling the make prompt API ')
            P.debug(f'\t\tPrevious prompt: {S.CY}{p0}{S.NR}')
            P.debug(f'\t\tPrevious lm output: {S.CY}{o0}{S.NR}')
            P.debug(f'\t\tPrevious judgement result: {S.CY}{m0}{S.NR}')
            p0 = A.f3_make_prompt(p=self.p,p1=p1,p2=p2,
                                  s=s, s1=s1,m_doc=m_doc, c_doc=c_doc,
                                  rendered_headers=self.h,
                                  template=self.template,
                                  p0=p0, o0=o0, m0=m0, dry_run=self.dry_run)  # make_prompt
            P.debug(f'\tGenerated Prompt: {S.CY}{p0}{S.NR}')

            P.info(f'{x}6. Calling LM')
            if mock_lm:  # if we mock the LM response
                r : str = await U.ask_lm_mk(p0, model, base_url, api_key)  # ask_lm_mk
            else:
                r = await U.ask_lm(p0, model, base_url, api_key)  # ask_lm
            P.debug(f'\tLM response: {S.CY}{r}{S.NR}')
            S.sleep(1)  # simulate some delay

            P.info(f'{x}7. Parsing the last code block')
            r = U.get_last_code_block(r)  # get_last_code_block
            P.debug(f'\tExtracted last code block: {S.CY}{r}{S.NR}')

            P.info(f'\t{x}7.1. Improving the c code')
            r = A.f4(r,has_t=has_t)         # improve_c_code
            P.debug(f'Improved C-code:\n{S.GR}{r}{S.NR}')

            P.info(f'\t{x}7.2. Judging the c code')
            ok, m0 = A.f5(r)  # judge_c_code
            if ok:
                P.info(f'✅️ {S.GR}The C code is valid{S.NR}, we\'re done~')
                U.tidy_print(r)
                break
            else:
                # 🐛-fix: was `{m}` (undefined name) and `S.NOR` (nonexistent
                # attribute) — this branch raised NameError whenever judging failed.
                P.debug(f'⚠️ {S.RD}The C code is invalid{S.NR}, judgement message: {S.CY}{m0}{S.NR}')

            # 🦜 : for now, let's mock the break logic
            if n >= max_attempts:  # if we reach the max attempts, break
                P.info(f'❌️. Max Attempts reached = {S.RD}{max_attempts}{S.NR}, bye bye 👋')
                break

class A():                      # 🦜: The API method 'namespace', this should be filled properly by third-party packages/libraries provider/implementers
    """Pluggable API namespace; every entry is a @staticmethod mock to be
    replaced by real implementations from third-party providers."""

    @staticmethod
    def f1(p: Path) -> tuple[Path, Path]:  # get_ch_path_from_m_path
        # (.m path) -> (.h path, .c path)
        # --------------------------------------------------
        # mock 🦜 : just replace the ending .m with .h and .c
        return p.with_suffix('.h'), p.with_suffix('.c')

    @staticmethod
    def f2(p: Path) -> Path:    # mangle
        # mangle the path ('change some components'); mock: identity
        return p

    @staticmethod
    def f3_make_prompt(p: Path, p1: Path, p2: Path, s: str, s1: str, # make_prompt
                       m_doc : str, c_doc: str, rendered_headers: str,
                       template: str,
                       p0: str = '', o0: str = '', m0: str = '', dry_run = True) -> str:
        """
        p: .m path ; p1: .h path ; p2: .c path ; s: .m source code ; s1: .h source code
        m_doc: .m doc prompt; c_doc: .c doc prompt; rendered_headers: ~
        p0: previous prompt ; o0: previous lm output ; m0: previous judgement result

        return the prompt string
        """
        if dry_run:
            return 'hi, can you write a hello world C program?'
        else:
            # 2. substitute the placeholders in the template
            return template.format(
                m_name = p.name, m_code = s,
                h_name = p1.name, h_code = s1,
                m_doc = m_doc, c_doc = c_doc,
                rendered_headers = rendered_headers,
                c_name = p2.name
            )

    @staticmethod
    def f4(s: str, has_t: bool) -> str: # improve_c_code
        '''<generated-c-code> -> <improved-c-code> (mock: identity)'''
        return s

    @staticmethod
    def f5(s: str) -> tuple[bool, str]: # judge_c_code
        '''<processed-c-code> -> (bool, judgement message) (mock: always ok)'''
        return True, 'ok'

    @staticmethod
    def f6(p:Path, s:str) -> bool: # check whether the code has `_t` symbol
        '''p: .c path, s: content of p (mock: always True)'''
        return True

    @staticmethod
    def f7(s: str) -> str:      # modify header content
        # mock: identity
        return s

    @staticmethod
    def f8(s: str) -> set[str]:  # guess_c_abstract_functions
        '''(m func) -> (c abstract functions)'''
        return set(['mocked_f1c', 'mocked_f2c'])

    @staticmethod
    def f9(s: str) -> set[str]:  # extract_m_functions
        '''(m-src -> m_function)'''
        return set(['mocked_f1m', 'mocked_f2m'])

    @staticmethod
    def f10_make_c_doc_prompt(d: dict[str,dict]) -> str:  # make_c_doc_prompt
        '''d: filtered c_api_doc.json'''
        return '<mocked-c-doc-prompt>'

    # 🐛-fix: was missing @staticmethod, inconsistent with every sibling
    # (only worked by accident because it is always called on the class).
    @staticmethod
    def f11_make_m_doc_prompt(d: dict[str,dict]) -> str:  # make_m_doc_prompt
        '''d: filtered m_api_doc.json'''
        return '<mocked-m-doc-prompt>'

    @staticmethod
    def p1_get_c_doc_prompt(l : set[str], db : dict[str,dict]) -> str:
        '''l : list of m functions, db: c_api_doc.json'''
        # 1. for each m func get the abstract c funcs
        o = set().union(*[A.f8(x) for x in l])  # union of all abstract functions
        # 2. get the filtered db and pass to c doc
        return A.f10_make_c_doc_prompt({k: db[k] for k in o})  # make_c_doc_prompt

    @staticmethod
    def p2_get_m_doc_prompt(l : set[str], db : dict[str,dict]) -> str:
        '''l : list of m functions, db: m_api_doc.json'''
        return A.f11_make_m_doc_prompt({k: db[k] for k in l})


from openai import AsyncOpenAI
class U():                      # the utils
    """Utility namespace: header rendering, code-block extraction, LM calls."""

    @staticmethod
    def render_headers(l: list[Path]) -> str:
        """
        Render a list of .h Path objects as ```c fenced blocks joined by blank lines.
        """
        def render_header(p: Path):
            return f'```c\n{p.read_text()}\n```'
        return '\n\n'.join(render_header(p) for p in l)

    @staticmethod
    def render_headers_in_folder(f: Path) -> str:
        # sorted() makes the prompt deterministic (Path.glob order is
        # filesystem-dependent) and matches the declared list[Path] parameter.
        return U.render_headers(sorted(f.glob('*.h')))  # render all .h files in the folder

    @staticmethod
    def get_last_code_block(s: str) -> str:  # u1
        """
        Extract the last code block from a string.
        Assumes the code block is enclosed in triple backticks.
        🐛-fix: returns '' when no closing ``` fence exists (previously this
        raised IndexError from pop() on an empty list).
        """
        lines = s.splitlines()
        # drop trailing lines until we sit on the closing fence
        while lines and lines[-1].strip() != '```':
            lines.pop()
        if not lines:
            return ''           # no closing fence anywhere in the input
        lines.pop()             # pop the last ```
        o = []
        # collect lines upward until the opening fence (``` or ```c etc.)
        while lines and not lines[-1].strip().startswith('```'):
            o.append(lines.pop())
        return '\n'.join(reversed(o))

    @staticmethod
    async def ask_lm(s: str, model: str, base_url: str, api_key: str) -> str:  # u2
        '''
        Ask LM in OpenAI Format
        '''
        c = AsyncOpenAI(base_url=base_url, api_key=api_key)
        # 1. 🦜: not everybody implements /responses api... so maybe we should use the api of /chat
        # r = await c.responses.create(model=model, input=s)
        # return r.output_text
        # 2. /chat api
        r = await c.chat.completions.create(model=model, messages=[{"role": "user", "content": s}])
        return r.choices[0].message.content

    @staticmethod
    def tidy_print(s):
        """Pretty-print the final C code in green."""
        print(f'✅️ {S.GR }Result c code {S.NR} --------------------------------------------------')
        print(f'{S.GR}\n{s}\n{S.NR}')

    @staticmethod
    async def ask_lm_mk(s: str, model: str, base_url: str, api_key: str) -> str:  # u2
        """Mocked LM: same signature as ask_lm, returns a canned C answer."""
        return '''
        This is a mocked responce
        ```c
int f(void){
     return 123;
    }
        ```
        '''
'''
Try ask_lm
'''
# NOTE(review): the triple-quoted string above is a no-op expression used as a
# scratch heading; the commented lines below are manual REPL examples kept for
# reference (the `await` one only works inside an async REPL).
    # from pathlib import Path
    # import json
    # k = json.loads((Path.home() / '.ssh' / 'k-claude.json').read_text())
    # U.ask_lm('hi','gpt-4o-mini',k['url'], k['key'])
    # await U.ask_lm('hi', 'llama3.2','http://localhost:11434/v1','<mock-key>') # ✅️

import asyncio
async def main():
    """Wire up the driver M against the mocked fixtures and run one mocked-LM loop."""
    cfg = dict(
        nm='f0.m',
        hm='e9058ab198f6908f702111b0c0fb5b36f99d00554521886c40e2891b349dc7a1',
        db_path=Path.cwd() / '.tmp' / 'db.json',
        template_path=Path('/home/me/repo/msv2/front2/14/.tmp/mocked-template.txt'),
        header_path=Path('/home/me/repo/msv2/front2/14/.tmp'),
        m_api_doc_path=Path('/home/me/repo/msv2/front2/14/.tmp/mocked_matlab_api_doc.json'),
        c_api_doc_path=Path('/home/me/repo/msv2/front2/14/.tmp/mocked_c_api_doc.json'),  # json of c[<abstract-c-filename>]
        dry_run=False,
    )
    m = M(**cfg)
    # k = json.loads((Path.home() / '.ssh' / 'k-claude.json').read_text())
    await m.m1(model='', base_url='', api_key='', max_attempts=1, mock_lm=True)
    # await m.m1(model='gpt-4o-mini',base_url=k['url'],api_key=k['key'],max_attempts=1,mock_lm=False)
    # await m.m1(model='llama3.2',base_url='http://localhost:11434/v1',api_key='<mock-key>',max_attempts=1,mock_lm=False)
asyncio.run(main())
