from langchain.text_splitter import CharacterTextSplitter, Language, RecursiveCharacterTextSplitter, \
    MarkdownHeaderTextSplitter, TokenTextSplitter, SpacyTextSplitter, SentenceTransformersTokenTextSplitter


def split_by_string():
    """Demonstrate several LangChain text splitters on the State of the Union text.

    Covers fixed-separator character splitting, tiktoken-sized character
    splitting, raw token splitting, spaCy sentence-aware splitting, and
    sentence-transformers token counting. Prints sample output from each.
    """
    # Explicit encoding: the source text is UTF-8; relying on the platform
    # default codec can raise UnicodeDecodeError on some systems.
    with open("files/state_of_the_union.txt", encoding="utf-8") as f:
        state_of_the_union = f.read()

    # Fixed-separator splitting: break on blank lines, ~1000 chars per chunk
    # with 200 chars of overlap between neighbouring chunks.
    text_splitter = CharacterTextSplitter(separator="\n\n", chunk_size=1000, chunk_overlap=200, length_function=len)
    texts = text_splitter.create_documents([state_of_the_union])
    print(texts[0])

    # Per-input metadata is attached to every chunk cut from that input.
    metadatas = [{"document": 1}, {"document": 2}]
    documents = text_splitter.create_documents([state_of_the_union, state_of_the_union], metadatas=metadatas)
    print(documents[0])
    print(text_splitter.split_text(state_of_the_union)[0])

    # Same splitter, but chunk size measured in tiktoken tokens, not characters.
    text_splitter = CharacterTextSplitter.from_tiktoken_encoder(
        chunk_size=100, chunk_overlap=0
    )
    texts = text_splitter.split_text(state_of_the_union)
    print(texts[0])

    # Split directly on token boundaries, 10 tokens per chunk.
    text_splitter = TokenTextSplitter(chunk_size=10, chunk_overlap=0)
    texts = text_splitter.split_text(state_of_the_union)
    print(texts[0])

    # Sentence-aware splitting via spaCy's sentence segmenter.
    text_splitter = SpacyTextSplitter(chunk_size=1000)
    texts = text_splitter.split_text(state_of_the_union)
    print(texts[0])

    # Count tokens as a sentence-transformers model sees them; subtract the
    # two special tokens ([CLS]/[SEP]) the tokenizer wraps around the text.
    splitter = SentenceTransformersTokenTextSplitter(chunk_overlap=0)
    text = "Lorem "
    count_start_and_stop_tokens = 2
    text_token_count = splitter.count_tokens(text=text) - count_start_and_stop_tokens
    print(text_token_count)


def split_by_code():
    """Demonstrate language-aware recursive splitting for several source formats.

    Splits Python, JavaScript, Markdown, LaTeX, HTML and Solidity snippets
    using ``RecursiveCharacterTextSplitter.from_language`` and prints the
    resulting documents.
    """
    languages = [e.value for e in Language]
    print(languages)

    PYTHON_CODE = """
    def hello_world():
        print("Hello, World!")

    # Call the function
    hello_world()
    """
    python_splitter = RecursiveCharacterTextSplitter.from_language(
        language=Language.PYTHON, chunk_size=50, chunk_overlap=0
    )
    python_docs = python_splitter.create_documents([PYTHON_CODE])
    print(python_docs)

    JS_CODE = """
    function helloWorld() {
      console.log("Hello, World!");
    }

    // Call the function
    helloWorld();
    """
    js_splitter = RecursiveCharacterTextSplitter.from_language(
        language=Language.JS, chunk_size=60, chunk_overlap=0
    )
    js_docs = js_splitter.create_documents([JS_CODE])
    print(js_docs)

    markdown_text = """
        # 🦜️🔗 LangChain

        ⚡ Building applications with LLMs through composability ⚡

        ## Quick Install

            ``` bash
                pip install langchain
            ```

        As an open source project in a rapidly developing field, we are extremely open to contributions.
    """
    md_splitter = RecursiveCharacterTextSplitter.from_language(
        language=Language.MARKDOWN, chunk_size=60, chunk_overlap=0
    )
    md_docs = md_splitter.create_documents([markdown_text])
    print(md_docs)

    # Raw string is required: in a normal string "\b" in "\begin" is a
    # backspace character and "\d", "\m", "\s", "\e" are invalid escapes,
    # so the LaTeX commands would be silently corrupted.
    latex_text = r"""
    \documentclass{article}

    \begin{document}

    \maketitle

    \section{Introduction}
    Large language models (LLMs) are a type of machine learning model that can be trained on vast amounts of text data to generate human-like language. In recent years, LLMs have made significant advances in a variety of natural language processing tasks, including language translation, text generation, and sentiment analysis.

    \subsection{History of LLMs}
    The earliest LLMs were developed in the 1980s and 1990s, but they were limited by the amount of data that could be processed and the computational power available at the time. In the past decade, however, advances in hardware and software have made it possible to train LLMs on massive datasets, leading to significant improvements in performance.

    \subsection{Applications of LLMs}
    LLMs have many applications in industry, including chatbots, content creation, and virtual assistants. They can also be used in academia for research in linguistics, psychology, and computational linguistics.

    \end{document}
    """
    # Fixed copy-paste bug: was Language.MARKDOWN, which splits on markdown
    # headers instead of LaTeX sectioning commands.
    latex_splitter = RecursiveCharacterTextSplitter.from_language(
        language=Language.LATEX, chunk_size=60, chunk_overlap=0
    )
    latex_docs = latex_splitter.create_documents([latex_text])
    print(latex_docs)

    html_text = """
    <!DOCTYPE html>
    <html>
        <head>
            <title>🦜️🔗 LangChain</title>
            <style>
                body {
                    font-family: Arial, sans-serif;
                }
                h1 {
                    color: darkblue;
                }
            </style>
        </head>
        <body>
            <div>
                <h1>🦜️🔗 LangChain</h1>
                <p>⚡ Building applications with LLMs through composability ⚡</p>
            </div>
            <div>
                As an open source project in a rapidly developing field, we are extremely open to contributions.
            </div>
        </body>
    </html>
    """
    # Fixed copy-paste bug: was Language.MARKDOWN; HTML should be split on
    # tag boundaries.
    html_splitter = RecursiveCharacterTextSplitter.from_language(
        language=Language.HTML, chunk_size=60, chunk_overlap=0
    )
    html_docs = html_splitter.create_documents([html_text])
    print(html_docs)

    SOL_CODE = """
    pragma solidity ^0.8.20;
    contract HelloWorld {
       function add(uint a, uint b) pure public returns(uint) {
           return a + b;
       }
    }
    """
    sol_splitter = RecursiveCharacterTextSplitter.from_language(
        language=Language.SOL, chunk_size=128, chunk_overlap=0
    )
    sol_docs = sol_splitter.create_documents([SOL_CODE])
    print(sol_docs)


def split_by_markdown_header():
    """Demonstrate header-aware markdown splitting, then chunk each section further.

    First splits a small document on H1-H3 headers and prints every section.
    Then splits a longer document on H1/H2 headers and re-chunks each section
    with a character-level recursive splitter, carrying the header metadata
    onto every sub-chunk.
    """
    markdown_document = "# Foo\n\n    ## Bar\n\nHi this is Jim\n\nHi this is Joe\n\n ### Boo \n\n Hi this is Lance \n\n ## Baz\n\n Hi this is Molly"

    headers_to_split_on = [
        ("#", "Header 1"),
        ("##", "Header 2"),
        ("###", "Header 3"),
    ]

    header_splitter = MarkdownHeaderTextSplitter(headers_to_split_on=headers_to_split_on)
    for section in header_splitter.split_text(markdown_document):
        print(section)

    markdown_document = "# Intro \n\n    ## History \n\n Markdown[9] is a lightweight markup language for creating formatted text using a plain-text editor. John Gruber created Markdown in 2004 as a markup language that is appealing to human readers in its source code form.[9] \n\n Markdown is widely used in blogging, instant messaging, online forums, collaborative software, documentation pages, and readme files. \n\n ## Rise and divergence \n\n As Markdown popularity grew rapidly, many Markdown implementations appeared, driven mostly by the need for \n\n additional features such as tables, footnotes, definition lists,[note 1] and Markdown inside HTML blocks. \n\n #### Standardization \n\n From 2012, a group of people, including Jeff Atwood and John MacFarlane, launched what Atwood characterised as a standardisation effort. \n\n ## Implementations \n\n Implementations of Markdown are available for over a dozen programming languages."

    headers_to_split_on = [
        ("#", "Header 1"),
        ("##", "Header 2"),
    ]

    # Header-level pass: one section per H1/H2 heading.
    header_splitter = MarkdownHeaderTextSplitter(headers_to_split_on=headers_to_split_on)
    sections = header_splitter.split_text(markdown_document)

    # Character-level pass: tiny chunks so the sub-splitting is visible.
    char_splitter = RecursiveCharacterTextSplitter(chunk_size=10, chunk_overlap=0)

    # Re-split each section, repeating its header metadata for every sub-chunk.
    # NOTE(review): assumes split_text yields dicts with 'content'/'metadata'
    # keys (older MarkdownHeaderTextSplitter API); newer langchain versions
    # return Document objects (.page_content/.metadata) — verify the pinned
    # version before upgrading.
    all_splits = []
    all_metadatas = []
    for section in sections:
        chunks = char_splitter.split_text(section['content'])
        all_splits.extend(chunks)
        all_metadatas.extend(section['metadata'] for _ in chunks)
    print(all_splits[0])
    print(all_metadatas[0])


if __name__ == '__main__':
    # Only the string-splitting demo runs by default; split_by_code and
    # split_by_markdown_header are defined above but must be invoked manually.
    split_by_string()