
#ifndef LLM_TOKENIZER_H
#define LLM_TOKENIZER_H

#include "llm_config.h"

#include <cstdint>
#include <cstring>
#include <future>
#include <iostream>
#include <memory>
#include <mutex>
#include <string>
#include <unordered_map>
#include <vector>

// Minimal non-owning view over a character buffer (pre-C++17 stand-in for
// std::string_view). The view never owns data_; the caller must keep the
// underlying buffer alive for the view's lifetime.
class string_view_ {
public:
    string_view_() : data_(nullptr), size_(0) {}
    string_view_(const char *data) : data_(data), size_(std::strlen(data)) {}
    string_view_(const char *data, std::size_t size) : data_(data), size_(size) {}
    // NOTE: binding this to a temporary std::string leaves a dangling view.
    string_view_(const std::string &str) : data_(str.data()), size_(str.size()) {}
    constexpr string_view_(const string_view_ &) noexcept = default;
    string_view_ &operator=(const string_view_ &) noexcept = default;
    const char &operator[](size_t pos) const { return data_[pos]; }
    constexpr const char *data() const noexcept { return data_; }
    constexpr std::size_t size() const noexcept { return size_; }
    constexpr bool empty() const { return size_ == 0; }
    std::string to_string() const { return std::string(data_, size_); }
    // Byte-wise equality. Uses memcmp (not strncmp) so views containing
    // embedded '\0' bytes compare correctly, and short-circuits the empty
    // case so a default-constructed view (data_ == nullptr) is never passed
    // to memcmp.
    bool operator==(const string_view_ &other) const noexcept {
        if (size_ != other.size_) {
            return false;
        }
        return size_ == 0 || std::memcmp(data_, other.data_, size_) == 0;
    }
    // Drop the first n characters; n >= size() leaves an empty view.
    void remove_prefix(size_t n) {
        if (n < size_) {
            data_ += n;
            size_ -= n;
        } else {
            data_ = "";
            size_ = 0;
        }
    }

private:
    const char *data_;
    std::size_t size_ = 0;
};

namespace std {
// Hash specialization so string_view_ can key std::unordered_map/set.
// The specialization must be spelled `hash`, not `std::hash`, while inside
// namespace std (a qualified-id here is ill-formed and rejected by GCC),
// and must use `struct` to match the primary template's class-key.
template <> struct hash<string_view_> {
    size_t operator()(const string_view_ &sv) const noexcept {
        // 31-based polynomial rolling hash over the bytes; go through
        // unsigned char so a signed (negative) char cannot skew the result.
        size_t result = 0;
        for (size_t i = 0; i < sv.size(); ++i) {
            result = (result * 31) + static_cast<size_t>(static_cast<unsigned char>(sv[i]));
        }
        return result;
    }
};
} // namespace std

namespace mindspore {
// Abstract base for all tokenizer backends. Concrete vocab loading and
// encoding are supplied by subclasses; this class provides the factory
// entry points and the special/stop token bookkeeping.
class __attribute__((visibility("default"))) Tokenizer {
public:
    // Magic number expected at the head of a serialized tokenizer file.
    static constexpr int MAGIC_NUMBER = 430;
    // Backend identifiers stored in the tokenizer file header.
    // TIKTOIKEN is a historical misspelling kept for binary/source
    // compatibility; prefer the TIKTOKEN alias in new code.
    enum TokenizerType {
        SENTENCEPIECE = 0,
        TIKTOIKEN = 1,
        TIKTOKEN = TIKTOIKEN,  // correctly-spelled alias, same value
        BERT = 2,
        HUGGINGFACE = 3
    };

    Tokenizer() = default;
    virtual ~Tokenizer() = default;

    // Factory helpers; ownership of the returned pointer passes to the caller.
    static Tokenizer *CreateTokenizer(const std::string &filename);
    static Tokenizer *CreateTokenizer(const std::vector<char> &buffer);
    // Start asynchronous construction; collect via GetTokenizerAsyncResult().
    static void CreateTokenizerAsync(const std::string &filename);
    static void CreateTokenizerAsyncFromCfg(const LLMConfig &cfg);
    static Tokenizer *GetTokenizerAsyncResult();

    // Membership tests against stop_tokens_ / special_tokens_ (see .cpp).
    bool IsStop(int token);
    bool IsSpecial(int token);

    std::vector<int> Encode(const std::string &str);
    virtual std::string Decode(int id) = 0;

private:
    static std::future<Tokenizer *> future_tokenizer_;
    static Tokenizer *CreateTokenizerFromStream(std::istream &tok_stream);
    // Presumably guards the async-creation state above — requires <mutex>.
    static std::mutex mtx_;

protected:
    virtual void LoadSpecial(std::istream &file);
    virtual bool LoadVocab(std::istream &file) = 0;
    virtual void Encode(const std::string &str, std::vector<int> &ids) = 0;

    std::vector<int> special_tokens_;  // ids of special-marker tokens
    std::vector<int> stop_tokens_;     // ids that signal end of generation
    std::vector<int> prefix_tokens_;   // prefix token ids (semantics in .cpp)
};

class Tiktoken : public Tokenizer {
public:
    Tiktoken() = default;
    virtual std::string Decode(int id) override;

protected:
    virtual bool LoadVocab(std::istream &file) override;
    virtual void Encode(const std::string &str, std::vector<int> &ids) override;

    std::unordered_map<std::string, int> encoder_;
    std::vector<std::string> decoder_;
};

// SentencePiece-format tokenizer (supports byte fallback for unknown bytes).
class Sentencepiece : public Tokenizer {
public:
    Sentencepiece() = default;
    virtual std::string Decode(int id) override;

protected:
    virtual bool LoadVocab(std::istream &file) override;
    virtual void Encode(const std::string &str, std::vector<int> &ids) override;

private:
    // Model types, mirroring the sentencepiece model format's numbering.
    enum ModelType { UNIGRAM = 1, BPE = 2, WORD = 3, CHAR = 4 };
    // Piece categories, mirroring sentencepiece's piece-type numbering.
    enum PieceType { NORMAL = 1, UNKNOWN = 2, CONTROL = 3, USER_DEFINED = 4, UNUSED = 5, BYTE = 6 };
    // One vocabulary entry: surface string, score, and category.
    struct SentencePiece {
        std::string piece;
        float score;
        PieceType type = PieceType::NORMAL;
        SentencePiece() {}
        SentencePiece(const std::string &p, float s, PieceType t) : piece(p), score(s), type(t) {}
    };
    // Encoding result: (piece view, token id) pairs. The views borrow from
    // the input string, so they must not outlive it.
    using EncodeResult = std::vector<std::pair<string_view_, int>>;

private:
    // byte fall back enable
    bool byte_fall_back_ = true;
    // unknown id.
    int unk_id_ = 0;
    // pieces from model
    std::vector<SentencePiece> sentence_pieces_;
    // piece -> id map for normal pieces
    std::unordered_map<std::string, int> pieces_;
    // piece -> id map for control, unknown, and byte pieces
    std::unordered_map<std::string, int> reserved_id_map_;

private:
    // Lookup helpers over sentence_pieces_ / the id maps (defined in .cpp).
    float GetScore(int id) const;
    bool IsUnused(int id) const;
    bool IsControl(int id) const;
    int PieceToId(const std::string &w) const;
    std::string ByteToPiece(unsigned char c) const;
    // BPE-style encode of str; alpha presumably controls sampling dropout
    // (0 = deterministic) — semantics live in the .cpp.
    EncodeResult BepEncode(string_view_ str, float alpha = 0.f);
};

class BertTokenizer : public Tiktoken {
public:
    BertTokenizer() = default;

protected:
    virtual void Encode(const std::string &str, std::vector<int> &ids) override;

private:
    std::vector<int> WordPiece(const std::string &token);
};

// Huggingface-format byte-level BPE tokenizer.
class HuggingfaceTokenizer : public Tokenizer {
    // Hash for a (left, right) merge pair. Uses a boost-style hash_combine
    // rather than a plain XOR of the two hashes: XOR makes every symmetric
    // pair (a,b)/(b,a) — and any pair with equal halves — collide
    // systematically, degrading unordered_map performance.
    struct hash_pair_wstring {
        size_t operator()(const std::pair<std::wstring, std::wstring> &p) const {
            size_t seed = std::hash<std::wstring>{}(p.first);
            seed ^= std::hash<std::wstring>{}(p.second) + 0x9e3779b9u + (seed << 6) + (seed >> 2);
            return seed;
        }
    };
    // merge pair -> merge rank
    using BPERanks = std::unordered_map<std::pair<std::wstring, std::wstring>, int, hash_pair_wstring>;

public:
    HuggingfaceTokenizer() = default;
    virtual std::string Decode(int id) override;

protected:
    virtual bool LoadVocab(std::istream &file) override;
    virtual void Encode(const std::string &str, std::vector<int> &ids) override;

private:
    // Apply the BPE merges in bpe_ranks to one pre-tokenized word.
    void BPE(const std::wstring &token, const BPERanks &bpe_ranks, std::vector<std::wstring> *result);

    BPERanks bpe_ranks_;                            // merge rank table
    std::unordered_map<uint8_t, wchar_t> b2u_;      // byte -> wide-char mapping (see .cpp)
    std::unordered_map<wchar_t, uint8_t> u2b_;      // inverse of b2u_
    std::unordered_map<std::string, int> encoder_;  // piece -> token id
    std::vector<std::string> decoder_;              // token id -> piece
};
} // namespace mindspore

#endif // LLM_TOKENIZER_H
