import re
import itertools
from nltk.tokenize import word_tokenize as nltk_word_tokenize
from pythainlp.tokenize import word_tokenize as pythainlp_word_tokenize

class SmsThLoan0V1Tokenizer:
    """Tokenizer for Thai loan-related SMS content.

    Pulls URL-like fragments out of raw SMS text, discards known link
    shorteners, strips scheme/``www.``/``.com`` noise, splits each
    remaining fragment into its English and Thai character runs, and
    tokenizes each run with a language-appropriate tokenizer (NLTK for
    English, PyThaiNLP for Thai).
    """

    # Link-shortener prefixes: whole matches carrying one of these
    # prefixes are dropped, and the same strings are stripped when they
    # appear at the start of a domain.
    _SHORTENERS = ('bit.ly', 'blt.ly', 'shorturl')
    # Ordinary URL prefixes: their presence triggers path removal and
    # the short-label filter in _process_url.
    _URL_PREFIXES = ('http://', 'https://', 'www.')

    def _process_url(self, text):
        """Extract and clean URL-like word fragments from *text*.

        Returns a flat list of domain labels / words, with shortener
        matches dropped and, for recognized URLs, the path and labels of
        two characters or fewer (e.g. "co", "th") removed.
        """
        url_pattern = r'(https?://|www\.|bit\.ly|blt\.ly|shorturl)?([\w\./\u0E00-\u0E7F\-?=]+)(\.com)?'

        matches = re.findall(url_pattern, text)
        # Discard anything recognized as a link shortener outright.
        candidates = [m for m in matches if m[0] not in self._SHORTENERS]

        processed_urls = []
        for prefix, domain, _suffix in candidates:
            if prefix in self._URL_PREFIXES:
                # Real URL: keep only the host part, drop the path.
                # (str.split replaces re.split(r'/', domain, 1), whose
                # positional maxsplit is deprecated since Python 3.13.)
                domain = domain.split('/', 1)[0]
            domain = re.sub(r'^(www\.|bit\.ly|blt\.ly|shorturl)', '', domain)
            domain = re.sub(r'\.com$', '', domain)
            parts = re.split(r'[/.]', domain)
            if prefix in self._URL_PREFIXES:
                # For recognized URLs, host labels of <= 2 chars carry
                # no signal (country codes, "co", ...): drop them.
                parts = [part for part in parts if len(part) > 2]
            if parts:
                processed_urls.append(parts)

        return list(itertools.chain.from_iterable(processed_urls))

    def _split_english_thai(self, text):
        """Split *text* into an ``(english_part, thai_part)`` pair.

        Concatenates all ASCII alphanumeric runs into the first element
        and all Thai-block (U+0E00-U+0E7F) runs into the second; every
        other character is discarded.
        """
        english_part = ''.join(re.findall(r'[A-Za-z0-9]+', text))
        thai_part = ''.join(re.findall(r'[\u0E00-\u0E7F]+', text))
        return english_part, thai_part

    def seg_content(self, text):
        """Tokenize SMS *text* into a filtered list of word tokens.

        Extracts URL-like fragments, splits each into English and Thai
        runs, joins consecutive same-language runs, tokenizes each group
        with the matching tokenizer, and keeps only tokens that are not
        whitespace and are longer than one character.
        """
        # Tag each fragment's English and Thai halves with a language key.
        part_list = []
        for word in self._process_url(text):
            english_part, thai_part = self._split_english_thai(word)
            if english_part:
                part_list.append(('en', english_part))
            if thai_part:
                part_list.append(('th', thai_part))

        result_tokens = []
        # groupby collapses maximal runs of same-language parts into one
        # tokenizer call each, preserving multi-word context. This
        # replaces the previous duplicated loop/final-flush logic.
        for lang, group in itertools.groupby(part_list, key=lambda p: p[0]):
            seg_input = ' '.join(part for _, part in group)
            if lang == 'en':
                result_tokens.extend(nltk_word_tokenize(seg_input))
            elif lang == 'th':
                result_tokens.extend(pythainlp_word_tokenize(seg_input))

        # Keep only meaningful tokens: non-whitespace, length > 1.
        return [token for token in result_tokens if not token.isspace() and len(token) > 1]