# -*- coding: utf-8 -*-
"""
@Project: base_python
@File: gram.py
@Author: PC
@Date: 2025/06/05
@Description: 
"""
from urllib.request import urlopen
from bs4 import BeautifulSoup
import re
import string
from collections import Counter
def cleanSentence(sentence):
    """Split a sentence on spaces, strip punctuation/whitespace from each
    token, and keep only multi-character words plus the words 'a' and 'i'."""
    strip_chars = string.punctuation + string.whitespace
    tokens = (token.strip(strip_chars) for token in sentence.split(' '))
    return [word for word in tokens
            if len(word) > 1 or word.lower() in ('a', 'i')]
def cleanInput(content):
    """Normalize raw page text and return a list of cleaned sentences.

    Args:
        content: raw text scraped from a page.

    Returns:
        A list of sentences, each a list of cleaned words (see cleanSentence).
    """
    # Remove newlines and Wikipedia-style citation markers such as [12].
    # BUG FIX: the old pattern '\n|[[\d+\]]' was a character class that
    # deleted every digit, '+', '[' and ']' anywhere in the text; the raw
    # string below matches only whole bracketed citation markers.
    content = re.sub(r'\n|\[\d+\]', ' ', content)
    # Round-trip through bytes to drop any non-ASCII characters.
    content = bytes(content, 'utf-8')
    content = content.decode('ascii', 'ignore')
    content = content.upper()
    sentences = content.split('. ')
    return [cleanSentence(sentence) for sentence in sentences]

def getNgramsFromSentence(content, n):
    """Return every contiguous window of n items from content, in order."""
    window_starts = range(len(content) - n + 1)
    return [content[start:start + n] for start in window_starts]
def getNgrams(content, n):
    """Clean raw text and count the frequency of its word n-grams.

    Args:
        content: raw text (e.g. the scraped body of a web page).
        n: the n-gram size (number of consecutive words per gram).

    Returns:
        A collections.Counter mapping each space-joined n-gram string
        ('W1 W2 ... Wn') to its number of occurrences.
    """
    sentences = cleanInput(content)
    counts = Counter()
    for sentence in sentences:
        # Join each n-word window into a single space-separated key.
        # (Renamed the loop variable: the original comprehension variable
        # shadowed the Counter, which was also called `ngrams`.)
        counts.update(' '.join(gram)
                      for gram in getNgramsFromSentence(sentence, n))
    return counts
# Scrape the Wikipedia article on Python and report its 2-gram frequencies.
html = urlopen('http://en.wikipedia.org/wiki/Python_(programming_language)')
bs = BeautifulSoup(html, 'html.parser')
# 'mw-content-text' is the div that holds the article body on Wikipedia pages.
content = bs.find('div', {'id': 'mw-content-text'}).get_text()
ngrams = getNgrams(content, 2)
print(ngrams)
print('2-grams count is: ' + str(len(ngrams)))