from urllib.request import urlopen
from bs4 import BeautifulSoup
import re
import string
from collections import OrderedDict

def gram(content, n):
    """Count n-grams in *content*.

    The raw text is first normalized via ``clean`` into a word list;
    every run of ``n`` consecutive words (space-joined) is then tallied.

    Args:
        content: Raw page text to analyze.
        n: Size of each n-gram (e.g. 2 for bigrams).

    Returns:
        dict mapping each n-gram string to its occurrence count.
    """
    words = clean(content)  # NOTE: clean caps output at 101 words
    counts = {}
    for i in range(len(words) - n + 1):
        ngram = " ".join(words[i:i + n])
        # dict.get with a default replaces the membership-test/else branch
        counts[ngram] = counts.get(ngram, 0) + 1
    return counts

def clean(content):
    """Normalize raw page text into a list of at most 101 words.

    Steps: collapse newlines and repeated spaces, remove footnote
    markers like ``[12]``, drop non-ASCII characters, strip leading or
    trailing punctuation from each word, and keep only words longer
    than one character (plus the standalone words 'a' and 'i').

    Args:
        content: Raw text (e.g. scraped page body).

    Returns:
        list[str] of cleaned words, truncated once 101 words are kept.
    """
    # Raw strings avoid invalid-escape warnings in the regex patterns.
    content = re.sub(r"\n+", " ", content)        # newlines -> single spaces
    content = re.sub(r"\[[0-9]*\]", "", content)  # strip citation markers
    content = re.sub(r" +", " ", content)         # collapse space runs
    # Drop non-ASCII characters; every byte of a multibyte UTF-8 char is
    # >= 0x80, so this matches the original bytes()/decode round-trip.
    content = content.encode("ascii", "ignore").decode("ascii")
    clean_input = []
    for item in content.split(" "):
        item = item.strip(string.punctuation)
        # Keep multi-character words plus the one-letter words 'a'/'I'.
        if len(item) > 1 or item.lower() in ("a", "i"):
            clean_input.append(item)

        # Preserve the original cap: stop once 101 words are collected.
        if len(clean_input) > 100:
            break

    return clean_input



# Fetch the Wikipedia article on Python, extract the article body text,
# and report bigram frequencies.
html = urlopen("http://en.wikipedia.org/wiki/Python_(programming_language)")
bs = BeautifulSoup(html, "html.parser")

# The article body lives in the div with id "mw-content-text".
body = bs.find("div", {"id": "mw-content-text"})
content = body.get_text()

ngrams = gram(content, 2)
print(ngrams)

# Most frequent bigrams first.
sortData = sorted(ngrams.items(), key=lambda pair: pair[1], reverse=True)
print(sortData)

print('*' * 100)

# OrderedDict built from the sorted pairs keeps the frequency ordering.
orders = OrderedDict(sortData)
print(orders)