import csv
from urllib.parse import urlparse
import collections
import time
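
# Count URL paths under /page/ taken from field index 15 of message.txt,
# then print the ten most common along with the elapsed parse time.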
pages = []  # paths collected from matching rows
start = time.time()
with open('message.txt', newline='') as csvfile:
    # The log uses ':' as the field delimiter and '|' as the quote character.
    reader = csv.reader(csvfile, delimiter=':', quotechar='|')
    for row in reader:
        if len(row) > 15:  # guard against rows too short to hold the URL field
            _url = urlparse(row[15])
            if '/page/' in _url.path:
                pages.append(_url.path)
print(collections.Counter(pages).most_common(10))  # ten most common /page/ paths
print(time.time() - start)  # elapsed seconds