import requests
import csv
import os
from multiprocessing import Pool
import time


def get_html(url, timeout=30):
    """Download *url* and return the response body as text.

    Parameters
    ----------
    url : str
        Page to fetch.
    timeout : float
        Seconds to wait for the server before giving up.  The original
        code had no timeout, so one stalled connection could hang a
        worker process forever.

    Raises
    ------
    requests.RequestException
        On network failure, timeout, or an HTTP 4xx/5xx status.
    """
    result = requests.get(url, timeout=timeout)
    # Fail loudly on error statuses instead of handing an error page
    # to the TSV parser downstream.
    result.raise_for_status()
    return result.text


def write_csv(data):
    """Append one record to ``websites_fast.csv`` next to this script.

    Parameters
    ----------
    data : dict
        Mapping with the keys 'name', 'url', 'descr', 'traffic',
        'percent', 'status'.

    A header row is emitted the first time the file is created (the
    original code appended rows with no header at all, producing a CSV
    that was not self-describing).
    NOTE(review): with a multiprocessing pool the exists/size check is
    racy — in the worst case two workers both write a header; acceptable
    for a best-effort scrape, but confirm if strictness matters.
    """
    order = ['name', 'url', 'descr', 'traffic', 'percent', 'status']
    path_f = os.path.dirname(os.path.abspath(__file__))
    target = os.path.join(path_f, "websites_fast.csv")
    need_header = not os.path.exists(target) or os.path.getsize(target) == 0
    with open(target, "a", newline='', encoding='utf-8') as file_:
        writer_f = csv.DictWriter(file_, fieldnames=order)
        if need_header:
            writer_f.writeheader()
        writer_f.writerow(data)


def get_page_data(text):
    """Parse one TSV ratings page and append each record to the CSV.

    Parameters
    ----------
    text : str
        Raw page body: a header line followed by one record per line,
        fields separated by tabs.
    """
    rows = text.strip().split('\n')[1:]  # drop the header line

    for row in rows:
        columns = row.strip().split('\t')  # split on tabs
        if len(columns) < 6:
            # Malformed or truncated line — skip it rather than let an
            # IndexError kill the whole worker process (the original
            # code crashed on any short row).
            continue
        dictn = {
            'name': columns[0],
            'url': columns[1],
            'descr': columns[2],
            'traffic': columns[3],
            'percent': columns[4],
            'status': columns[5]
        }
        write_csv(dictn)


def make_join_all(url):
    """Download one ratings page and hand it to the TSV parser.

    This is the per-URL task executed by the worker pool.
    """
    get_page_data(get_html(url))


def main(pages=6288, processes=20):
    """Scrape every ratings page with a pool of worker processes.

    Parameters
    ----------
    pages : int
        Total number of pages on the site (6288 at the time of writing;
        previously hard-coded).
    processes : int
        Size of the multiprocessing pool.
    """
    url_template = 'https://www.liveinternet.ru/rating/ru//today.tsv?page={}'
    # End the range at pages + 1 so the last page is actually fetched;
    # the original range(1, 6288) silently skipped page 6288 even though
    # the comment said there were 6288 pages in total.
    urls = [url_template.format(n) for n in range(1, pages + 1)]

    with Pool(processes) as worker:
        worker.map(make_join_all, urls)


if __name__ == '__main__':
    # Time the full scrape so separate runs can be compared.
    started = time.time()
    main()
    elapsed = time.time() - started
    print("--- %s seconds ---" % elapsed)
