import time
from typing import Iterable

import pandas as pd
from pyspark import Row


def spark_rows_to_df(rows: "Iterable[Row]") -> pd.DataFrame:
    """Collect an iterable of Spark ``Row`` objects into a pandas DataFrame.

    Column names are taken from the first row's ``__fields__``; each row's
    values are appended column-wise in field order. An empty *rows* iterable
    yields an empty DataFrame with no columns.

    Args:
        rows: Iterable of ``pyspark.Row`` (tuple-like, with ``__fields__``).

    Returns:
        A ``pandas.DataFrame`` with one column per field of the first row.
    """
    columns: dict = {}
    for row in rows:
        if not columns:
            # First row seen: create one value list per field name.
            columns = {name: [] for name in row.__fields__}
        # Row is tuple-like; pair each field name (dict preserves insertion
        # order, i.e. __fields__ order) with its positional value.
        for name, value in zip(columns, row):
            columns[name].append(value)
    return pd.DataFrame(columns)
