Delete app.py
app.py
DELETED
@@ -1,51 +0,0 @@
-import pandas as pd
-from huggingface_hub import HfApi, Repository
-from datasets import Dataset
-
-combined_file = "combined_checkpoints.csv"
-chunk_size = 10000  # Adjust this size depending on the available memory
-chunks = pd.read_csv(combined_file, chunksize=chunk_size)
-
-df_list = []
-for chunk in chunks:
-    # Additional per-chunk processing can be done here
-    df_list.append(chunk)
-
-# Concatenate the chunks into a single DataFrame
-df_combined = pd.concat(df_list, ignore_index=True)
-print("Loaded the combined dataset.")
-
-# Shuffle the data
-df_shuffled = df_combined.sample(frac=1).reset_index(drop=True)
-
-# Split the data into train/validation/test sets
-train_ratio = 0.8
-validation_ratio = 0.1
-test_ratio = 0.1
-
-train_size = int(train_ratio * len(df_shuffled))
-validation_size = int(validation_ratio * len(df_shuffled))
-
-df_train = df_shuffled[:train_size]
-df_validation = df_shuffled[train_size:train_size + validation_size]
-df_test = df_shuffled[train_size + validation_size:]
-
-# Prepare for upload to the Hugging Face Hub
-hf_username = "adowu"
-hf_dataset_name = "polish_sentences"
-token = "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"
-repo_id = f"{hf_username}/{hf_dataset_name}"
-
-# Create the repository on the Hugging Face Hub
-api = HfApi()
-api.create_repo(token=token, repo_id=repo_id, repo_type="dataset", exist_ok=True)
-
-# Upload the data via the git-based Repository workflow
-# (cloning a dataset repo requires repo_type="dataset")
-repo = Repository(local_dir=repo_id, clone_from=repo_id, repo_type="dataset", use_auth_token=token)
-
-# preserve_index=False keeps the sliced DataFrame indices from being
-# added as an __index_level_0__ column
-datasets = {
-    'train': Dataset.from_pandas(df_train, preserve_index=False),
-    'validation': Dataset.from_pandas(df_validation, preserve_index=False),
-    'test': Dataset.from_pandas(df_test, preserve_index=False),
-}
-
-for split, dataset in datasets.items():
-    dataset.save_to_disk(f"{repo_id}/{split}")
-    repo.push_to_hub(commit_message=f"Add {split} split")
-
-print(f"The dataset splits have been uploaded to the Hugging Face Hub: https://huggingface.co/datasets/{repo_id}")
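Note that Repository is the legacy git-based upload path and is deprecated in recent huggingface_hub releases. A minimal sketch of the same upload using DatasetDict.push_to_hub, assuming the df_train / df_validation / df_test frames, token, and repo_id defined in the script above (a single call creates the repo if needed and uploads all three splits):

from datasets import Dataset, DatasetDict

# Bundle the three splits into one DatasetDict
splits = DatasetDict({
    "train": Dataset.from_pandas(df_train, preserve_index=False),
    "validation": Dataset.from_pandas(df_validation, preserve_index=False),
    "test": Dataset.from_pandas(df_test, preserve_index=False),
})

# Creates the dataset repo (if missing) and pushes every split in one call
splits.push_to_hub(repo_id, token=token)

push_to_hub stores the splits as Parquet files on the Hub, so they can be reloaded later with load_dataset(repo_id), which the save_to_disk layout in the original script does not support directly.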