import ast
import os
import pickle
import time

from datasets import load_dataset


# Helper function to load a dataset, retrying a few times on transient errors
def load_dataset_with_retries(dataset_name, *args, retries=3, wait=5, **kwargs):
    for attempt in range(retries):
        try:
            return load_dataset(dataset_name, *args, **kwargs)
        except Exception as e:
            print(f"Attempt {attempt + 1} failed for {dataset_name}. Error: {e}")
            if attempt < retries - 1:
                time.sleep(wait)
            else:
                raise
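
# Example call (the dataset name here is purely illustrative, not one used below):
#   ds = load_dataset_with_retries("user/some_dataset", split="train", retries=5, wait=10)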


# Checkpoint file to save progress
checkpoint_file = 'tunisian_data_checkpoint.txt'
dataset_count_path = 'data_count.pkl'
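# checkpoint_file holds the text collected so far (written with str(), read back with
# ast.literal_eval); dataset_count_path records how many source datasets are finished.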

# Load progress if checkpoint exists
if os.path.exists(checkpoint_file):
    with open(checkpoint_file, 'r', encoding='utf-8') as f:
        # The checkpoint stores the repr of a list; literal_eval parses it safely.
        final_data = ast.literal_eval(f.read())
else:
    final_data = []

if os.path.exists(dataset_count_path):
    # Restore how many datasets were fully processed in a previous run
    with open(dataset_count_path, 'rb') as f:
        loaded_data = pickle.load(f)
        datasets_completed = loaded_data['datasets_completed']
else:
    datasets_completed = 0


# Helper function to save progress to a checkpoint file
def save_checkpoint(data):
    with open(checkpoint_file, 'w', encoding='utf-8') as f:
        f.write(str(data))


def save_datasets_completed(num):
    # Persist the number of datasets fully processed so far
    with open(dataset_count_path, 'wb') as f:
        pickle.dump({'datasets_completed': num}, f)


# Load and process datasets
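# Each block below is guarded by `datasets_completed`, so a rerun after a failure
# resumes from the first dataset that has not been fully processed yet.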
try:
    if datasets_completed < 1:
        ds_xp3x = load_dataset_with_retries("Muennighoff/xP3x", "aeb_Arab", trust_remote_code=True)
        final_data.extend(list(sentence['targets'] for sentence in ds_xp3x['train']))
        save_checkpoint(final_data)
        datasets_completed += 1

    if datasets_completed < 2:
        ds_glotcc = load_dataset_with_retries("cis-lmu/glotcc-v1", name="aeb-Arab", split="train")
        final_data.extend(list(sentence['content'] for sentence in ds_glotcc))
        save_checkpoint(final_data)
        datasets_completed += 1

    if datasets_completed < 3:
        ds_flores = load_dataset_with_retries('facebook/flores', 'aeb_Arab')
        final_data.extend(list(sentence['sentence'] for sentence in ds_flores['dev']))
        final_data.extend(list(sentence['sentence'] for sentence in ds_flores['devtest']))
        save_checkpoint(final_data)
        datasets_completed += 1

    if datasets_completed < 4:
        ds_glotstory = load_dataset_with_retries('cis-lmu/GlotStoryBook', 'default', split='train')
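        # Note: unlike the other sources, this keeps the full GlotStoryBook records
        # (dicts), not a single text column, so whole rows are written to the output.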
        glotstory_sentences = [sentence for sentence in ds_glotstory if sentence["Language"] == 'aeb']
        final_data.extend(glotstory_sentences)
        save_checkpoint(final_data)
        datasets_completed += 1

    if datasets_completed < 5:
        ds_sib200 = load_dataset_with_retries('Davlan/sib200', 'aeb_Arab')
        final_data.extend(list(sentence['text'] for sentence in ds_sib200['train']))
        final_data.extend(list(sentence['text'] for sentence in ds_sib200['validation']))
        final_data.extend(list(sentence['text'] for sentence in ds_sib200['test']))
        save_checkpoint(final_data)
        datasets_completed += 1

    if datasets_completed < 6:
        ds_xsimplus = load_dataset_with_retries("jaygala24/xsimplusplus", "aeb_Arab")
        final_data.extend(list(sentence['query'] for sentence in ds_xsimplus['dev']))
        final_data.extend(list(sentence['query'] for sentence in ds_xsimplus['devtest']))
        save_checkpoint(final_data)
        datasets_completed += 1

    if datasets_completed < 7:
        ds_gentai = load_dataset_with_retries("gentaiscool/bitext_sib200_miners", "eng_Latn-aeb_Arab")
        final_data.extend(list(sentence['sentence2'] for sentence in ds_gentai['train']))
        save_checkpoint(final_data)
        datasets_completed += 1

    if datasets_completed < 8:
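        # streaming=True iterates the Reddit dump lazily instead of downloading it in
        # full; the filter below keeps only posts from the r/Tunisia community.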
        dataset_reddit = load_dataset_with_retries('dataverse-scraping/reddit_dataset_219', split='train',
                                                   streaming=True)


        def filter_function(example):
            return example['communityName'] == 'r/Tunisia'  # Replace with your filter condition


        filtered_dataset = dataset_reddit.filter(filter_function)
        final_data.extend(list(sentence['text'] for sentence in filtered_dataset))
        save_checkpoint(final_data)
        datasets_completed += 1

    # Final save to a text file
    with open('tunisian_data.txt', 'w', encoding='utf-8') as f:
        for line in final_data:
            f.write(f"{line}\n")

except Exception as e:
    save_checkpoint(final_data)
    print(f"An error occurred: {e}. Progress saved.")