Pyarrow error when instantiating the dataloader

#7 by jrapin - opened

Hello,
I've been trying to instantiate the dataloader with ds = load_dataset("bigcode/the-stack-dedup", split="train") (a month or so ago I could instantiate the dataloader for "bigcode/the-stack" without error).
I keep hitting the error below, each time after exactly 318876775 examples, even after deleting the cache and restarting from scratch (three times), and with several pyarrow versions (including the latest, 10.0.1).
Am I doing something wrong, or is one of the files corrupted?
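For completeness, here is roughly what each attempt looked like (a minimal sketch; the cache path assumes the default location and no HF_DATASETS_CACHE override):

import shutil
from pathlib import Path

from datasets import load_dataset

# Default datasets cache; adjust if HF_DATASETS_CACHE or HF_HOME points elsewhere.
cache_dir = Path.home() / ".cache" / "huggingface" / "datasets"
if cache_dir.exists():
    shutil.rmtree(cache_dir)  # start over from a completely clean cache

ds = load_dataset("bigcode/the-stack-dedup", split="train")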
Below is the full stack trace. Thank you!

Generating train split: 318876775 examples [4:04:38, 21170.11 examples/s]
---------------------------------------------------------------------------
ArrowInvalid                              Traceback (most recent call last)
File ~/.conda/envs/cg/lib/python3.8/site-packages/datasets/builder.py:1763, in ArrowBasedBuilder._prepare_split_single(self, arg)
   1762 _time = time.time()
-> 1763 for _, table in generator:
   1764     if max_shard_size is not None and writer._num_bytes > max_shard_size:
File ~/.conda/envs/cg/lib/python3.8/site-packages/datasets/packaged_modules/parquet/parquet.py:67, in Parquet._generate_tables(self, files)
     66 with open(file, "rb") as f:
---> 67     parquet_file = pq.ParquetFile(f)
     68     try:

File ~/.conda/envs/cg/lib/python3.8/site-packages/pyarrow/parquet/core.py:308, in ParquetFile.__init__(self, source, metadata, common_metadata, read_dictionary, memory_map, buffer_size, pre_buffer, coerce_int96_timestamp_unit, decryption_properties, thrift_string_size_limit, thrift_container_size_limit)
    307 self.reader = ParquetReader()
--> 308 self.reader.open(
    309     source, use_memory_map=memory_map,
    310     buffer_size=buffer_size, pre_buffer=pre_buffer,
    311     read_dictionary=read_dictionary, metadata=metadata,
    312     coerce_int96_timestamp_unit=coerce_int96_timestamp_unit,
    313     decryption_properties=decryption_properties,
    314     thrift_string_size_limit=thrift_string_size_limit,
    315     thrift_container_size_limit=thrift_container_size_limit,
    316 )
    317 self._close_source = getattr(source, 'closed', True)

File ~/.conda/envs/cg/lib/python3.8/site-packages/pyarrow/_parquet.pyx:1227, in pyarrow._parquet.ParquetReader.open()

File ~/.conda/envs/cg/lib/python3.8/site-packages/pyarrow/error.pxi:100, in pyarrow.lib.check_status()

ArrowInvalid: Parquet magic bytes not found in footer. Either the file is corrupted or this is not a parquet file.
The above exception was the direct cause of the following exception:

DatasetGenerationError                    Traceback (most recent call last)
Input In [2], in <module>
      2 from pathlib import Path
      3 from datasets import load_dataset
----> 4 ds = load_dataset("bigcode/the-stack-dedup", split="train")

File ~/.conda/envs/cg/lib/python3.8/site-packages/datasets/load.py:1741, in load_dataset(path, name, data_dir, data_files, split, cache_dir, features, download_config, download_mode, ignore_verifications, keep_in_memory, save_infos, revision, use_auth_token, task, streaming, num_proc, **config_kwargs)
   1738 try_from_hf_gcs = path not in _PACKAGED_DATASETS_MODULES
   1740 # Download and prepare data
-> 1741 builder_instance.download_and_prepare(
   1742     download_config=download_config,
   1743     download_mode=download_mode,
   1744     ignore_verifications=ignore_verifications,
   1745     try_from_hf_gcs=try_from_hf_gcs,
   1746     use_auth_token=use_auth_token,
   1747     num_proc=num_proc,
   1748 )
   1750 # Build dataset for splits
   1751 keep_in_memory = (
   1752     keep_in_memory if keep_in_memory is not None else is_small_dataset(builder_instance.info.dataset_size)
   1753 )

File ~/.conda/envs/cg/lib/python3.8/site-packages/datasets/builder.py:822, in DatasetBuilder.download_and_prepare(self, output_dir, download_config, download_mode, ignore_verifications, try_from_hf_gcs, dl_manager, base_path, use_auth_token, file_format, max_shard_size, num_proc, storage_options, **download_and_prepare_kwargs)
    820     if num_proc is not None:
    821         prepare_split_kwargs["num_proc"] = num_proc
--> 822     self._download_and_prepare(
    823         dl_manager=dl_manager,
    824         verify_infos=verify_infos,
    825         **prepare_split_kwargs,
    826         **download_and_prepare_kwargs,
    827     )
    828 # Sync info
    829 self.info.dataset_size = sum(split.num_bytes for split in self.info.splits.values())

File ~/.conda/envs/cg/lib/python3.8/site-packages/datasets/builder.py:913, in DatasetBuilder._download_and_prepare(self, dl_manager, verify_infos, **prepare_split_kwargs)
    909 split_dict.add(split_generator.split_info)
    911 try:
    912     # Prepare split will record examples associated to the split
--> 913     self._prepare_split(split_generator, **prepare_split_kwargs)
    914 except OSError as e:
    915     raise OSError(
    916         "Cannot find data file. "
    917         + (self.manual_download_instructions or "")
    918         + "\nOriginal error:\n"
    919         + str(e)
    920     ) from None

File ~/.conda/envs/cg/lib/python3.8/site-packages/datasets/builder.py:1650, in ArrowBasedBuilder._prepare_split(self, split_generator, file_format, num_proc, max_shard_size)
   1648 gen_kwargs = split_generator.gen_kwargs
   1649 job_id = 0
-> 1650 for job_id, done, content in self._prepare_split_single(
   1651     {"gen_kwargs": gen_kwargs, "job_id": job_id, **_prepare_split_args}
   1652 ):
   1653     if done:
   1654         result = content

File ~/.conda/envs/cg/lib/python3.8/site-packages/datasets/builder.py:1795, in ArrowBasedBuilder._prepare_split_single(self, arg)
   1793     if isinstance(e, SchemaInferenceError) and e.__context__ is not None:
   1794         e = e.__context__
-> 1795     raise DatasetGenerationError("An error occurred while generating the dataset") from e
   1797 yield job_id, True, (total_num_examples, total_num_bytes, writer._features, num_shards, shard_lengths)

DatasetGenerationError: An error occurred while generating the dataset

Could you try installing the latest version of datasets and making sure the cache is clean? Otherwise, you could try to locate the file that triggers the error, and we will check on our side whether we can load it or whether it's corrupted, see this issue.
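To find it, something along these lines should work (a rough sketch; the cache path is an assumption and depends on your HF_DATASETS_CACHE setting, and the downloaded shards are stored as hashed blobs without a .parquet extension):

from pathlib import Path

import pyarrow.parquet as pq

# Assumed location of the raw downloaded files; adjust to your cache setup.
downloads = Path.home() / ".cache" / "huggingface" / "datasets" / "downloads"

for path in sorted(downloads.rglob("*")):
    if not path.is_file():
        continue
    try:
        pq.ParquetFile(path)  # only reads the footer/metadata, so this is fast
    except Exception as err:  # e.g. ArrowInvalid for a truncated or non-parquet file
        print(f"Cannot open {path}: {err}")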

This has been fixed in datasets>=2.13 :)
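For anyone landing here, upgrading and retrying with a fresh download should be enough (a minimal sketch; forcing a re-download is just one way to bypass a possibly stale cache):

# pip install -U "datasets>=2.13"
from datasets import load_dataset

# Re-download in case a partially written file is still sitting in the cache
# (alternatively, wipe the cache directory manually before retrying).
ds = load_dataset(
    "bigcode/the-stack-dedup",
    split="train",
    download_mode="force_redownload",
)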

lhoestq changed discussion status to closed
