From d070d2e807ceff83a929b529789fa86bb9edba37 Mon Sep 17 00:00:00 2001
From: Paul Khudan
Date: Mon, 31 May 2021 15:32:14 +0300
Subject: [PATCH 01/28] refactor pipeline. resolve task <-> sandbox relationship

# Conflicts:
#	src/language_model/data/extract.py
#	src/language_model/data/load.py
#	src/language_model/modelling/trainer.py
#	src/language_model/tokenization/extend.py
#	src/language_model/tokenization/tasks.py
#	src/language_model/tokenization/trainer.py
---
 load.py                                    | 13 ++++
 run.py                                     |  4 +-
 src/language_model/modelling/trainer.py    |  6 +-
 src/language_model/pipeline.py             | 72 +------------------
 src/language_model/runner.py               | 80 ++++++++++++++++++----
 src/language_model/tokenization/trainer.py |  4 +-
 6 files changed, 88 insertions(+), 91 deletions(-)
 create mode 100644 load.py

diff --git a/load.py b/load.py
new file mode 100644
index 0000000..c128c96
--- /dev/null
+++ b/load.py
@@ -0,0 +1,13 @@
+import argparse
+
+from language_model.runner import SandboxRunner
+
+DATA_FOLDER_PATH = "data"
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--task", default=None, type=str, required=True, help="Configuration file")
+    args = parser.parse_args()
+    runner = SandboxRunner(config_path=args.task, sandbox_root_path=DATA_FOLDER_PATH)
+    runner.run()
diff --git a/run.py b/run.py
index 07eccd8..ce474dd 100644
--- a/run.py
+++ b/run.py
@@ -1,10 +1,10 @@
 import argparse
 
-from language_model.runner import ConfigurationFileRunner
+from language_model.runner import SandboxRunner
 
 if __name__ == "__main__":
     parser = argparse.ArgumentParser()
     parser.add_argument("--task", default=None, type=str, required=True, help="Configuration file")
     args = parser.parse_args()
-    runner = ConfigurationFileRunner(args.task)
+    runner = SandboxRunner(config_path=args.task)
     runner.run()
diff --git a/src/language_model/modelling/trainer.py b/src/language_model/modelling/trainer.py
index cdef92d..79f7a89 100644
--- a/src/language_model/modelling/trainer.py
+++ b/src/language_model/modelling/trainer.py
@@ -9,10 +9,10 @@
     TrainingArguments,
 )
 
-from ..pipeline import SandboxTask
+from ..pipeline import ITask
 
 
-class RobertaForMaskedLMTrainTask(SandboxTask):
+class RobertaForMaskedLMTrainTask(ITask):
     def __init__(
         self,
         file_path: str,
@@ -44,7 +44,7 @@ def execute(self, environment_path: str) -> None:
         )
 
         training_args = TrainingArguments(
-            output_dir=self.sandbox_folder_path,
+            output_dir=os.path.join(environment_path, "temp"),
             overwrite_output_dir=True,
             num_train_epochs=self.epochs,
             per_gpu_train_batch_size=self.batch_size_per_gpu,
diff --git a/src/language_model/pipeline.py b/src/language_model/pipeline.py
index 536cff8..113e6a8 100644
--- a/src/language_model/pipeline.py
+++ b/src/language_model/pipeline.py
@@ -1,76 +1,8 @@
-import os
-import pathlib
-import sys
-from abc import ABC
-from logging import INFO, FileHandler, Formatter, Logger, StreamHandler, getLogger
-from typing import Sequence
-
-sys.path.append(str(pathlib.Path().absolute()))
-
-LOGGING_FORMAT: str = "%(asctime)s : %(levelname)s : %(module)s : %(message)s"
-DEFAULT_LOG_DIR: str = "logs"
-DEFAULT_LOG_FILE: str = "log.txt"
-DEFAULT_CONFIGURATION_DIR: str = "configs"
-
-
 class ITask(object):
     def execute(self, environment_path: str) -> None:
         raise NotImplementedError()
 
 
-class Sandbox(object):
-    def get_sandbox_folder_path(self) -> str:
+class TaskRunner(object):
+    def run(self) -> None:
         raise NotImplementedError()
-
-
-class AbstractSandbox(Sandbox):
-    def __init__(self, sandbox_folder_path: str) -> None:
-
self.sandbox_folder_path = sandbox_folder_path - - def get_sandbox_folder_path(self) -> str: - return self.sandbox_folder_path - - -class SandboxTask(ITask, AbstractSandbox, ABC): - def __init__(self, sandbox_folder_path: str = "outputs") -> None: - super().__init__(sandbox_folder_path) - - -def identifiers_from_config_file(filepath: str) -> Sequence[str]: - path = os.path.normpath(filepath) - path_components = path.split(os.sep) - path_components[-1] = os.path.splitext(path_components[-1])[0] - config_dir_index = path_components[:-1].index(DEFAULT_CONFIGURATION_DIR) - if config_dir_index != -1: - path_components = path_components[config_dir_index + 1 :] - return path_components - - -def init_logger(experiment_identifiers: Sequence[str], overwrite: bool = True, log_to_stderr: bool = False) -> Logger: - path_components = [DEFAULT_LOG_DIR] + list(experiment_identifiers) - log_path = os.path.join(*path_components) - return configure_logger(log_path, file=DEFAULT_LOG_FILE, overwrite=overwrite, log_to_stderr=log_to_stderr) - - -def configure_logger( - path: str = DEFAULT_LOG_DIR, file: str = DEFAULT_LOG_FILE, overwrite: bool = True, log_to_stderr: bool = False -) -> Logger: - logger = getLogger() - logger.setLevel(INFO) - - logging_path = os.path.join(path) - if not os.path.exists(logging_path): - os.makedirs(logging_path) - - formatter = Formatter(LOGGING_FORMAT) - - fh = FileHandler(os.path.join(logging_path, file), mode="w" if overwrite else "a", encoding="utf-8") - fh.setFormatter(formatter) - logger.addHandler(fh) - - if log_to_stderr: - sh = StreamHandler() - sh.setFormatter(formatter) - logger.addHandler(sh) - - return logger diff --git a/src/language_model/runner.py b/src/language_model/runner.py index 27feb3d..0045ba5 100644 --- a/src/language_model/runner.py +++ b/src/language_model/runner.py @@ -1,29 +1,81 @@ import os from importlib import import_module +from logging import INFO, FileHandler, Formatter, Logger, StreamHandler, getLogger +from typing import Sequence -from .pipeline import SandboxTask, identifiers_from_config_file, init_logger +from .pipeline import ITask, TaskRunner +LOGGING_FORMAT: str = "%(asctime)s : %(levelname)s : %(module)s : %(message)s" +DEFAULT_LOG_DIR: str = "logs" +DEFAULT_LOG_FILE: str = "log.txt" +DEFAULT_CONFIGURATION_DIR: str = "configs" TASK_FIELD_NAME: str = "task" -class TaskRunner(object): - def run(self) -> None: - raise NotImplementedError() - - -class ConfigurationFileRunner(TaskRunner): - def __init__(self, config_path: str): +class SandboxRunner(TaskRunner): + def __init__(self, config_path: str, sandbox_root_path: str = "outputs") -> None: self.config_path = config_path - module_name = os.path.splitext(config_path)[0].replace("/", ".") - module = import_module(module_name) - self.task: SandboxTask = getattr(module, TASK_FIELD_NAME) + self.sandbox_root_path = sandbox_root_path + + def get_root_folder_path(self) -> str: + return self.sandbox_root_path def run(self) -> None: experiment_ids = identifiers_from_config_file(self.config_path) - experiment_sandbox_path = os.path.join(*experiment_ids) + module_name = ".".join(experiment_ids) + module = import_module(module_name) + task: ITask = getattr(module, TASK_FIELD_NAME) + + pure_experiment_ids = drop_configuration_dir(experiment_ids=experiment_ids) + experiment_sandbox_path = os.path.join(*pure_experiment_ids) + sandbox_folder_path = os.path.join(self.get_root_folder_path(), experiment_sandbox_path) + logger = init_logger(experiment_ids, overwrite=True) logger.info(f"Running task from 
{self.config_path}") - sandbox_folder_path = os.path.join(self.task.get_sandbox_folder_path(), experiment_sandbox_path) if not os.path.exists(sandbox_folder_path) or not os.path.isdir(sandbox_folder_path): os.makedirs(sandbox_folder_path) - self.task.execute(sandbox_folder_path) + task.execute(sandbox_folder_path) + + +def identifiers_from_config_file(filepath: str) -> Sequence[str]: + path = os.path.normpath(filepath) + path_components = path.split(os.sep) + path_components[-1] = os.path.splitext(path_components[-1])[0] + return path_components + + +def drop_configuration_dir(experiment_ids: Sequence[str]) -> Sequence[str]: + config_dir_index = experiment_ids[:-1].index(DEFAULT_CONFIGURATION_DIR) + if config_dir_index != -1: + experiment_ids = experiment_ids[config_dir_index + 1 :] + return experiment_ids + + +def init_logger(experiment_identifiers: Sequence[str], overwrite: bool = True, log_to_stderr: bool = False) -> Logger: + path_components = [DEFAULT_LOG_DIR] + list(experiment_identifiers) + log_path = os.path.join(*path_components) + return configure_logger(log_path, file=DEFAULT_LOG_FILE, overwrite=overwrite, log_to_stderr=log_to_stderr) + + +def configure_logger( + path: str = DEFAULT_LOG_DIR, file: str = DEFAULT_LOG_FILE, overwrite: bool = True, log_to_stderr: bool = False +) -> Logger: + logger = getLogger() + logger.setLevel(INFO) + + logging_path = os.path.join(path) + if not os.path.exists(logging_path): + os.makedirs(logging_path) + + formatter = Formatter(LOGGING_FORMAT) + + fh = FileHandler(os.path.join(logging_path, file), mode="w" if overwrite else "a", encoding="utf-8") + fh.setFormatter(formatter) + logger.addHandler(fh) + + if log_to_stderr: + sh = StreamHandler() + sh.setFormatter(formatter) + logger.addHandler(sh) + + return logger diff --git a/src/language_model/tokenization/trainer.py b/src/language_model/tokenization/trainer.py index ad18371..1aaccca 100644 --- a/src/language_model/tokenization/trainer.py +++ b/src/language_model/tokenization/trainer.py @@ -3,10 +3,10 @@ from tokenizers import ByteLevelBPETokenizer -from ..pipeline import SandboxTask +from ..pipeline import ITask -class ByteLevelBPETokenizerTrainer(SandboxTask): +class ByteLevelBPETokenizerTrainer(ITask): def __init__( self, source_folder_path: str, From e12a87efe0a1a30108e080d1e3c91490a3018d8e Mon Sep 17 00:00:00 2001 From: Paul Khudan Date: Mon, 31 May 2021 15:56:48 +0300 Subject: [PATCH 02/28] combine two logger-initting methods --- src/language_model/runner.py | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/src/language_model/runner.py b/src/language_model/runner.py index 0045ba5..ddacde3 100644 --- a/src/language_model/runner.py +++ b/src/language_model/runner.py @@ -54,22 +54,17 @@ def drop_configuration_dir(experiment_ids: Sequence[str]) -> Sequence[str]: def init_logger(experiment_identifiers: Sequence[str], overwrite: bool = True, log_to_stderr: bool = False) -> Logger: path_components = [DEFAULT_LOG_DIR] + list(experiment_identifiers) log_path = os.path.join(*path_components) - return configure_logger(log_path, file=DEFAULT_LOG_FILE, overwrite=overwrite, log_to_stderr=log_to_stderr) - -def configure_logger( - path: str = DEFAULT_LOG_DIR, file: str = DEFAULT_LOG_FILE, overwrite: bool = True, log_to_stderr: bool = False -) -> Logger: logger = getLogger() logger.setLevel(INFO) - logging_path = os.path.join(path) + logging_path = os.path.join(log_path) if not os.path.exists(logging_path): os.makedirs(logging_path) formatter = Formatter(LOGGING_FORMAT) - fh = 
FileHandler(os.path.join(logging_path, file), mode="w" if overwrite else "a", encoding="utf-8") + fh = FileHandler(os.path.join(logging_path, DEFAULT_LOG_FILE), mode="w" if overwrite else "a", encoding="utf-8") fh.setFormatter(formatter) logger.addHandler(fh) From e1388b5904506248b7c071a57e5e02c300a319da Mon Sep 17 00:00:00 2001 From: Dmitry Chaplinsky Date: Tue, 3 Aug 2021 17:01:04 +0300 Subject: [PATCH 03/28] Update README.md --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index b963812..b49e3e9 100644 --- a/README.md +++ b/README.md @@ -28,8 +28,8 @@ Ukrainian Roberta is released via [HuggingFace Transformers library](https://hug ```python from transformers import pipeline, RobertaForMaskedLM, RobertaTokenizer -model = RobertaForMaskedLM.from_pretrained("ukr-roberta-base") -tokenizer = RobertaTokenizer.from_pretrained("ukr-roberta-base") +model = RobertaForMaskedLM.from_pretrained("youscan/ukr-roberta-base") +tokenizer = RobertaTokenizer.from_pretrained("youscan/ukr-roberta-base") fill_mask = pipeline('fill-mask', model=model, tokenizer=tokenizer) fill_mask("Тарас Шевченко – великий українсьский .") From 8c4adf94d3f05299a7aa8e6dae3bf05c633d9f0d Mon Sep 17 00:00:00 2001 From: vitaliy <1999kvo@gmail.com> Date: Sat, 28 Aug 2021 18:06:24 +0300 Subject: [PATCH 04/28] ukr-gpt, FromIterableTextDataset, GroupTextForCasualLMDataset --- .isort.cfg | 2 +- configs/cyr/gpt/train_model/ukr-gpt.py | 94 +++++++++++++++++++ configs/cyr/gpt/train_tokenizer/ukr-gpt.py | 26 +++++ configs/ukr/train_model/ukr-roberta-base.py | 23 ----- .../ukr/train_tokenizer/ukr-roberta-base.py | 11 --- requirements.txt | 1 + src/language_model/data/dataset.py | 85 ++++++++++++++++- 7 files changed, 204 insertions(+), 38 deletions(-) create mode 100644 configs/cyr/gpt/train_model/ukr-gpt.py create mode 100644 configs/cyr/gpt/train_tokenizer/ukr-gpt.py delete mode 100644 configs/ukr/train_model/ukr-roberta-base.py delete mode 100644 configs/ukr/train_tokenizer/ukr-roberta-base.py diff --git a/.isort.cfg b/.isort.cfg index c270db0..06b1cb9 100644 --- a/.isort.cfg +++ b/.isort.cfg @@ -6,4 +6,4 @@ use_parentheses=True line_length=119 skip_glob=venv/*,stubs/* known_first_party = language_model -known_third_party = ds_shared,pynlple,setuptools,tokenizers,torch,transformers +known_third_party = datasets,ds_shared,pynlple,setuptools,tokenizers,torch,transformers diff --git a/configs/cyr/gpt/train_model/ukr-gpt.py b/configs/cyr/gpt/train_model/ukr-gpt.py new file mode 100644 index 0000000..944f06b --- /dev/null +++ b/configs/cyr/gpt/train_model/ukr-gpt.py @@ -0,0 +1,94 @@ +from datasets import load_dataset +from transformers import ( + GPT2Config, + GPT2LMHeadModel, + IntervalStrategy, + PreTrainedTokenizerFast, + Trainer, + TrainingArguments, +) + +from language_model.data.dataset import DataCollatorForGroupTextForCasualLMDataset, GroupTextForCasualLMDataset +from language_model.modelling.trainer import TransformersTrainTask +from language_model.tokenization.factory import FAST_TOKENIZER_DEFAULT_FILE_NAME + +TOKENIZER_PATH = f"outputs/cyr/gpt/train_tokenizer/ukr-gpt/{FAST_TOKENIZER_DEFAULT_FILE_NAME}" + +MODEL_MAX_LENGTH = 2048 + + +# tokenizer +tokenizer = PreTrainedTokenizerFast( + tokenizer_file=TOKENIZER_PATH, model_max_length=MODEL_MAX_LENGTH, padding_side="right" +) +tokenizer.add_special_tokens({"bos_token": "<|endoftext|>"}) +tokenizer.pad_token = tokenizer.bos_token + +# model +model_config = GPT2Config( + vocab_size=len(tokenizer), + n_positions=MODEL_MAX_LENGTH, + 
n_ctx=MODEL_MAX_LENGTH, + bos_token_id=tokenizer.bos_token_id, +) +model = GPT2LMHeadModel(model_config) + + +# data +# oscar_train = (item["text"] for item in load_dataset("oscar", "unshuffled_deduplicated_uk", split="train")) +# mc4_train = (item["text"] for item in load_dataset("mc4", "uk", split="train")) +# cc100_train = (item["text"] for item in load_dataset("cc100", "uk", split="train")) +# mc4_valid = (item["text"] for item in load_dataset("mc4", "uk", split="validation")) + +oscar_train = (item["text"] for item in load_dataset("oscar", "unshuffled_deduplicated_uk", split="train[:5000]")) +oscar_valid = (item["text"] for item in load_dataset("oscar", "unshuffled_deduplicated_uk", split="train[5000:10000]")) + +train_dataset = GroupTextForCasualLMDataset( + tokenizer=tokenizer, data_sources=[oscar_train], block_size=MODEL_MAX_LENGTH +) +valid_dataset = GroupTextForCasualLMDataset( + tokenizer=tokenizer, data_sources=[oscar_valid], block_size=MODEL_MAX_LENGTH +) +data_collator = DataCollatorForGroupTextForCasualLMDataset() + +# train-iters 500000 +# batch per gpu 4, grad acc 4, whole batch 256 samples == 512k tokens + +training_args = TrainingArguments( + do_train=True, + do_eval=True, + evaluation_strategy=IntervalStrategy.STEPS, + eval_steps=250000, + num_train_epochs=3, + per_device_train_batch_size=4, + gradient_accumulation_steps=4, + per_device_eval_batch_size=4, + output_dir="temp", + overwrite_output_dir=True, + save_steps=250000, + save_total_limit=2, + prediction_loss_only=False, + learning_rate=5e-5, + warmup_ratio=0.004, + fp16=True, + logging_dir="logs", + seed=42, + lr_scheduler_type="cosine", # type: ignore + logging_first_step=True, + logging_steps=500, + label_names=["labels"], + load_best_model_at_end=True, + group_by_length=False, + report_to=["mlflow"], # ??? 
+) + +trainer = Trainer( + model=model, + args=training_args, + train_dataset=train_dataset, + eval_dataset=valid_dataset, + data_collator=data_collator, +) + +print("task") +task = TransformersTrainTask(trainer=trainer) diff --git a/configs/cyr/gpt/train_tokenizer/ukr-gpt.py b/configs/cyr/gpt/train_tokenizer/ukr-gpt.py new file mode 100644 index 0000000..ec44308 --- /dev/null +++ b/configs/cyr/gpt/train_tokenizer/ukr-gpt.py @@ -0,0 +1,26 @@ +from itertools import chain + +from datasets import load_dataset +from tokenizers import Tokenizer, decoders, models, pre_tokenizers, processors, trainers + +from language_model.tokenization.trainer import TrainTokenizerTask + +tokenizer = Tokenizer(models.BPE()) + +tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel(add_prefix_space=True) +tokenizer.decoder = decoders.ByteLevel() +tokenizer.post_processor = processors.ByteLevel(trim_offsets=True) + + +oscar_uk = (item["text"] for item in load_dataset("oscar", "unshuffled_deduplicated_uk", split="train")) +# mc4_uk = (item["text"] for item in load_dataset("mc4", "uk", split="train")) +# cc100_uk = (item["text"] for item in load_dataset("cc100", "uk", split="train")) + +trainer = trainers.BpeTrainer(vocab_size=50264, special_tokens=["<|endoftext|>"]) + +task = TrainTokenizerTask( + tokenizer=tokenizer, + # iterator=chain(oscar_uk, mc4_uk, cc100_uk), + iterator=chain(oscar_uk), + trainer=trainer, +) diff --git a/configs/ukr/train_model/ukr-roberta-base.py b/configs/ukr/train_model/ukr-roberta-base.py deleted file mode 100644 index fd0d8f0..0000000 --- a/configs/ukr/train_model/ukr-roberta-base.py +++ /dev/null @@ -1,23 +0,0 @@ -from transformers import RobertaConfig, RobertaForMaskedLM, RobertaTokenizer - -from language_model.modelling.trainer import RobertaForMaskedLMTrainTask - -_model_config = RobertaConfig( - vocab_size=52000, - max_position_embeddings=514, - num_attention_heads=12, - num_hidden_layers=12, - type_vocab_size=1, - intermediate_size=3072, -) - -_model = RobertaForMaskedLM(_model_config) - -_tokenizer = RobertaTokenizer.from_pretrained("outputs/ukr/train_tokenizer/ukr-roberta-base/tokenizer", max_len=512) - -task = RobertaForMaskedLMTrainTask( - file_path="data/ukr/aggregated_data/ukr-roberta-base/data.txt", - model=_model, - tokenizer=_tokenizer, - batch_size_per_gpu=40, -) diff --git a/configs/ukr/train_tokenizer/ukr-roberta-base.py b/configs/ukr/train_tokenizer/ukr-roberta-base.py deleted file mode 100644 index cf9beeb..0000000 --- a/configs/ukr/train_tokenizer/ukr-roberta-base.py +++ /dev/null @@ -1,11 +0,0 @@ -from tokenizers.implementations import ByteLevelBPETokenizer - -from language_model.tokenization.trainer import ByteLevelBPETokenizerTrainer - -task = ByteLevelBPETokenizerTrainer( - source_folder_path="data/ukr/data/wiki_oscar_data/", - tokenizer=ByteLevelBPETokenizer(), - vocab_size=52000, - min_frequency=5, - special_tokens=["", "", "", "", ""], -) diff --git a/requirements.txt b/requirements.txt index 67488bb..548a556 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,3 +1,4 @@ +datasets==1.11.0 pyNlple==0.7.5 tokenizers==0.10.1 torch==1.8.1 diff --git a/src/language_model/data/dataset.py b/src/language_model/data/dataset.py index 307915d..366ed51 100644 --- a/src/language_model/data/dataset.py +++ b/src/language_model/data/dataset.py @@ -2,13 +2,13 @@ import logging import math from itertools import chain -from typing import Dict, Iterable, Iterator, List, Optional, Sequence +from typing import Dict, Iterable, Iterator, List, Optional, Sequence, Union import torch 
from torch._utils import _accumulate from torch.utils.data import Dataset from torch.utils.data.dataset import T_co -from transformers import PreTrainedTokenizer +from transformers import PreTrainedTokenizer, PreTrainedTokenizerFast class LazyDataset(Dataset): @@ -76,7 +76,7 @@ def _read_chunk(self) -> Iterator[List[str]]: def __linit_entries__(self) -> Sequence[T_co]: logging.info(f"Creating features from dataset files: {self.file_paths}") - entries: List[List[Dict[str, torch.tensor]]] = [] + entries: List[List[Dict[str, torch.Tensor]]] = [] for lines in self._read_chunk(): entries.append(self._extract_batch(lines)) logging.info(f"Currently read total {sum(map(len, entries))} at end") @@ -126,3 +126,82 @@ def __len__(self) -> int: def split_lazy_dataset(dataset: LazyDataset, portions: Sequence[float]) -> List[LazySubset]: portions_provider = Portions(dataset=dataset, portions=portions) return [LazySubset(dataset, portions_provider=portions_provider, portion_id=i) for i in range(len(portions))] + + +class FromIterableTextDataset(LazyDataset): + def __init__( + self, + tokenizer: Union[PreTrainedTokenizer, PreTrainedTokenizerFast], + data_sources: Iterable[Iterable[str]], + block_size: int, + return_overflowing_tokens: bool = True, + process_batch_size: int = 8192, + ) -> None: + super().__init__() + self.return_overflowing_tokens = return_overflowing_tokens + self.tokenizer = tokenizer + self.data_sources = data_sources + self.block_size = block_size + self.process_batch_size = process_batch_size + + def _read_chunk(self) -> Iterator[List[str]]: + lines: List[str] = [] + for data_source in self.data_sources: + for line in data_source: + if len(line) > 0 and not line.isspace(): + lines.append(line) + + if len(lines) == self.process_batch_size: + yield lines + lines = [] + if len(lines) > 0: + yield lines + + def __linit_entries__(self) -> Sequence[T_co]: + logging.info(f"Creating features from data_sources") + entries: List[List[Dict[str, torch.Tensor]]] = [] + for lines in self._read_chunk(): + entries.append(self._extract_batch(lines)) + logging.info(f"Currently read total {sum(map(len, entries))} at end") + logging.info("Extracted and converted training data to `input_ids`.") + return list(chain.from_iterable(entries)) # type: ignore + + def _extract_batch(self, lines: List[str]) -> List[Dict[str, torch.Tensor]]: + raise NotImplementedError() + + +class GroupTextForCasualLMDataset(FromIterableTextDataset): + def _extract_batch(self, lines: List[str]) -> List[Dict[str, torch.Tensor]]: + batch_encoding: List[Dict[str, torch.Tensor]] = [] + current_line = [self.tokenizer.bos_token] + for line in lines: + tokens = self.tokenizer.tokenize(line) + if len(current_line) + len(tokens) + 1 <= self.block_size: + current_line.append(self.tokenizer.bos_token) + current_line.extend(tokens) + elif len(current_line) == self.block_size: + input_ids = self.tokenizer.convert_tokens_to_ids(current_line) + batch_encoding.append({"input_ids": torch.tensor(input_ids), "labels": torch.tensor(input_ids)}) + current_line = [self.tokenizer.bos_token] + tokens + else: + current_line.append(self.tokenizer.bos_token) + n_tokens_to_add = self.block_size - len(current_line) + current_line.extend(tokens[:n_tokens_to_add]) + input_ids = self.tokenizer.convert_tokens_to_ids(current_line) + batch_encoding.append({"input_ids": torch.tensor(input_ids), "labels": torch.tensor(input_ids)}) + + tokens = tokens[n_tokens_to_add:] + while len(tokens) >= self.block_size: + input_ids = self.tokenizer.convert_tokens_to_ids(tokens[: 
self.block_size]) + batch_encoding.append({"input_ids": torch.tensor(input_ids), "labels": torch.tensor(input_ids)}) + tokens = tokens[self.block_size :] + + current_line = tokens + return batch_encoding + + +class DataCollatorForGroupTextForCasualLMDataset: + def __call__(self, examples: List[Dict[str, torch.Tensor]]) -> Dict[str, torch.Tensor]: + input_ids = torch.stack([example["input_ids"] for example in examples], dim=0) + labels = torch.stack([example["labels"] for example in examples], dim=0) + return {"input_ids": input_ids, "labels": labels} From fd92a6ba7a01a54781d5e1ae49f0c95a466daf2b Mon Sep 17 00:00:00 2001 From: vitaliy <1999kvo@gmail.com> Date: Tue, 31 Aug 2021 13:15:05 +0300 Subject: [PATCH 05/28] MinHashLSH deduplication, Wiki --- configs/cyr/gpt/load_data/wiki.py | 3 ++ requirements.txt | 5 +++ src/language_model/data/extract.py | 50 +++++++++++++++++++++++++++++- src/language_model/data/load.py | 10 ++++++ 4 files changed, 67 insertions(+), 1 deletion(-) create mode 100644 configs/cyr/gpt/load_data/wiki.py diff --git a/configs/cyr/gpt/load_data/wiki.py b/configs/cyr/gpt/load_data/wiki.py new file mode 100644 index 0000000..3323178 --- /dev/null +++ b/configs/cyr/gpt/load_data/wiki.py @@ -0,0 +1,3 @@ +from language_model.data.load import WikiDownloadTask + +task = WikiDownloadTask(url="https://dumps.wikimedia.org/ukwiki/latest/ukwiki-latest-pages-articles.xml.bz2") diff --git a/requirements.txt b/requirements.txt index 548a556..7f050ed 100644 --- a/requirements.txt +++ b/requirements.txt @@ -3,3 +3,8 @@ pyNlple==0.7.5 tokenizers==0.10.1 torch==1.8.1 transformers==4.4.2 +wikiextractor==3.0.4 +bs4==0.0.1 +wget==3.2 +numpy==1.19.5 +Cython==3.0.0a9 diff --git a/src/language_model/data/extract.py b/src/language_model/data/extract.py index 3f80bca..0c7130b 100644 --- a/src/language_model/data/extract.py +++ b/src/language_model/data/extract.py @@ -3,16 +3,23 @@ import os from collections import Hashable as HashableType from collections import OrderedDict -from typing import Any, Callable, Dict, Hashable, Iterable, Iterator, Optional +from pathlib import Path +from typing import Any, Callable, Dict, Hashable, Iterable, Iterator, Optional, Union, List, Set, Tuple +import numpy as np from ds_shared.loading import load_pickle from pynlple.data.corpus import FilteringSource, JsonFieldSource, MappingSource, SplittingSource, StackingSource from pynlple.data.filesource import FilePathSource from pynlple.data.source import Source from pynlple.processing.preprocessor import BoldTagReplacer, IPreprocessor, StackingPreprocessor +from bs4 import BeautifulSoup + from ..pipeline import ITask +from lsh import minhash, cache # TODO: add installation + + MIN_TEXT_LEN = 10 MIN_TEXT_TOKEN_LENGTH = 2 @@ -83,6 +90,22 @@ def __iter__(self) -> Iterator[Any]: return iter(load_pickle(self.pickle_filepath)) +class PostWikiExtractorDataSource(Source): + """Provides wiki articles preprocessed by `python -m wikiextractor.WikiExtractor dump.xml.bz2 ...`""" + def __init__(self, article_dir: str) -> None: + super().__init__() + self.article_dir = Path(article_dir) + + def __iter__(self) -> Iterator[Any]: + for subdir in self.article_dir.glob("*"): + for file in subdir.glob("*"): + with open(file) as f: + data = f.read() + soup = BeautifulSoup(data, "lxml") + for doc in soup.find_all("doc"): + yield {"id": doc["id"], "title": doc["title"], "url": doc["url"], "text": doc.text} + + class ExtractTextsFromData(ITask): def __init__( self, @@ -138,3 +161,28 @@ def execute(self, environment_path: str) -> None: 
output_stream.write("\n") lines += 1 logging.info(f"Completed extraction of texts: {lines} lines written to file.") + + +class MinHashLSHDeduplicator: + def __init__(self, seeds: Union[int, np.ndarray], char_ngram: int, bands: int): + hasher = minhash.MinHasher(seeds=seeds, char_ngram=char_ngram) + self.lsh_cache = cache.Cache(num_bands=bands, hasher=hasher) + + def deduplicate(self, docs: List[str], min_jaccard: float, clear: bool = True) -> List[str]: + if clear: + self.lsh_cache.clear() + + added = set() + deduplicated_docs = [] + for i, j in self.get_all_duplicates(docs, min_jaccard): + if i not in added and j not in added: + added.add(i) + added.add(j) + deduplicated_docs.append(docs[i]) + + return deduplicated_docs + + def get_all_duplicates(self, docs: List[str], min_jaccard: float) -> Set[Tuple[int, int]]: + for i, doc in enumerate(docs): + self.lsh_cache.add_doc(doc, i) + return self.lsh_cache.get_all_duplicates(min_jaccard) diff --git a/src/language_model/data/load.py b/src/language_model/data/load.py index ce1bfa5..2bbca11 100644 --- a/src/language_model/data/load.py +++ b/src/language_model/data/load.py @@ -3,6 +3,7 @@ import os from typing import Any, Callable, Dict, Optional +import wget from ds_shared.download import YsDownloader from ds_shared.saving import save_pickle @@ -43,3 +44,12 @@ def execute(self, environment_path: str) -> None: mentions_chunk = downloader.download(self.topic_id, last_mention_id=last_mention_id) logging.info("Download completed.") + + +class WikiDownloadTask(ITask): + def __init__(self, url: str): + self.url = url + + def execute(self, environment_path: str) -> None: + wget.download(self.url, os.path.join(environment_path, self.url.split("/")[-1])) + logging.info("Download completed.") From e4ae992005e14c83f300fec6aa2278b93acca449 Mon Sep 17 00:00:00 2001 From: vitaliy <1999kvo@gmail.com> Date: Tue, 31 Aug 2021 13:15:19 +0300 Subject: [PATCH 06/28] MinHashLSH deduplication, Wiki --- .gitignore | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.gitignore b/.gitignore index 3df03d6..b27c139 100644 --- a/.gitignore +++ b/.gitignore @@ -94,3 +94,5 @@ apex/ /data/ results/ outputs/ + +LSH/ From e41d5b19bffc5de673405a9fb646ab80275f1bdc Mon Sep 17 00:00:00 2001 From: vitaliy <1999kvo@gmail.com> Date: Tue, 31 Aug 2021 13:18:19 +0300 Subject: [PATCH 07/28] configs --- .gitignore | 4 ++-- .isort.cfg | 2 +- configs/cyr/gpt/load_data/in-house.py | 9 +++++++++ configs/cyr/gpt/train_model/ukr-gpt.py | 16 +++++++--------- 4 files changed, 19 insertions(+), 12 deletions(-) create mode 100644 configs/cyr/gpt/load_data/in-house.py diff --git a/.gitignore b/.gitignore index b27c139..360ffac 100644 --- a/.gitignore +++ b/.gitignore @@ -91,8 +91,8 @@ ENV/ .idea/ .mypy_cache/ apex/ +LSH/ /data/ results/ outputs/ - -LSH/ +lab/ diff --git a/.isort.cfg b/.isort.cfg index 06b1cb9..615da3c 100644 --- a/.isort.cfg +++ b/.isort.cfg @@ -6,4 +6,4 @@ use_parentheses=True line_length=119 skip_glob=venv/*,stubs/* known_first_party = language_model -known_third_party = datasets,ds_shared,pynlple,setuptools,tokenizers,torch,transformers +known_third_party = bs4,datasets,ds_shared,numpy,pynlple,setuptools,tokenizers,torch,transformers,wget diff --git a/configs/cyr/gpt/load_data/in-house.py b/configs/cyr/gpt/load_data/in-house.py new file mode 100644 index 0000000..93d005f --- /dev/null +++ b/configs/cyr/gpt/load_data/in-house.py @@ -0,0 +1,9 @@ +from language_model.data.load import YSDataDownloadTask +from language_model.data.processing import LightweightMention + +task = 
YSDataDownloadTask( + credentials_path="credentials", + topic_id=275648, + query={"from": "2019-01-01", "to": "2021-09-01", "sanitize": False, "dedup": True}, + mention_processor=LightweightMention(), +) diff --git a/configs/cyr/gpt/train_model/ukr-gpt.py b/configs/cyr/gpt/train_model/ukr-gpt.py index 944f06b..25c6104 100644 --- a/configs/cyr/gpt/train_model/ukr-gpt.py +++ b/configs/cyr/gpt/train_model/ukr-gpt.py @@ -14,31 +14,29 @@ TOKENIZER_PATH = f"outputs/cyr/gpt/train_tokenizer/ukr-gpt/{FAST_TOKENIZER_DEFAULT_FILE_NAME}" -MODEL_MAX_LENGTH = 2048 +MODEL_MAX_LENGTH = 1024 # tokenizer tokenizer = PreTrainedTokenizerFast( tokenizer_file=TOKENIZER_PATH, model_max_length=MODEL_MAX_LENGTH, padding_side="right" ) -tokenizer.add_special_tokens({"bos_token": "<|endoftext|>"}) +tokenizer.add_special_tokens({"bos_token": "<|endoftext|>"}) # TODO: tokenizer saving +# basically `pad_token` wont be used, as DataCollatorForGroupTextForCasualLMDataset pack sequences up to max_length +# but to avoid an error within DataCollatorForGroupTextForCasualLMDataset tokenizer.pad_token = tokenizer.bos_token # model -model_config = GPT2Config( - vocab_size=len(tokenizer), - n_positions=MODEL_MAX_LENGTH, - n_ctx=MODEL_MAX_LENGTH, - bos_token_id=tokenizer.bos_token_id, -) +model_config = GPT2Config(vocab_size=len(tokenizer), bos_token_id=tokenizer.bos_token_id) model = GPT2LMHeadModel(model_config) -# data +# data # TODO # oscar_train = (item["text"] for item in load_dataset("oscar", "unshuffled_deduplicated_uk", split="train")) # mc4_train = (item["text"] for item in load_dataset("mc4", "uk", split="train")) # cc100_train = (item["text"] for item in load_dataset("cc100", "uk", split="train")) # mc4_valid = (item["text"] for item in load_dataset("mc4", "uk", split="validation")) +# wiki_train = (item["text"] for item in PostWikiExtractorDataSource(WIKI_EXTRACTED_PATH)) oscar_train = (item["text"] for item in load_dataset("oscar", "unshuffled_deduplicated_uk", split="train[:5000]")) oscar_valid = (item["text"] for item in load_dataset("oscar", "unshuffled_deduplicated_uk", split="train[5000:10000]")) From 35139701a20358365d6e493d94edbc11f41ddb66 Mon Sep 17 00:00:00 2001 From: vitaliy <1999kvo@gmail.com> Date: Mon, 6 Sep 2021 14:40:28 +0300 Subject: [PATCH 08/28] updated with experiments --- .gitignore | 1 + configs/cyr/gpt/load_data/in-house.py | 2 +- configs/cyr/gpt/train_model/ukr-gpt.py | 43 ++-- configs/cyr/gpt/train_tokenizer/ukr-gpt.py | 17 +- requirements.txt | 1 + requirements_installation.sh | 6 + src/language_model/data/extract.py | 279 ++++++++++++++++----- src/language_model/tokenization/trainer.py | 2 +- 8 files changed, 256 insertions(+), 95 deletions(-) diff --git a/.gitignore b/.gitignore index 360ffac..5f3943a 100644 --- a/.gitignore +++ b/.gitignore @@ -96,3 +96,4 @@ LSH/ results/ outputs/ lab/ +credentials diff --git a/configs/cyr/gpt/load_data/in-house.py b/configs/cyr/gpt/load_data/in-house.py index 93d005f..a6acb90 100644 --- a/configs/cyr/gpt/load_data/in-house.py +++ b/configs/cyr/gpt/load_data/in-house.py @@ -4,6 +4,6 @@ task = YSDataDownloadTask( credentials_path="credentials", topic_id=275648, - query={"from": "2019-01-01", "to": "2021-09-01", "sanitize": False, "dedup": True}, + query={"from": "2019-01-01", "to": "2021-09-01", "sanitize": False, "dedup": False}, mention_processor=LightweightMention(), ) diff --git a/configs/cyr/gpt/train_model/ukr-gpt.py b/configs/cyr/gpt/train_model/ukr-gpt.py index 25c6104..a5eda37 100644 --- a/configs/cyr/gpt/train_model/ukr-gpt.py +++ 
b/configs/cyr/gpt/train_model/ukr-gpt.py @@ -1,4 +1,3 @@ -from datasets import load_dataset from transformers import ( GPT2Config, GPT2LMHeadModel, @@ -9,11 +8,15 @@ ) from language_model.data.dataset import DataCollatorForGroupTextForCasualLMDataset, GroupTextForCasualLMDataset -from language_model.modelling.trainer import TransformersTrainTask +from language_model.data.extract import LineByLineSource, ShuffledSources +from language_model.modelling.trainer import TransformersTrainTaskWithTokenizerSaving from language_model.tokenization.factory import FAST_TOKENIZER_DEFAULT_FILE_NAME TOKENIZER_PATH = f"outputs/cyr/gpt/train_tokenizer/ukr-gpt/{FAST_TOKENIZER_DEFAULT_FILE_NAME}" +IN_HOUSE_TRAIN_DATA_PATH = "outputs/cyr/gpt/extract_texts/in-house-data/texts.txt" +OPEN_TRAIN_DATA_PATH = "outputs/cyr/gpt/extract_texts/train-validation-open-data/train_shuffled.txt" +VALIDATION_DATA_PATH = "outputs/cyr/gpt/extract_texts/train-validation-open-data/validation.txt" MODEL_MAX_LENGTH = 1024 @@ -21,7 +24,7 @@ tokenizer = PreTrainedTokenizerFast( tokenizer_file=TOKENIZER_PATH, model_max_length=MODEL_MAX_LENGTH, padding_side="right" ) -tokenizer.add_special_tokens({"bos_token": "<|endoftext|>"}) # TODO: tokenizer saving +tokenizer.add_special_tokens({"bos_token": "<|endoftext|>"}) # basically `pad_token` wont be used, as DataCollatorForGroupTextForCasualLMDataset pack sequences up to max_length # but to avoid an error within DataCollatorForGroupTextForCasualLMDataset tokenizer.pad_token = tokenizer.bos_token @@ -31,42 +34,37 @@ model = GPT2LMHeadModel(model_config) -# data # TODO -# oscar_train = (item["text"] for item in load_dataset("oscar", "unshuffled_deduplicated_uk", split="train")) -# mc4_train = (item["text"] for item in load_dataset("mc4", "uk", split="train")) -# cc100_train = (item["text"] for item in load_dataset("cc100", "uk", split="train")) -# mc4_valid = (item["text"] for item in load_dataset("mc4", "uk", split="validation")) -# wiki_train = (item["text"] for item in PostWikiExtractorDataSource(WIKI_EXTRACTED_PATH)) - -oscar_train = (item["text"] for item in load_dataset("oscar", "unshuffled_deduplicated_uk", split="train[:5000]")) -oscar_valid = (item["text"] for item in load_dataset("oscar", "unshuffled_deduplicated_uk", split="train[5000:10000]")) +# data +train_data_source = ShuffledSources( + (text for text in LineByLineSource(IN_HOUSE_TRAIN_DATA_PATH)), + (text for text in LineByLineSource(OPEN_TRAIN_DATA_PATH)) +) +validation_data_path = LineByLineSource(VALIDATION_DATA_PATH) train_dataset = GroupTextForCasualLMDataset( - tokenizer=tokenizer, data_sources=[oscar_train], block_size=MODEL_MAX_LENGTH + tokenizer=tokenizer, data_source=train_data_source, block_size=MODEL_MAX_LENGTH ) valid_dataset = GroupTextForCasualLMDataset( - tokenizer=tokenizer, data_sources=[oscar_valid], block_size=MODEL_MAX_LENGTH + tokenizer=tokenizer, data_source=validation_data_path, block_size=MODEL_MAX_LENGTH ) data_collator = DataCollatorForGroupTextForCasualLMDataset() -# train-iters 500000 -# batch per gpu 4, grad acc 4, whole batch 256 samples == 512k tokens training_args = TrainingArguments( do_train=True, do_eval=True, evaluation_strategy=IntervalStrategy.STEPS, eval_steps=250000, - num_train_epochs=3, - per_device_train_batch_size=4, - gradient_accumulation_steps=4, + num_train_epochs=5, + per_device_train_batch_size=8, # overall bs = 8 * 8 * num_gpus (GPT2 used 512) + gradient_accumulation_steps=8, per_device_eval_batch_size=4, output_dir="temp", overwrite_output_dir=True, save_steps=250000, 
save_total_limit=2, prediction_loss_only=False, - learning_rate=5e-5, + learning_rate=0.0002, # (was manually tuned in GPT2 on held-out validation) warmup_ratio=0.004, fp16=True, logging_dir="logs", @@ -77,7 +75,7 @@ label_names=["labels"], load_best_model_at_end=True, group_by_length=False, - report_to=["mlflow"], # ??? + report_to=["mlflow"], ) trainer = Trainer( @@ -88,5 +86,4 @@ data_collator=data_collator, ) -print("task") -task = TransformersTrainTask(trainer=trainer) +task = TransformersTrainTaskWithTokenizerSaving(trainer=trainer) diff --git a/configs/cyr/gpt/train_tokenizer/ukr-gpt.py b/configs/cyr/gpt/train_tokenizer/ukr-gpt.py index ec44308..c0250d3 100644 --- a/configs/cyr/gpt/train_tokenizer/ukr-gpt.py +++ b/configs/cyr/gpt/train_tokenizer/ukr-gpt.py @@ -1,8 +1,8 @@ -from itertools import chain +from itertools import islice -from datasets import load_dataset from tokenizers import Tokenizer, decoders, models, pre_tokenizers, processors, trainers +from language_model.data.extract import LineByLineSource from language_model.tokenization.trainer import TrainTokenizerTask tokenizer = Tokenizer(models.BPE()) @@ -12,15 +12,16 @@ tokenizer.post_processor = processors.ByteLevel(trim_offsets=True) -oscar_uk = (item["text"] for item in load_dataset("oscar", "unshuffled_deduplicated_uk", split="train")) -# mc4_uk = (item["text"] for item in load_dataset("mc4", "uk", split="train")) -# cc100_uk = (item["text"] for item in load_dataset("cc100", "uk", split="train")) - +TRAIN_DATA_PATH = "outputs/cyr/gpt/extract_texts/train-validation-open-data/train.txt" +NUM_TRAIN_LINES = 1_000_000 +TRAIN_SAMPLING_STEP = 200 +train_data_source = islice( + (line for i, line in enumerate(LineByLineSource(TRAIN_DATA_PATH)) if i % TRAIN_SAMPLING_STEP == 0), NUM_TRAIN_LINES +) trainer = trainers.BpeTrainer(vocab_size=50264, special_tokens=["<|endoftext|>"]) task = TrainTokenizerTask( tokenizer=tokenizer, - # iterator=chain(oscar_uk, mc4_uk, cc100_uk), - iterator=chain(oscar_uk), + iterator=train_data_source, trainer=trainer, ) diff --git a/requirements.txt b/requirements.txt index 7f050ed..3b44691 100644 --- a/requirements.txt +++ b/requirements.txt @@ -8,3 +8,4 @@ bs4==0.0.1 wget==3.2 numpy==1.19.5 Cython==3.0.0a9 +lxml==4.6.3 diff --git a/requirements_installation.sh b/requirements_installation.sh index 7e94f07..1d13a40 100644 --- a/requirements_installation.sh +++ b/requirements_installation.sh @@ -9,6 +9,12 @@ pip install -r requirements.dev.txt pip install -v --no-cache-dir --global-option="--cpp_ext" --global-option="--cuda_ext" ./ ) +( + git clone https://github.com/mattilyra/LSH || { echo "Failed to download and install LSH"; exit 1; } + cd LSH && \ + python setup.py install +) + pip install -e . 
pre-commit install diff --git a/src/language_model/data/extract.py b/src/language_model/data/extract.py index 0c7130b..34ad88f 100644 --- a/src/language_model/data/extract.py +++ b/src/language_model/data/extract.py @@ -1,25 +1,26 @@ -import io +import gc import logging -import os +import multiprocessing +import random from collections import Hashable as HashableType from collections import OrderedDict +from concurrent.futures import ProcessPoolExecutor +from itertools import islice from pathlib import Path -from typing import Any, Callable, Dict, Hashable, Iterable, Iterator, Optional, Union, List, Set, Tuple +from typing import Any, Callable, Dict, Hashable, Iterable, Iterator, Optional, Union, List, Set, Tuple, Generator import numpy as np +from bs4 import BeautifulSoup from ds_shared.loading import load_pickle -from pynlple.data.corpus import FilteringSource, JsonFieldSource, MappingSource, SplittingSource, StackingSource +from lsh import minhash, cache +from pynlple.data.corpus import FilteringSource, JsonFieldSource, MappingSource, StackingSource from pynlple.data.filesource import FilePathSource from pynlple.data.source import Source -from pynlple.processing.preprocessor import BoldTagReplacer, IPreprocessor, StackingPreprocessor - -from bs4 import BeautifulSoup +from pynlple.processing.preprocessor import IPreprocessor +from .utils import write_to_texts_file, write_to_train_val_files from ..pipeline import ITask -from lsh import minhash, cache # TODO: add installation - - MIN_TEXT_LEN = 10 MIN_TEXT_TOKEN_LENGTH = 2 @@ -81,6 +82,37 @@ def __iter__(self) -> Iterator[Any]: logging.info(f"f={id(self.feature_extractor)} skipped {skipped}/{total}") +class LineByLineSource(Source): + def __init__(self, text_filepath: str) -> None: + super().__init__() + self.text_filepath = text_filepath + + def __iter__(self) -> Iterator[str]: + with open(self.text_filepath, "r") as f: + for line in f: + line = line.strip() + if line: + yield line + + +class ShuffledSources(Source): + def __init__(self, *sources: Generator[str, None, None]) -> None: + self.sources = list(sources) + + def __iter__(self) -> Iterable[str]: + return self + + def __next__(self) -> str: + if not self.sources: + raise StopIteration + source_id = random.choice(range(len(self.sources))) + try: + return next(iter(self.sources[source_id])) + except StopIteration: + self.sources.pop(source_id) + return next(self) + + class PickleDataSource(Source): def __init__(self, pickle_filepath: str) -> None: super().__init__() @@ -99,90 +131,213 @@ def __init__(self, article_dir: str) -> None: def __iter__(self) -> Iterator[Any]: for subdir in self.article_dir.glob("*"): for file in subdir.glob("*"): - with open(file) as f: + with open(file, "r") as f: data = f.read() soup = BeautifulSoup(data, "lxml") for doc in soup.find_all("doc"): yield {"id": doc["id"], "title": doc["title"], "url": doc["url"], "text": doc.text} +class FromLoadedYsDataSource(Source): + def __init__(self, source_folder_paths: Iterable[str], preprocessor: Optional[IPreprocessor] = None): + self.source_folder_paths = source_folder_paths + self.preprocessor = preprocessor + + def __iter__(self) -> Iterator[str]: + filepath_source = FilePathSource(paths=self.source_folder_paths, extension_suffix=".p") + json_data_source = StackingSource([PickleDataSource(path) for path in filepath_source]) + subtitles_filtering_source = FilteringSource( + json_data_source, condition=lambda mention: "subtitles" not in mention.get("contentTypes", set()) + ) + text_hash_filtered_source = 
CacheDeduplicatingSource(
+            subtitles_filtering_source, cache_size=100000, refresh=False, feature_extractor=pick_text_hash, log=20000
+        )
+        text_source = JsonFieldSource(text_hash_filtered_source, key="text", default="")
+        if self.preprocessor is not None:
+            yield from MappingSource(text_source, function=self.preprocessor.preprocess)
+        else:
+            yield from text_source
+
+
+class MinHashLSHDeduplicator:
+    def __init__(self, seeds: Union[int, np.ndarray], char_ngram: int, bands: int, workers: int = -1):
+        self.workers = multiprocessing.cpu_count() if workers == -1 else workers
+        hasher = minhash.MinHasher(seeds=seeds, char_ngram=char_ngram, random_state=42)
+        self.lsh_cache = cache.Cache(num_bands=bands, hasher=hasher)
+
+    def deduplicate(self, docs: List[str], min_jaccard: float, clear: bool = True) -> List[str]:
+        if clear:
+            self.lsh_cache.clear()
+
+        duplicate_ids = set()
+        keep_form_duplicate_ids = set()
+        for i, j in self.get_all_duplicates(docs, min_jaccard):
+            if i not in duplicate_ids and j not in duplicate_ids:
+                keep_form_duplicate_ids.add(i)
+            elif i in keep_form_duplicate_ids and j in keep_form_duplicate_ids:
+                keep_form_duplicate_ids.remove(j)
+            duplicate_ids.add(i)
+            duplicate_ids.add(j)
+
+        keep = set(range(len(docs))) - duplicate_ids | keep_form_duplicate_ids
+
+        return [docs[i] for i in keep]
+
+    def in_memory_batch_deduplicate(self, docs: Iterable[str], min_jaccard: float, batch_size: int) -> Iterable[str]:
+        batch_docs = list(islice(docs, batch_size))
+        while batch_docs:
+            batch_docs = self.deduplicate(batch_docs, min_jaccard=min_jaccard, clear=True)
+            gc.collect()
+            add_into_batch = batch_size - len(batch_docs)
+            if add_into_batch > 0:
+                new_docs = list(islice(docs, add_into_batch))
+                if not new_docs:
+                    break
+                batch_docs.extend(new_docs)
+            else:
+                yield from batch_docs
+                batch_docs = list(islice(docs, batch_size))
+        yield from batch_docs
+
+    def lsh_batch_deduplicate(
+        self, docs: Iterable[str], min_jaccard: float, batch_size: int, clear: bool = True
+    ) -> Iterable[str]:
+        """
+        batch_size: maximum number of docs deduplicated at a time; the `batch_docs` list is extended
+        until it holds `batch_size` unique docs
+        """
+        if clear:
+            self.lsh_cache.clear()
+
+        start_id, end_id = 0, batch_size
+        duplicate_ids, keep_form_duplicate_ids = set(), set()
+
+        batch_docs = list(islice(docs, batch_size))
+
+        while batch_docs:
+
+            new_duplicate_ids = set()
+            # batch_docs[start_id:] to process just recently appended docs,
+            # start_id = start_id to set new ids for recently appended docs
+            for i, j in self.get_all_duplicates(batch_docs[start_id:], min_jaccard, start_id=start_id):
+                if i not in duplicate_ids and j not in duplicate_ids:
+                    keep_form_duplicate_ids.add(i)
+                elif i in keep_form_duplicate_ids and j in keep_form_duplicate_ids:
+                    keep_form_duplicate_ids.remove(j)
+                duplicate_ids.add(i)
+                duplicate_ids.add(j)
+                new_duplicate_ids.add(i)
+                new_duplicate_ids.add(j)
+
+            # new_duplicate_ids - to clear duplicates from recently appended
+            # docs, as previous duplicate ids have already been cleared
+            drop_ids = new_duplicate_ids - keep_form_duplicate_ids
+
+            if drop_ids:
+                for i in drop_ids:
+                    self.lsh_cache.remove_id(i)
+
+                start_id = end_id
+                end_id = start_id + len(drop_ids)
+            else:
+                # all non duplicated + keep_form_duplicate_ids
+                for i in set(range(len(batch_docs))) - duplicate_ids | keep_form_duplicate_ids:
+                    yield batch_docs[i]
+
+                # new batch
+                self.lsh_cache.clear()
+                start_id, end_id = 0, batch_size
+                duplicate_ids, keep_form_duplicate_ids = set(), set()
+                batch_docs = []
+
+            # if drop_ids: we append next len(drop_ids) examples
+            batch_docs.extend(islice(docs, end_id - start_id))
+
+        if batch_docs:
+            # all non duplicated + keep_form_duplicate_ids
+            for i in set(range(len(batch_docs))) - duplicate_ids | keep_form_duplicate_ids:
+                yield batch_docs[i]
+
+    def get_all_duplicates(self, docs: Iterable[str], min_jaccard: float, start_id: int = 0) -> Set[Tuple[int, int]]:
+        self._cache_texts_parallel(docs, start_id=start_id)
+        return self.lsh_cache.get_all_duplicates(min_jaccard)
+
+    def _cache_texts(self, docs: Iterable[str], start_id: int = 0) -> None:
+        for i, doc in enumerate(docs, start_id):
+            self.lsh_cache.add_doc(doc, i)
+
+    def _cache_texts_parallel(self, docs: Iterable[str], start_id: int = 0) -> None:
+        encoded_docs = (doc.encode("utf8") for doc in docs)
+        with ProcessPoolExecutor(max_workers=self.workers) as executor:
+            for i, fingerprint in enumerate(executor.map(self.lsh_cache.hasher.fingerprint, encoded_docs), start_id):
+                self.lsh_cache.add_fingerprint(fingerprint, i)
+
+
 class ExtractTextsFromData(ITask):
     def __init__(
         self,
-        source_folder_paths: Iterable[str],
+        text_source: Iterable[str],
         preprocessor: Optional[IPreprocessor] = None,
         min_text_length: int = MIN_TEXT_LEN,
         min_text_token_length: int = MIN_TEXT_TOKEN_LENGTH,
+        cache_size: int = 100_000
     ) -> None:
         super().__init__()
-        preprocessors = [BoldTagReplacer()]
-        if preprocessor is not None:
-            preprocessors.append(preprocessor)
-        self.preprocessor = StackingPreprocessor(preprocessor_list=preprocessors)
+        self.preprocessor = preprocessor
         self.min_text_length = min_text_length
         self.min_text_token_length = min_text_token_length
-        self.source_folder_paths = source_folder_paths
+        self.text_source = text_source
+        self.cache_size = cache_size
 
     def execute(self, environment_path: str) -> None:
-        filepath_source = FilePathSource(paths=self.source_folder_paths, extension_suffix=".p")
-        json_data_source = StackingSource([PickleDataSource(path) for path in filepath_source])
-        subtitles_filtering_source = FilteringSource(
-            json_data_source, condition=lambda mention: "subtitles" not in mention.get("contentTypes", set())
-        )
-        text_hash_filtered_source = CacheDeduplicatingSource(
-            subtitles_filtering_source, cache_size=100000, refresh=False, feature_extractor=pick_text_hash, log=20000
-        )
-        text_source = JsonFieldSource(text_hash_filtered_source, key="text", default="")
-        line_text_source = SplittingSource(text_source, splitting_function=str.splitlines)
-        processed_text_source = MappingSource(line_text_source, function=self.preprocessor.preprocess)
-        short_text_filtered_source = FilteringSource(
-            processed_text_source,
+        self._write_to_file(self._deduplicate(self._filter(self._preprocess())), environment_path=environment_path)
+
+    def _preprocess(self) -> Source:
+        return MappingSource(self.text_source, function=self.preprocessor.preprocess)
+
+    def _filter(self, preprocessed_source: Source) -> Source:
+        return FilteringSource(
+            preprocessed_source,
             condition=lambda x: len(x) >= self.min_text_length and len(x.split()) >= self.min_text_token_length,
         )
+
+    def _deduplicate(self, filtered_source: Source) -> Iterable[str]:
         left_bound_duplicate_filtered_source = CacheDeduplicatingSource(
-            short_text_filtered_source,
-            cache_size=100000,
+            filtered_source,
+            cache_size=self.cache_size,
             refresh=False,
             feature_extractor=lambda text: str(text[:50]),
             log=10000,
         )
         right_bound_duplicate_filtered_source = CacheDeduplicatingSource(
             left_bound_duplicate_filtered_source,
-
cache_size=100000, + cache_size=self.cache_size, refresh=False, feature_extractor=lambda text: str(text[-50:]), log=10000, ) - output_file_path = os.path.join(environment_path, "texts.txt") - lines = 0 - with io.open(output_file_path, mode="wt", encoding="utf-8") as output_stream: - for line in right_bound_duplicate_filtered_source: - output_stream.write(line) - output_stream.write("\n") - lines += 1 - logging.info(f"Completed extraction of texts: {lines} lines written to file.") + return right_bound_duplicate_filtered_source + def _write_to_file(self, texts: Iterable[str], environment_path: str) -> None: + return write_to_texts_file(texts, environment_path) -class MinHashLSHDeduplicator: - def __init__(self, seeds: Union[int, np.ndarray], char_ngram: int, bands: int): - hasher = minhash.MinHasher(seeds=seeds, char_ngram=char_ngram) - self.lsh_cache = cache.Cache(num_bands=bands, hasher=hasher) - def deduplicate(self, docs: List[str], min_jaccard: float, clear: bool = True) -> List[str]: - if clear: - self.lsh_cache.clear() - - added = set() - deduplicated_docs = [] - for i, j in self.get_all_duplicates(docs, min_jaccard): - if i not in added and j not in added: - added.add(i) - added.add(j) - deduplicated_docs.append(docs[i]) - - return deduplicated_docs +class RandomSplitTextsFromData(ExtractTextsFromData): + def __init__( + self, + text_source: Iterable[str], + preprocessor: Optional[IPreprocessor] = None, + min_text_length: int = MIN_TEXT_LEN, + min_text_token_length: int = MIN_TEXT_TOKEN_LENGTH, + seeds: Union[int, np.ndarray] = 100, + test_size: Union[float, int] = 0.1 + ) -> None: + super().__init__(text_source, preprocessor, min_text_length, min_text_token_length, seeds,) + # if test_size > 1 (absolute number) there is a probability that we won't reach this number + # if size of full dataset is twice less or approximately equal than test_size + self.test_ratio = 0.5 if test_size > 1 else test_size + self.test_size = test_size if test_size > 1 else float("inf") - def get_all_duplicates(self, docs: List[str], min_jaccard: float) -> Set[Tuple[int, int]]: - for i, doc in enumerate(docs): - self.lsh_cache.add_doc(doc, i) - return self.lsh_cache.get_all_duplicates(min_jaccard) + def _write_to_file(self, texts: Iterable[str], environment_path: str) -> None: + return write_to_train_val_files(texts, environment_path, test_ratio=self.test_ratio, test_size=self.test_size) diff --git a/src/language_model/tokenization/trainer.py b/src/language_model/tokenization/trainer.py index 0f5b0ae..db5863f 100644 --- a/src/language_model/tokenization/trainer.py +++ b/src/language_model/tokenization/trainer.py @@ -61,4 +61,4 @@ def __init__( def execute(self, environment_path: str) -> None: self.tokenizer.train_from_iterator(self.iterator, trainer=self.trainer) - self.tokenizer.save(path=os.path.join(environment_path, self.tokenizer_file_name), pretty=True) + self.tokenizer.save(os.path.join(environment_path, self.tokenizer_file_name), pretty=True) From 1e1f1f62f11c4e76827d82c80c5ca1b4330e414c Mon Sep 17 00:00:00 2001 From: vitaliy <1999kvo@gmail.com> Date: Mon, 6 Sep 2021 14:40:57 +0300 Subject: [PATCH 09/28] saving PreTrainedTokenizer --- src/language_model/modelling/trainer.py | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/src/language_model/modelling/trainer.py b/src/language_model/modelling/trainer.py index 782253a..407f7d3 100644 --- a/src/language_model/modelling/trainer.py +++ b/src/language_model/modelling/trainer.py @@ -28,6 +28,22 @@ def execute(self, environment_path: str) 
-> None: self.trainer.save_model(os.path.join(environment_path, self.model_folder_name)) +class TransformersTrainTaskWithTokenizerSaving(TransformersTrainTask): + def __init__( + self, + trainer: Trainer, + checkpoint_folder: Optional[str] = None, + model_folder_name: str = "model", + tokenizer_folder_name: str = "tokenizer", + ): + super().__init__(trainer, checkpoint_folder, model_folder_name) + self.tokenizer_folder_name = tokenizer_folder_name + + def execute(self, environment_path: str) -> None: + super().execute(environment_path) + self.trainer.tokenizer.save_pretrained(os.path.join(environment_path, self.tokenizer_folder_name)) + + class RobertaForMaskedLMTrainTask(ITask): def __init__( self, From 4300cfd443d1ee063db9a64dadbe6a84a433b813 Mon Sep 17 00:00:00 2001 From: vitaliy <1999kvo@gmail.com> Date: Mon, 6 Sep 2021 14:41:15 +0300 Subject: [PATCH 10/28] new configs --- .../cyr/gpt/extract_texts/in-house-data.py | 27 +++++++++++++++ .../train-validation-open-data.py | 33 +++++++++++++++++++ 2 files changed, 60 insertions(+) create mode 100644 configs/cyr/gpt/extract_texts/in-house-data.py create mode 100644 configs/cyr/gpt/extract_texts/train-validation-open-data.py diff --git a/configs/cyr/gpt/extract_texts/in-house-data.py b/configs/cyr/gpt/extract_texts/in-house-data.py new file mode 100644 index 0000000..90262cd --- /dev/null +++ b/configs/cyr/gpt/extract_texts/in-house-data.py @@ -0,0 +1,27 @@ +from pynlple.processing.preprocessor import ( + StackingPreprocessor, + HtmlTagReplacer, + URLReplacer, + MultiNonLetterReplacer, + MultiLetterReplacer, +) + +from language_model.data.extract import ExtractTextsFromData, FromLoadedYsDataSource + +YS_FOLDER_PATHS = ["outputs/cyr/gpt/load_data/in-house"] + + +preprocessor = StackingPreprocessor( + [ + HtmlTagReplacer(), + URLReplacer(), + MultiNonLetterReplacer(include_digits=False), + MultiLetterReplacer() + ] +) + +ys_train = FromLoadedYsDataSource(source_folder_paths=YS_FOLDER_PATHS) + +task = ExtractTextsFromData( + text_source=ys_train, preprocessor=preprocessor, seeds=100, char_ngram=20, bands=20, min_jaccard=0.9 +) diff --git a/configs/cyr/gpt/extract_texts/train-validation-open-data.py b/configs/cyr/gpt/extract_texts/train-validation-open-data.py new file mode 100644 index 0000000..bcc5db7 --- /dev/null +++ b/configs/cyr/gpt/extract_texts/train-validation-open-data.py @@ -0,0 +1,33 @@ +from itertools import chain + +from datasets import load_dataset +from pynlple.processing.preprocessor import ( + StackingPreprocessor, + HtmlTagReplacer, + URLReplacer, + MultiNonLetterReplacer, + MultiLetterReplacer, +) + +from language_model.data.extract import PostWikiExtractorDataSource, RandomSplitTextsFromData + +WIKI_EXTRACTED_PATH = "outputs/cyr/gpt/load_data/wiki/ukwiki-latest-pages-articles" + + +preprocessor = StackingPreprocessor( + [ + HtmlTagReplacer(), + URLReplacer(), + MultiNonLetterReplacer(include_digits=False), + MultiLetterReplacer() + ] +) + +oscar_train = (item["text"] for item in load_dataset("oscar", "unshuffled_deduplicated_uk", split="train")) +cc100_train = (item["text"] for item in load_dataset("cc100", lang="uk", split="train")) +wiki_train = (item["text"] for item in PostWikiExtractorDataSource(WIKI_EXTRACTED_PATH)) + + +task = RandomSplitTextsFromData( + text_source=chain(oscar_train, cc100_train, wiki_train), preprocessor=preprocessor, test_size=5_000 +) From e889a3f560ebbefd4cb0616d138c369de667460c Mon Sep 17 00:00:00 2001 From: vitaliy <1999kvo@gmail.com> Date: Mon, 6 Sep 2021 14:41:53 +0300 Subject: [PATCH 11/28] 
 Iterable[Iterable[str]] -> Iterable[str] (should be merged outside)

---
 src/language_model/data/dataset.py | 19 +++++++++----------
 1 file changed, 9 insertions(+), 10 deletions(-)

diff --git a/src/language_model/data/dataset.py b/src/language_model/data/dataset.py
index 366ed51..2cede73 100644
--- a/src/language_model/data/dataset.py
+++ b/src/language_model/data/dataset.py
@@ -132,7 +132,7 @@ class FromIterableTextDataset(LazyDataset):
     def __init__(
         self,
         tokenizer: Union[PreTrainedTokenizer, PreTrainedTokenizerFast],
-        data_sources: Iterable[Iterable[str]],
+        data_source: Iterable[str],
         block_size: int,
         return_overflowing_tokens: bool = True,
         process_batch_size: int = 8192,
         super().__init__()
         self.return_overflowing_tokens = return_overflowing_tokens
         self.tokenizer = tokenizer
-        self.data_sources = data_sources
+        self.data_source = data_source
         self.block_size = block_size
         self.process_batch_size = process_batch_size

     def _read_chunk(self) -> Iterator[List[str]]:
         lines: List[str] = []
-        for data_source in self.data_sources:
-            for line in data_source:
-                if len(line) > 0 and not line.isspace():
-                    lines.append(line)
-
-                if len(lines) == self.process_batch_size:
-                    yield lines
-                    lines = []
+        for line in self.data_source:
+            if len(line) > 0 and not line.isspace():
+                lines.append(line)
+
+            if len(lines) == self.process_batch_size:
+                yield lines
+                lines = []
         if len(lines) > 0:
             yield lines

From acf89eda2027721eb562cf7bb91caeb3b4b86475 Mon Sep 17 00:00:00 2001
From: vitaliy <1999kvo@gmail.com>
Date: Mon, 6 Sep 2021 14:42:20 +0300
Subject: [PATCH 12/28] writing to single/train-val files

---
 src/language_model/data/utils.py | 41 ++++++++++++++++++++++++++++
 1 file changed, 41 insertions(+)
 create mode 100644 src/language_model/data/utils.py

diff --git a/src/language_model/data/utils.py b/src/language_model/data/utils.py
new file mode 100644
index 0000000..bb652c9
--- /dev/null
+++ b/src/language_model/data/utils.py
@@ -0,0 +1,41 @@
+import io
+import logging
+import os
+import random
+from typing import Iterable
+
+
+def write_to_texts_file(texts: Iterable[str], environment_path: str) -> None:
+    output_file_path = os.path.join(environment_path, "texts.txt")
+    lines = 0
+    with io.open(output_file_path, mode="wt", encoding="utf-8") as output_stream:
+        for line in texts:
+            output_stream.write(line)
+            output_stream.write("\n")
+            lines += 1
+    logging.info(f"Completed extraction of texts: {lines} lines written to file.")
+
+
+def write_to_train_val_files(texts: Iterable[str], environment_path: str, test_ratio: float, test_size: int):
+    train_file_path = os.path.join(environment_path, "train.txt")
+    validation_file_path = os.path.join(environment_path, "validation.txt")
+    train_lines = 0
+    test_lines = 0
+    train_stream = io.open(train_file_path, mode="wt", encoding="utf-8")
+    validation_stream = io.open(validation_file_path, mode="wt", encoding="utf-8")
+    for line in texts:
+        if test_ratio > random.random() and test_lines <= test_size:
+            validation_stream.write(line)
+            validation_stream.write("\n")
+            test_lines += 1
+        else:
+            train_stream.write(line)
+            train_stream.write("\n")
+            train_lines += 1
+
+    validation_stream.close()
+    train_stream.close()
+    logging.info(
+        f"Completed extraction of texts: {test_lines} lines written to test file "
+        f"and {train_lines} lines written to train file"
+    )

From 6d5697536fc9c5cd74f7694dbaad067a46fa5bff Mon Sep 17 00:00:00 2001
From: vitaliy <1999kvo@gmail.com>
Date: Mon, 6 Sep 2021 14:43:04 +0300
Subject: [PATCH 13/28] steps

---
 configs/cyr/gpt/README.md | 9 +++++++++
 1 file changed, 9 insertions(+)
 create mode 100644 configs/cyr/gpt/README.md

diff --git a/configs/cyr/gpt/README.md b/configs/cyr/gpt/README.md
new file mode 100644
index 0000000..7aed7b7
--- /dev/null
+++ b/configs/cyr/gpt/README.md
@@ -0,0 +1,9 @@
+# Steps:
+
+1) `python run.py --task configs/cyr/gpt/load_data/wiki.py`
+2) `python -m wikiextractor.WikiExtractor outputs/cyr/gpt/load_data/wiki/ukwiki-latest-pages-articles.xml.bz2 -o outputs/cyr/gpt/load_data/wiki/ukwiki-latest-pages-articles -b 1M --no-templates`
+3) `python run.py --task configs/cyr/gpt/load_data/in-house.py`
+4) `python run.py --task configs/cyr/gpt/extract_texts/train-validation-open-data.py`
+5) `python run.py --task configs/cyr/gpt/extract_texts/in-house-data.py`
+6) `python run.py --task configs/cyr/gpt/train_tokenizer/ukr-gpt.py`
+7) `shuf outputs/cyr/gpt/extract_texts/train-validation-open-data/train.txt -o outputs/cyr/gpt/extract_texts/train-validation-open-data/train_shuffled.txt`

From d6f5d73007e7395bf6f726c42a26d6f61a37b09f Mon Sep 17 00:00:00 2001
From: vitaliy <1999kvo@gmail.com>
Date: Mon, 6 Sep 2021 14:58:14 +0300
Subject: [PATCH 14/28] pre-commit fixes

---
 .../cyr/gpt/extract_texts/in-house-data.py   | 13 ++---
 .../train-validation-open-data.py            | 13 ++---
 configs/cyr/gpt/train_model/ukr-gpt.py       |  2 +-
 configs/cyr/gpt/train_tokenizer/ukr-gpt.py   |  6 +--
 requirements.txt                             | 10 ++--
 src/language_model/data/extract.py           | 48 +++++++++++--------
 src/language_model/data/utils.py             |  2 +-
 src/language_model/modelling/trainer.py      | 10 ++--
 8 files changed, 48 insertions(+), 56 deletions(-)

diff --git a/configs/cyr/gpt/extract_texts/in-house-data.py b/configs/cyr/gpt/extract_texts/in-house-data.py
index 90262cd..5852472 100644
--- a/configs/cyr/gpt/extract_texts/in-house-data.py
+++ b/configs/cyr/gpt/extract_texts/in-house-data.py
@@ -1,9 +1,9 @@
 from pynlple.processing.preprocessor import (
-    StackingPreprocessor,
     HtmlTagReplacer,
-    URLReplacer,
-    MultiNonLetterReplacer,
     MultiLetterReplacer,
+    MultiNonLetterReplacer,
+    StackingPreprocessor,
+    URLReplacer,
 )

 from language_model.data.extract import ExtractTextsFromData, FromLoadedYsDataSource
@@ -12,12 +12,7 @@


 preprocessor = StackingPreprocessor(
-    [
-        HtmlTagReplacer(),
-        URLReplacer(),
-        MultiNonLetterReplacer(include_digits=False),
-        MultiLetterReplacer()
-    ]
+    [HtmlTagReplacer(), URLReplacer(), MultiNonLetterReplacer(include_digits=False), MultiLetterReplacer()]
 )

 ys_train = FromLoadedYsDataSource(source_folder_paths=YS_FOLDER_PATHS)
diff --git a/configs/cyr/gpt/extract_texts/train-validation-open-data.py b/configs/cyr/gpt/extract_texts/train-validation-open-data.py
index bcc5db7..91418e1 100644
--- a/configs/cyr/gpt/extract_texts/train-validation-open-data.py
+++ b/configs/cyr/gpt/extract_texts/train-validation-open-data.py
@@ -2,11 +2,11 @@

 from datasets import load_dataset
 from pynlple.processing.preprocessor import (
-    StackingPreprocessor,
     HtmlTagReplacer,
-    URLReplacer,
-    MultiNonLetterReplacer,
     MultiLetterReplacer,
+    MultiNonLetterReplacer,
+    StackingPreprocessor,
+    URLReplacer,
 )

 from language_model.data.extract import PostWikiExtractorDataSource, RandomSplitTextsFromData
@@ -15,12 +15,7 @@


 preprocessor = StackingPreprocessor(
-    [
-        HtmlTagReplacer(),
-        URLReplacer(),
-        MultiNonLetterReplacer(include_digits=False),
-        MultiLetterReplacer()
-    ]
+    [HtmlTagReplacer(), URLReplacer(), MultiNonLetterReplacer(include_digits=False), MultiLetterReplacer()]
 )

 oscar_train = (item["text"] for item in load_dataset("oscar",
"unshuffled_deduplicated_uk", split="train")) diff --git a/configs/cyr/gpt/train_model/ukr-gpt.py b/configs/cyr/gpt/train_model/ukr-gpt.py index a5eda37..43c4dd9 100644 --- a/configs/cyr/gpt/train_model/ukr-gpt.py +++ b/configs/cyr/gpt/train_model/ukr-gpt.py @@ -37,7 +37,7 @@ # data train_data_source = ShuffledSources( (text for text in LineByLineSource(IN_HOUSE_TRAIN_DATA_PATH)), - (text for text in LineByLineSource(OPEN_TRAIN_DATA_PATH)) + (text for text in LineByLineSource(OPEN_TRAIN_DATA_PATH)), ) validation_data_path = LineByLineSource(VALIDATION_DATA_PATH) diff --git a/configs/cyr/gpt/train_tokenizer/ukr-gpt.py b/configs/cyr/gpt/train_tokenizer/ukr-gpt.py index c0250d3..8148942 100644 --- a/configs/cyr/gpt/train_tokenizer/ukr-gpt.py +++ b/configs/cyr/gpt/train_tokenizer/ukr-gpt.py @@ -20,8 +20,4 @@ ) trainer = trainers.BpeTrainer(vocab_size=50264, special_tokens=["<|endoftext|>"]) -task = TrainTokenizerTask( - tokenizer=tokenizer, - iterator=train_data_source, - trainer=trainer, -) +task = TrainTokenizerTask(tokenizer=tokenizer, iterator=train_data_source, trainer=trainer) diff --git a/requirements.txt b/requirements.txt index 3b44691..728fc14 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,11 +1,11 @@ +bs4==0.0.1 +Cython==3.0.0a9 datasets==1.11.0 +lxml==4.6.3 +numpy==1.19.5 pyNlple==0.7.5 tokenizers==0.10.1 torch==1.8.1 transformers==4.4.2 -wikiextractor==3.0.4 -bs4==0.0.1 wget==3.2 -numpy==1.19.5 -Cython==3.0.0a9 -lxml==4.6.3 +wikiextractor==3.0.4 diff --git a/src/language_model/data/extract.py b/src/language_model/data/extract.py index 34ad88f..7860b4f 100644 --- a/src/language_model/data/extract.py +++ b/src/language_model/data/extract.py @@ -7,19 +7,20 @@ from concurrent.futures import ProcessPoolExecutor from itertools import islice from pathlib import Path -from typing import Any, Callable, Dict, Hashable, Iterable, Iterator, Optional, Union, List, Set, Tuple, Generator +from typing import Any, Callable, Dict, Generator, Hashable, Iterable, Iterator, List, Optional, Set, Tuple, Union import numpy as np from bs4 import BeautifulSoup from ds_shared.loading import load_pickle -from lsh import minhash, cache from pynlple.data.corpus import FilteringSource, JsonFieldSource, MappingSource, StackingSource from pynlple.data.filesource import FilePathSource from pynlple.data.source import Source from pynlple.processing.preprocessor import IPreprocessor -from .utils import write_to_texts_file, write_to_train_val_files +from lsh import cache, minhash + from ..pipeline import ITask +from .utils import write_to_texts_file, write_to_train_val_files MIN_TEXT_LEN = 10 MIN_TEXT_TOKEN_LENGTH = 2 @@ -124,6 +125,7 @@ def __iter__(self) -> Iterator[Any]: class PostWikiExtractorDataSource(Source): """Provides wiki articles preprocessed by `python -m wikiextractor.WikiExtractor dump.xml.bz2 ...`""" + def __init__(self, article_dir: str) -> None: super().__init__() self.article_dir = Path(article_dir) @@ -169,7 +171,7 @@ def deduplicate(self, docs: List[str], min_jaccard: float, clear: bool = True) - if clear: self.lsh_cache.clear() - duplicate_ids = set() + duplicate_ids: Set[int] = set() keep_form_duplicate_ids = set() for i, j in self.get_all_duplicates(docs, min_jaccard): if i not in duplicate_ids and j not in duplicate_ids: @@ -200,7 +202,7 @@ def in_memory_batch_deduplicate(self, docs: Iterable[str], min_jaccard: float, b yield from batch_docs def lsh_batch_deduplicate( - self, docs: Iterable[str], min_jaccard: float, batch_size: int, clear: bool = True + self, docs: Iterable[str], 
min_jaccard: float, batch_size: int, clear: bool = True ) -> Iterable[str]: """ batch_size: max size of docs will be deduplicated at time, while `batch_docs` list will be extended @@ -210,7 +212,8 @@ def lsh_batch_deduplicate( self.lsh_cache.clear() start_id, end_id = 0, batch_size - duplicate_ids, keep_form_duplicate_ids = set(), set() + keep_form_duplicate_ids: Set[int] = set() + duplicate_ids: Set[int] = set() batch_docs = list(islice(docs, batch_size)) @@ -260,8 +263,7 @@ def lsh_batch_deduplicate( def get_all_duplicates(self, docs: Iterable[str], min_jaccard: float, start_id: int = 0) -> Set[Tuple[int, int]]: self._cache_texts_parallel(docs, start_id=start_id) - import pdb; pdb.set_trace() - return self.lsh_cache.get_all_duplicates(min_jaccard) + return self.lsh_cache.get_all_duplicates(min_jaccard) # type: ignore def _cache_texts(self, docs: Iterable[str], start_id: int = 0) -> None: for i, doc in enumerate(docs, start_id): @@ -281,7 +283,7 @@ def __init__( preprocessor: Optional[IPreprocessor] = None, min_text_length: int = MIN_TEXT_LEN, min_text_token_length: int = MIN_TEXT_TOKEN_LENGTH, - cache_size: int = 100_000 + cache_size: int = 100_000, ) -> None: super().__init__() self.preprocessor = preprocessor @@ -293,10 +295,12 @@ def __init__( def execute(self, environment_path: str) -> None: self._write_to_file(self._deduplicate(self._filter(self._preprocess())), environment_path=environment_path) - def _preprocess(self) -> Source: - return MappingSource(self.text_source, function=self.preprocessor.preprocess) + def _preprocess(self) -> Union[Source, Iterable[str]]: + if self.preprocessor is not None: + return MappingSource(self.text_source, function=self.preprocessor.preprocess) + return self.text_source - def _filter(self, preprocessed_source: Source) -> Source: + def _filter(self, preprocessed_source: Union[Source, Iterable[str]]) -> Source: return FilteringSource( preprocessed_source, condition=lambda x: len(x) >= self.min_text_length and len(x.split()) >= self.min_text_token_length, @@ -325,19 +329,21 @@ def _write_to_file(self, texts: Iterable[str], environment_path: str) -> None: class RandomSplitTextsFromData(ExtractTextsFromData): def __init__( - self, - text_source: Iterable[str], - preprocessor: Optional[IPreprocessor] = None, - min_text_length: int = MIN_TEXT_LEN, - min_text_token_length: int = MIN_TEXT_TOKEN_LENGTH, - seeds: Union[int, np.ndarray] = 100, - test_size: Union[float, int] = 0.1 + self, + text_source: Iterable[str], + preprocessor: Optional[IPreprocessor] = None, + min_text_length: int = MIN_TEXT_LEN, + min_text_token_length: int = MIN_TEXT_TOKEN_LENGTH, + seeds: Union[int, np.ndarray] = 100, + test_size: Union[float, int] = 0.1, ) -> None: - super().__init__(text_source, preprocessor, min_text_length, min_text_token_length, seeds,) + super().__init__(text_source, preprocessor, min_text_length, min_text_token_length, seeds) # if test_size > 1 (absolute number) there is a probability that we won't reach this number # if size of full dataset is twice less or approximately equal than test_size self.test_ratio = 0.5 if test_size > 1 else test_size self.test_size = test_size if test_size > 1 else float("inf") def _write_to_file(self, texts: Iterable[str], environment_path: str) -> None: - return write_to_train_val_files(texts, environment_path, test_ratio=self.test_ratio, test_size=self.test_size) + return write_to_train_val_files( + texts, environment_path, test_ratio=self.test_ratio, test_size=self.test_size # type: ignore + ) diff --git 
a/src/language_model/data/utils.py b/src/language_model/data/utils.py index bb652c9..9f672f2 100644 --- a/src/language_model/data/utils.py +++ b/src/language_model/data/utils.py @@ -16,7 +16,7 @@ def write_to_texts_file(texts: Iterable[str], environment_path: str) -> None: logging.info(f"Completed extraction of texts: {lines} lines written to file.") -def write_to_train_val_files(texts: Iterable[str], environment_path: str, test_ratio: float, test_size: int): +def write_to_train_val_files(texts: Iterable[str], environment_path: str, test_ratio: float, test_size: int) -> None: train_file_path = os.path.join(environment_path, "train.txt") validation_file_path = os.path.join(environment_path, "validation.txt") train_lines = 0 diff --git a/src/language_model/modelling/trainer.py b/src/language_model/modelling/trainer.py index 407f7d3..aed7ade 100644 --- a/src/language_model/modelling/trainer.py +++ b/src/language_model/modelling/trainer.py @@ -30,11 +30,11 @@ def execute(self, environment_path: str) -> None: class TransformersTrainTaskWithTokenizerSaving(TransformersTrainTask): def __init__( - self, - trainer: Trainer, - checkpoint_folder: Optional[str] = None, - model_folder_name: str = "model", - tokenizer_folder_name: str = "tokenizer", + self, + trainer: Trainer, + checkpoint_folder: Optional[str] = None, + model_folder_name: str = "model", + tokenizer_folder_name: str = "tokenizer", ): super().__init__(trainer, checkpoint_folder, model_folder_name) self.tokenizer_folder_name = tokenizer_folder_name From 13af9e1340bf54961b68934135d3af88b6279ea3 Mon Sep 17 00:00:00 2001 From: vitaliy <1999kvo@gmail.com> Date: Tue, 7 Sep 2021 17:51:19 +0300 Subject: [PATCH 15/28] change paths, save_total_limit, comment lsh dependency --- configs/cyr/gpt/train_model/ukr-gpt.py | 20 ++- src/language_model/data/extract.py | 231 +++++++++++++------------ 2 files changed, 129 insertions(+), 122 deletions(-) diff --git a/configs/cyr/gpt/train_model/ukr-gpt.py b/configs/cyr/gpt/train_model/ukr-gpt.py index 43c4dd9..fb8b951 100644 --- a/configs/cyr/gpt/train_model/ukr-gpt.py +++ b/configs/cyr/gpt/train_model/ukr-gpt.py @@ -12,11 +12,17 @@ from language_model.modelling.trainer import TransformersTrainTaskWithTokenizerSaving from language_model.tokenization.factory import FAST_TOKENIZER_DEFAULT_FILE_NAME -TOKENIZER_PATH = f"outputs/cyr/gpt/train_tokenizer/ukr-gpt/{FAST_TOKENIZER_DEFAULT_FILE_NAME}" +TOKENIZER_PATH = ( + f"/mnt/lost+found/language-models/outputs/cyr/gpt/train_tokenizer/ukr-gpt/{FAST_TOKENIZER_DEFAULT_FILE_NAME}" +) -IN_HOUSE_TRAIN_DATA_PATH = "outputs/cyr/gpt/extract_texts/in-house-data/texts.txt" -OPEN_TRAIN_DATA_PATH = "outputs/cyr/gpt/extract_texts/train-validation-open-data/train_shuffled.txt" -VALIDATION_DATA_PATH = "outputs/cyr/gpt/extract_texts/train-validation-open-data/validation.txt" +IN_HOUSE_TRAIN_DATA_PATH = "/mnt/lost+found/language-models/outputs/cyr/gpt/extract_texts/in-house-data/texts.txt" +OPEN_TRAIN_DATA_PATH = ( + "/mnt/lost+found/language-models/outputs/cyr/gpt/extract_texts/train-validation-open-data/train_shuffled.txt" +) +VALIDATION_DATA_PATH = ( + "/mnt/lost+found/language-models/outputs/cyr/gpt/extract_texts/train-validation-open-data/validation.txt" +) MODEL_MAX_LENGTH = 1024 @@ -59,10 +65,10 @@ per_device_train_batch_size=8, # overall bs = 8 * 8 * num_gpus (GPT2 used 512) gradient_accumulation_steps=8, per_device_eval_batch_size=4, - output_dir="temp", - overwrite_output_dir=True, + output_dir="checkpoints", + overwrite_output_dir=False, save_steps=250000, - 
save_total_limit=2, + save_total_limit=10, prediction_loss_only=False, learning_rate=0.0002, # (was manually tuned in GPT2 on held-out validation) warmup_ratio=0.004, diff --git a/src/language_model/data/extract.py b/src/language_model/data/extract.py index 7860b4f..871ff13 100644 --- a/src/language_model/data/extract.py +++ b/src/language_model/data/extract.py @@ -17,11 +17,12 @@ from pynlple.data.source import Source from pynlple.processing.preprocessor import IPreprocessor -from lsh import cache, minhash - from ..pipeline import ITask from .utils import write_to_texts_file, write_to_train_val_files +# from lsh import cache, minhash + + MIN_TEXT_LEN = 10 MIN_TEXT_TOKEN_LENGTH = 2 @@ -161,119 +162,119 @@ def __iter__(self) -> Iterator[str]: yield from text_source -class MinHashLSHDeduplicator: - def __init__(self, seeds: Union[int, np.ndarray], char_ngram: int, bands: int, workers: int = -1): - self.workers = multiprocessing.cpu_count() if workers == -1 else workers - hasher = minhash.MinHasher(seeds=seeds, char_ngram=char_ngram, random_state=42) - self.lsh_cache = cache.Cache(num_bands=bands, hasher=hasher) - - def deduplicate(self, docs: List[str], min_jaccard: float, clear: bool = True) -> List[str]: - if clear: - self.lsh_cache.clear() - - duplicate_ids: Set[int] = set() - keep_form_duplicate_ids = set() - for i, j in self.get_all_duplicates(docs, min_jaccard): - if i not in duplicate_ids and j not in duplicate_ids: - keep_form_duplicate_ids.add(i) - elif i in keep_form_duplicate_ids and j in keep_form_duplicate_ids: - keep_form_duplicate_ids.remove(j) - duplicate_ids.add(i) - duplicate_ids.add(j) - - keep = set(range(len(docs))) - duplicate_ids | keep_form_duplicate_ids - - return [docs[i] for i in keep] - - def in_memory_batch_deduplicate(self, docs: Iterable[str], min_jaccard: float, batch_size: int) -> Iterable[str]: - batch_docs = list(islice(docs, batch_size)) - while batch_docs: - batch_docs = self.deduplicate(batch_docs, min_jaccard=min_jaccard, clear=True) - gc.collect() - add_into_batch = batch_size - len(batch_docs) - if add_into_batch > 0: - new_docs = list(islice(docs, add_into_batch)) - if not new_docs: - break - batch_docs.extend(new_docs) - else: - yield from batch_docs - batch_docs = list(islice(docs, batch_size)) - yield from batch_docs - - def lsh_batch_deduplicate( - self, docs: Iterable[str], min_jaccard: float, batch_size: int, clear: bool = True - ) -> Iterable[str]: - """ - batch_size: max size of docs will be deduplicated at time, while `batch_docs` list will be extended - until `batch_size` unique docs will be collected into it - """ - if clear: - self.lsh_cache.clear() - - start_id, end_id = 0, batch_size - keep_form_duplicate_ids: Set[int] = set() - duplicate_ids: Set[int] = set() - - batch_docs = list(islice(docs, batch_size)) - - while batch_docs: - - new_duplicate_ids = set() - # batch_docs[start_id:] to process just recently appended docs, - # start_id = start_id to set new ids for recently appended docs - for i, j in self.get_all_duplicates(batch_docs[start_id:], min_jaccard, start_id=start_id): - if i not in duplicate_ids and j not in duplicate_ids: - keep_form_duplicate_ids.add(i) - elif i in keep_form_duplicate_ids and j in keep_form_duplicate_ids: - keep_form_duplicate_ids.remove(j) - duplicate_ids.add(i) - duplicate_ids.add(j) - new_duplicate_ids.add(i) - new_duplicate_ids.add(j) - - # new_duplicate_ids - to clear duplicates from recently appended - # docs, as previous duplicate ids have already cleared - drop_ids = new_duplicate_ids - 
keep_form_duplicate_ids - - if drop_ids: - for i in drop_ids: - self.lsh_cache.remove_id(i) - - start_id = end_id - end_id = start_id + len(drop_ids) - else: - # all non duplicated + keep_form_duplicate_ids - for i in set(range(len(batch_docs))) - duplicate_ids | keep_form_duplicate_ids: - yield batch_docs[i] - - # new batch - self.lsh_cache.clear() - start_id, end_id = 0, batch_size - duplicate_ids, keep_form_duplicate_ids = set(), set() - batch_docs = [] - - # if drop_ids: we append next len(drop_ids) examples - batch_docs.extend(islice(docs, end_id - start_id)) - - if batch_docs: - # all non duplicated + keep_form_duplicate_ids - for i in set(range(len(batch_docs))) - duplicate_ids | keep_form_duplicate_ids: - yield batch_docs[i] - - def get_all_duplicates(self, docs: Iterable[str], min_jaccard: float, start_id: int = 0) -> Set[Tuple[int, int]]: - self._cache_texts_parallel(docs, start_id=start_id) - return self.lsh_cache.get_all_duplicates(min_jaccard) # type: ignore - - def _cache_texts(self, docs: Iterable[str], start_id: int = 0) -> None: - for i, doc in enumerate(docs, start_id): - self.lsh_cache.add_doc(doc, i) - - def _cache_texts_parallel(self, docs: Iterable[str], start_id: int = 0) -> None: - encoded_docs = (doc.encode("utf8") for doc in docs) - with ProcessPoolExecutor(max_workers=self.workers) as executor: - for i, fingerprint in enumerate(executor.map(self.lsh_cache.hasher.fingerprint, encoded_docs), start_id): - self.lsh_cache.add_fingerprint(fingerprint, i) +# class MinHashLSHDeduplicator: +# def __init__(self, seeds: Union[int, np.ndarray], char_ngram: int, bands: int, workers: int = -1): +# self.workers = multiprocessing.cpu_count() if workers == -1 else workers +# hasher = minhash.MinHasher(seeds=seeds, char_ngram=char_ngram, random_state=42) +# self.lsh_cache = cache.Cache(num_bands=bands, hasher=hasher) +# +# def deduplicate(self, docs: List[str], min_jaccard: float, clear: bool = True) -> List[str]: +# if clear: +# self.lsh_cache.clear() +# +# duplicate_ids: Set[int] = set() +# keep_form_duplicate_ids = set() +# for i, j in self.get_all_duplicates(docs, min_jaccard): +# if i not in duplicate_ids and j not in duplicate_ids: +# keep_form_duplicate_ids.add(i) +# elif i in keep_form_duplicate_ids and j in keep_form_duplicate_ids: +# keep_form_duplicate_ids.remove(j) +# duplicate_ids.add(i) +# duplicate_ids.add(j) +# +# keep = set(range(len(docs))) - duplicate_ids | keep_form_duplicate_ids +# +# return [docs[i] for i in keep] +# +# def in_memory_batch_deduplicate(self, docs: Iterable[str], min_jaccard: float, batch_size: int) -> Iterable[str]: +# batch_docs = list(islice(docs, batch_size)) +# while batch_docs: +# batch_docs = self.deduplicate(batch_docs, min_jaccard=min_jaccard, clear=True) +# gc.collect() +# add_into_batch = batch_size - len(batch_docs) +# if add_into_batch > 0: +# new_docs = list(islice(docs, add_into_batch)) +# if not new_docs: +# break +# batch_docs.extend(new_docs) +# else: +# yield from batch_docs +# batch_docs = list(islice(docs, batch_size)) +# yield from batch_docs +# +# def lsh_batch_deduplicate( +# self, docs: Iterable[str], min_jaccard: float, batch_size: int, clear: bool = True +# ) -> Iterable[str]: +# """ +# batch_size: max size of docs will be deduplicated at time, while `batch_docs` list will be extended +# until `batch_size` unique docs will be collected into it +# """ +# if clear: +# self.lsh_cache.clear() +# +# start_id, end_id = 0, batch_size +# keep_form_duplicate_ids: Set[int] = set() +# duplicate_ids: Set[int] = set() +# +# 
batch_docs = list(islice(docs, batch_size)) +# +# while batch_docs: +# +# new_duplicate_ids = set() +# # batch_docs[start_id:] to process just recently appended docs, +# # start_id = start_id to set new ids for recently appended docs +# for i, j in self.get_all_duplicates(batch_docs[start_id:], min_jaccard, start_id=start_id): +# if i not in duplicate_ids and j not in duplicate_ids: +# keep_form_duplicate_ids.add(i) +# elif i in keep_form_duplicate_ids and j in keep_form_duplicate_ids: +# keep_form_duplicate_ids.remove(j) +# duplicate_ids.add(i) +# duplicate_ids.add(j) +# new_duplicate_ids.add(i) +# new_duplicate_ids.add(j) +# +# # new_duplicate_ids - to clear duplicates from recently appended +# # docs, as previous duplicate ids have already cleared +# drop_ids = new_duplicate_ids - keep_form_duplicate_ids +# +# if drop_ids: +# for i in drop_ids: +# self.lsh_cache.remove_id(i) +# +# start_id = end_id +# end_id = start_id + len(drop_ids) +# else: +# # all non duplicated + keep_form_duplicate_ids +# for i in set(range(len(batch_docs))) - duplicate_ids | keep_form_duplicate_ids: +# yield batch_docs[i] +# +# # new batch +# self.lsh_cache.clear() +# start_id, end_id = 0, batch_size +# duplicate_ids, keep_form_duplicate_ids = set(), set() +# batch_docs = [] +# +# # if drop_ids: we append next len(drop_ids) examples +# batch_docs.extend(islice(docs, end_id - start_id)) +# +# if batch_docs: +# # all non duplicated + keep_form_duplicate_ids +# for i in set(range(len(batch_docs))) - duplicate_ids | keep_form_duplicate_ids: +# yield batch_docs[i] +# +# def get_all_duplicates(self, docs: Iterable[str], min_jaccard: float, start_id: int = 0) -> Set[Tuple[int, int]]: +# self._cache_texts_parallel(docs, start_id=start_id) +# return self.lsh_cache.get_all_duplicates(min_jaccard) # type: ignore +# +# def _cache_texts(self, docs: Iterable[str], start_id: int = 0) -> None: +# for i, doc in enumerate(docs, start_id): +# self.lsh_cache.add_doc(doc, i) +# +# def _cache_texts_parallel(self, docs: Iterable[str], start_id: int = 0) -> None: +# encoded_docs = (doc.encode("utf8") for doc in docs) +# with ProcessPoolExecutor(max_workers=self.workers) as executor: +# for i, fingerprint in enumerate(executor.map(self.lsh_cache.hasher.fingerprint, encoded_docs), start_id): +# self.lsh_cache.add_fingerprint(fingerprint, i) class ExtractTextsFromData(ITask): From 66b25c8ba0c9660af1ddd6942e08d9b9d78bd520 Mon Sep 17 00:00:00 2001 From: vitaliy <1999kvo@gmail.com> Date: Thu, 9 Sep 2021 14:56:18 +0300 Subject: [PATCH 16/28] separate data step: saving input ids --- .../gpt/extract_vectors/vectorize-train.py | 27 +++ .../extract_vectors/vectorize-validation.py | 22 ++ configs/cyr/gpt/train_model/ukr-gpt.py | 38 +--- src/language_model/data/dataset.py | 96 +++----- src/language_model/data/extract.py | 209 ++++++++---------- 5 files changed, 170 insertions(+), 222 deletions(-) create mode 100644 configs/cyr/gpt/extract_vectors/vectorize-train.py create mode 100644 configs/cyr/gpt/extract_vectors/vectorize-validation.py diff --git a/configs/cyr/gpt/extract_vectors/vectorize-train.py b/configs/cyr/gpt/extract_vectors/vectorize-train.py new file mode 100644 index 0000000..f50a010 --- /dev/null +++ b/configs/cyr/gpt/extract_vectors/vectorize-train.py @@ -0,0 +1,27 @@ +import os + +from transformers import PreTrainedTokenizerFast + +from language_model.data.extract import LineByLineSource, ShuffledSources, ExtractVectorsFromTexts + +TOKENIZER_PATH = "outputs/cyr/gpt/train_tokenizer/convert-to-transformers/tokenizer/" + 
+IN_HOUSE_TRAIN_DATA_PATH = "outputs/cyr/gpt/extract_texts/in-house-data/texts.txt" +OPEN_TRAIN_DATA_PATH = "outputs/cyr/gpt/extract_texts/train-validation-open-data/train_shuffled.txt" +MODEL_MAX_LENGTH = 1024 + +# data +train_data_source = ShuffledSources( + (text for text in LineByLineSource(IN_HOUSE_TRAIN_DATA_PATH)), + (text for text in LineByLineSource(OPEN_TRAIN_DATA_PATH)), +) + +os.environ["TOKENIZERS_PARALLELISM"] = "true" + +task = ExtractVectorsFromTexts( + data_source=train_data_source, + tokenizer=PreTrainedTokenizerFast.from_pretrained(TOKENIZER_PATH), + block_size=MODEL_MAX_LENGTH, + process_batch_size=100_000, + workers=18 +) diff --git a/configs/cyr/gpt/extract_vectors/vectorize-validation.py b/configs/cyr/gpt/extract_vectors/vectorize-validation.py new file mode 100644 index 0000000..0f08f06 --- /dev/null +++ b/configs/cyr/gpt/extract_vectors/vectorize-validation.py @@ -0,0 +1,22 @@ +import os + +from transformers import PreTrainedTokenizerFast + +from language_model.data.extract import LineByLineSource, ExtractVectorsFromTexts + +TOKENIZER_PATH = "outputs/cyr/gpt/train_tokenizer/convert-to-transformers/tokenizer/" + +OPEN_VALIDATION_DATA_PATH = "outputs/cyr/gpt/extract_texts/train-validation-open-data/validation.txt" +MODEL_MAX_LENGTH = 1024 + +# data +validation_data_source = LineByLineSource(OPEN_VALIDATION_DATA_PATH) +os.environ["TOKENIZERS_PARALLELISM"] = "true" + +task = ExtractVectorsFromTexts( + data_source=validation_data_source, + tokenizer=PreTrainedTokenizerFast.from_pretrained(TOKENIZER_PATH), + block_size=MODEL_MAX_LENGTH, + process_batch_size=100_000, + workers=18 +) diff --git a/configs/cyr/gpt/train_model/ukr-gpt.py b/configs/cyr/gpt/train_model/ukr-gpt.py index fb8b951..d96ad5b 100644 --- a/configs/cyr/gpt/train_model/ukr-gpt.py +++ b/configs/cyr/gpt/train_model/ukr-gpt.py @@ -7,52 +7,32 @@ TrainingArguments, ) -from language_model.data.dataset import DataCollatorForGroupTextForCasualLMDataset, GroupTextForCasualLMDataset -from language_model.data.extract import LineByLineSource, ShuffledSources +from language_model.data.dataset import DataCollatorForGroupTextForCasualLMDataset, FromInputIdsDataset from language_model.modelling.trainer import TransformersTrainTaskWithTokenizerSaving -from language_model.tokenization.factory import FAST_TOKENIZER_DEFAULT_FILE_NAME TOKENIZER_PATH = ( - f"/mnt/lost+found/language-models/outputs/cyr/gpt/train_tokenizer/ukr-gpt/{FAST_TOKENIZER_DEFAULT_FILE_NAME}" + f"/mnt/lost+found/language-models/outputs/cyr/gpt/train_tokenizer/convert-to-transformers/tokenizer/" ) -IN_HOUSE_TRAIN_DATA_PATH = "/mnt/lost+found/language-models/outputs/cyr/gpt/extract_texts/in-house-data/texts.txt" -OPEN_TRAIN_DATA_PATH = ( - "/mnt/lost+found/language-models/outputs/cyr/gpt/extract_texts/train-validation-open-data/train_shuffled.txt" +TRAIN_IDS_PATH = ( + "/mnt/lost+found/language-models/outputs/cyr/gpt/extract_texts/vectorize-train/processed_batch.jsonl" ) -VALIDATION_DATA_PATH = ( - "/mnt/lost+found/language-models/outputs/cyr/gpt/extract_texts/train-validation-open-data/validation.txt" +VALIDATION_IDS_PATH = ( + "/mnt/lost+found/language-models/outputs/cyr/gpt/extract_texts/vectorize-validation/processed_batch.jsonl" ) MODEL_MAX_LENGTH = 1024 # tokenizer -tokenizer = PreTrainedTokenizerFast( - tokenizer_file=TOKENIZER_PATH, model_max_length=MODEL_MAX_LENGTH, padding_side="right" -) -tokenizer.add_special_tokens({"bos_token": "<|endoftext|>"}) -# basically `pad_token` wont be used, as DataCollatorForGroupTextForCasualLMDataset pack 
sequences up to max_length -# but to avoid an error within DataCollatorForGroupTextForCasualLMDataset -tokenizer.pad_token = tokenizer.bos_token - +tokenizer = PreTrainedTokenizerFast.from_pretrained(TOKENIZER_PATH) # model model_config = GPT2Config(vocab_size=len(tokenizer), bos_token_id=tokenizer.bos_token_id) model = GPT2LMHeadModel(model_config) # data -train_data_source = ShuffledSources( - (text for text in LineByLineSource(IN_HOUSE_TRAIN_DATA_PATH)), - (text for text in LineByLineSource(OPEN_TRAIN_DATA_PATH)), -) -validation_data_path = LineByLineSource(VALIDATION_DATA_PATH) - -train_dataset = GroupTextForCasualLMDataset( - tokenizer=tokenizer, data_source=train_data_source, block_size=MODEL_MAX_LENGTH -) -valid_dataset = GroupTextForCasualLMDataset( - tokenizer=tokenizer, data_source=validation_data_path, block_size=MODEL_MAX_LENGTH -) +train_dataset = FromInputIdsDataset(TRAIN_IDS_PATH) +valid_dataset = FromInputIdsDataset(VALIDATION_IDS_PATH) data_collator = DataCollatorForGroupTextForCasualLMDataset() diff --git a/src/language_model/data/dataset.py b/src/language_model/data/dataset.py index 2cede73..d37cfbc 100644 --- a/src/language_model/data/dataset.py +++ b/src/language_model/data/dataset.py @@ -1,14 +1,15 @@ import itertools +import json import logging -import math from itertools import chain -from typing import Dict, Iterable, Iterator, List, Optional, Sequence, Union +from typing import Dict, Iterable, Iterator, List, Optional, Sequence +import math import torch from torch._utils import _accumulate from torch.utils.data import Dataset from torch.utils.data.dataset import T_co -from transformers import PreTrainedTokenizer, PreTrainedTokenizerFast +from transformers import PreTrainedTokenizer class LazyDataset(Dataset): @@ -128,79 +129,32 @@ def split_lazy_dataset(dataset: LazyDataset, portions: Sequence[float]) -> List[ return [LazySubset(dataset, portions_provider=portions_provider, portion_id=i) for i in range(len(portions))] -class FromIterableTextDataset(LazyDataset): - def __init__( - self, - tokenizer: Union[PreTrainedTokenizer, PreTrainedTokenizerFast], - data_source: Iterable[str], - block_size: int, - return_overflowing_tokens: bool = True, - process_batch_size: int = 8192, - ) -> None: - super().__init__() - self.return_overflowing_tokens = return_overflowing_tokens - self.tokenizer = tokenizer - self.data_source = data_source - self.block_size = block_size - self.process_batch_size = process_batch_size +class FromInputIdsDataset(LazyDataset): - def _read_chunk(self) -> Iterator[List[str]]: - lines: List[str] = [] - for line in self.data_source: - if len(line) > 0 and not line.isspace(): - lines.append(line) + def __init__(self, input_ids_file_path: str, ): + super(FromInputIdsDataset, self).__init__() + self.input_ids_file_path = input_ids_file_path - if len(lines) == self.process_batch_size: - yield lines - lines = [] - if len(lines) > 0: - yield lines + def _read_input_ids(self) -> List[List[int]]: + input_ids_list: List[List[int]] = [] + with open(self.input_ids_file_path, "r") as f: + for line in f: + line = line.strip() + if line: + input_ids = json.loads(line) + if input_ids: + input_ids_list.append(input_ids) + return input_ids_list def __linit_entries__(self) -> Sequence[T_co]: - logging.info(f"Creating features from data_sources") - entries: List[List[Dict[str, torch.Tensor]]] = [] - for lines in self._read_chunk(): - entries.append(self._extract_batch(lines)) - logging.info(f"Currently read total {sum(map(len, entries))} at end") - 
logging.info("Extracted and converted training data to `input_ids`.") - return list(chain.from_iterable(entries)) # type: ignore - - def _extract_batch(self, lines: List[str]) -> List[Dict[str, torch.Tensor]]: - raise NotImplementedError() - - -class GroupTextForCasualLMDataset(FromIterableTextDataset): - def _extract_batch(self, lines: List[str]) -> List[Dict[str, torch.Tensor]]: - batch_encoding: List[Dict[str, torch.Tensor]] = [] - current_line = [self.tokenizer.bos_token] - for line in lines: - tokens = self.tokenizer.tokenize(line) - if len(current_line) + len(tokens) + 1 <= self.block_size: - current_line.append(self.tokenizer.bos_token) - current_line.extend(tokens) - elif len(current_line) == self.block_size: - input_ids = self.tokenizer.convert_tokens_to_ids(current_line) - batch_encoding.append({"input_ids": torch.tensor(input_ids), "labels": torch.tensor(input_ids)}) - current_line = [self.tokenizer.bos_token] + tokens - else: - current_line.append(self.tokenizer.bos_token) - n_tokens_to_add = self.block_size - len(current_line) - current_line.extend(tokens[:n_tokens_to_add]) - input_ids = self.tokenizer.convert_tokens_to_ids(current_line) - batch_encoding.append({"input_ids": torch.tensor(input_ids), "labels": torch.tensor(input_ids)}) - - tokens = tokens[n_tokens_to_add:] - while len(tokens) >= self.block_size: - input_ids = self.tokenizer.convert_tokens_to_ids(tokens[: self.block_size]) - batch_encoding.append({"input_ids": torch.tensor(input_ids), "labels": torch.tensor(input_ids)}) - tokens = tokens[self.block_size :] - - current_line = tokens - return batch_encoding + logging.info(f"Start reading input ids") + entries = self._read_input_ids() + logging.info("input ids have been read") + return entries class DataCollatorForGroupTextForCasualLMDataset: - def __call__(self, examples: List[Dict[str, torch.Tensor]]) -> Dict[str, torch.Tensor]: - input_ids = torch.stack([example["input_ids"] for example in examples], dim=0) - labels = torch.stack([example["labels"] for example in examples], dim=0) + def __call__(self, examples: List[List[int]]) -> Dict[str, torch.Tensor]: + input_ids = torch.tensor(examples, dtype=torch.int) + labels = torch.tensor(examples, dtype=torch.int) return {"input_ids": input_ids, "labels": labels} diff --git a/src/language_model/data/extract.py b/src/language_model/data/extract.py index 871ff13..7005eb5 100644 --- a/src/language_model/data/extract.py +++ b/src/language_model/data/extract.py @@ -1,27 +1,27 @@ -import gc +import json import logging import multiprocessing +import os import random from collections import Hashable as HashableType from collections import OrderedDict from concurrent.futures import ProcessPoolExecutor -from itertools import islice from pathlib import Path -from typing import Any, Callable, Dict, Generator, Hashable, Iterable, Iterator, List, Optional, Set, Tuple, Union +from typing import Any, Callable, Dict, Generator, Hashable, Iterable, Iterator, List, Optional, Tuple, Union import numpy as np from bs4 import BeautifulSoup from ds_shared.loading import load_pickle +from math import ceil +from more_itertools import chunked from pynlple.data.corpus import FilteringSource, JsonFieldSource, MappingSource, StackingSource from pynlple.data.filesource import FilePathSource from pynlple.data.source import Source from pynlple.processing.preprocessor import IPreprocessor +from transformers import PreTrainedTokenizer, PreTrainedTokenizerFast -from ..pipeline import ITask from .utils import write_to_texts_file, write_to_train_val_files 
- -# from lsh import cache, minhash - +from ..pipeline import ITask MIN_TEXT_LEN = 10 MIN_TEXT_TOKEN_LENGTH = 2 @@ -162,121 +162,6 @@ def __iter__(self) -> Iterator[str]: yield from text_source -# class MinHashLSHDeduplicator: -# def __init__(self, seeds: Union[int, np.ndarray], char_ngram: int, bands: int, workers: int = -1): -# self.workers = multiprocessing.cpu_count() if workers == -1 else workers -# hasher = minhash.MinHasher(seeds=seeds, char_ngram=char_ngram, random_state=42) -# self.lsh_cache = cache.Cache(num_bands=bands, hasher=hasher) -# -# def deduplicate(self, docs: List[str], min_jaccard: float, clear: bool = True) -> List[str]: -# if clear: -# self.lsh_cache.clear() -# -# duplicate_ids: Set[int] = set() -# keep_form_duplicate_ids = set() -# for i, j in self.get_all_duplicates(docs, min_jaccard): -# if i not in duplicate_ids and j not in duplicate_ids: -# keep_form_duplicate_ids.add(i) -# elif i in keep_form_duplicate_ids and j in keep_form_duplicate_ids: -# keep_form_duplicate_ids.remove(j) -# duplicate_ids.add(i) -# duplicate_ids.add(j) -# -# keep = set(range(len(docs))) - duplicate_ids | keep_form_duplicate_ids -# -# return [docs[i] for i in keep] -# -# def in_memory_batch_deduplicate(self, docs: Iterable[str], min_jaccard: float, batch_size: int) -> Iterable[str]: -# batch_docs = list(islice(docs, batch_size)) -# while batch_docs: -# batch_docs = self.deduplicate(batch_docs, min_jaccard=min_jaccard, clear=True) -# gc.collect() -# add_into_batch = batch_size - len(batch_docs) -# if add_into_batch > 0: -# new_docs = list(islice(docs, add_into_batch)) -# if not new_docs: -# break -# batch_docs.extend(new_docs) -# else: -# yield from batch_docs -# batch_docs = list(islice(docs, batch_size)) -# yield from batch_docs -# -# def lsh_batch_deduplicate( -# self, docs: Iterable[str], min_jaccard: float, batch_size: int, clear: bool = True -# ) -> Iterable[str]: -# """ -# batch_size: max size of docs will be deduplicated at time, while `batch_docs` list will be extended -# until `batch_size` unique docs will be collected into it -# """ -# if clear: -# self.lsh_cache.clear() -# -# start_id, end_id = 0, batch_size -# keep_form_duplicate_ids: Set[int] = set() -# duplicate_ids: Set[int] = set() -# -# batch_docs = list(islice(docs, batch_size)) -# -# while batch_docs: -# -# new_duplicate_ids = set() -# # batch_docs[start_id:] to process just recently appended docs, -# # start_id = start_id to set new ids for recently appended docs -# for i, j in self.get_all_duplicates(batch_docs[start_id:], min_jaccard, start_id=start_id): -# if i not in duplicate_ids and j not in duplicate_ids: -# keep_form_duplicate_ids.add(i) -# elif i in keep_form_duplicate_ids and j in keep_form_duplicate_ids: -# keep_form_duplicate_ids.remove(j) -# duplicate_ids.add(i) -# duplicate_ids.add(j) -# new_duplicate_ids.add(i) -# new_duplicate_ids.add(j) -# -# # new_duplicate_ids - to clear duplicates from recently appended -# # docs, as previous duplicate ids have already cleared -# drop_ids = new_duplicate_ids - keep_form_duplicate_ids -# -# if drop_ids: -# for i in drop_ids: -# self.lsh_cache.remove_id(i) -# -# start_id = end_id -# end_id = start_id + len(drop_ids) -# else: -# # all non duplicated + keep_form_duplicate_ids -# for i in set(range(len(batch_docs))) - duplicate_ids | keep_form_duplicate_ids: -# yield batch_docs[i] -# -# # new batch -# self.lsh_cache.clear() -# start_id, end_id = 0, batch_size -# duplicate_ids, keep_form_duplicate_ids = set(), set() -# batch_docs = [] -# -# # if drop_ids: we append next 
len(drop_ids) examples -# batch_docs.extend(islice(docs, end_id - start_id)) -# -# if batch_docs: -# # all non duplicated + keep_form_duplicate_ids -# for i in set(range(len(batch_docs))) - duplicate_ids | keep_form_duplicate_ids: -# yield batch_docs[i] -# -# def get_all_duplicates(self, docs: Iterable[str], min_jaccard: float, start_id: int = 0) -> Set[Tuple[int, int]]: -# self._cache_texts_parallel(docs, start_id=start_id) -# return self.lsh_cache.get_all_duplicates(min_jaccard) # type: ignore -# -# def _cache_texts(self, docs: Iterable[str], start_id: int = 0) -> None: -# for i, doc in enumerate(docs, start_id): -# self.lsh_cache.add_doc(doc, i) -# -# def _cache_texts_parallel(self, docs: Iterable[str], start_id: int = 0) -> None: -# encoded_docs = (doc.encode("utf8") for doc in docs) -# with ProcessPoolExecutor(max_workers=self.workers) as executor: -# for i, fingerprint in enumerate(executor.map(self.lsh_cache.hasher.fingerprint, encoded_docs), start_id): -# self.lsh_cache.add_fingerprint(fingerprint, i) - - class ExtractTextsFromData(ITask): def __init__( self, @@ -348,3 +233,83 @@ def _write_to_file(self, texts: Iterable[str], environment_path: str) -> None: return write_to_train_val_files( texts, environment_path, test_ratio=self.test_ratio, test_size=self.test_size # type: ignore ) + + +class ExtractVectorsFromTexts(ITask): + def __init__( + self, + data_source: Iterable[str], + tokenizer: Union[PreTrainedTokenizer, PreTrainedTokenizerFast], + block_size: int, + workers: int = -1, + process_batch_size: int = 8192, + ): + self.workers = multiprocessing.cpu_count() if workers == -1 else workers + self.tokenizer = tokenizer + self.block_size = block_size + self.data_source = data_source + self.process_batch_size = process_batch_size + + def execute(self, environment_path: str) -> None: + input_ids_file = os.path.join(environment_path, f"processed_batch.jsonl") + if os.path.exists(input_ids_file): + raise FileExistsError(f"{input_ids_file} already exists") + + counter = 1 + for lines in self._read_chunk(): + batch_size = ceil(len(lines) / self.workers) + batched_lines = chunked(lines, batch_size) + extract_batch_args = ((batch_lines, self.tokenizer, self.block_size) for batch_lines in batched_lines) + + with open(input_ids_file, "a") as fp: + with ProcessPoolExecutor(max_workers=self.workers) as executor: + for batch in executor.map(self._extract_batch, extract_batch_args): + for input_ids in batch: + fp.write(json.dumps(input_ids)) + fp.write("\n") + logging.info(f"Currently extracted {counter} batches of size {self.process_batch_size}") + counter += 1 + + def _read_chunk(self) -> Iterator[List[str]]: + lines: List[str] = [] + for line in self.data_source: + if len(line) > 0 and not line.isspace(): + lines.append(line) + + if len(lines) == self.process_batch_size: + yield lines + lines = [] + if len(lines) > 0: + yield lines + + @staticmethod + def _extract_batch( + args: Tuple[List[str], Union[PreTrainedTokenizer, PreTrainedTokenizerFast], int] + ) -> List[List[int]]: + lines, tokenizer, block_size = args + batch_encoding: List[List[int]] = [] + current_line = [tokenizer.bos_token] + for line in lines: + tokens = tokenizer.tokenize(line) + if len(current_line) + len(tokens) + 1 <= block_size: + current_line.append(tokenizer.bos_token) + current_line.extend(tokens) + elif len(current_line) == block_size: + input_ids = tokenizer.convert_tokens_to_ids(current_line) + batch_encoding.append(input_ids) + current_line = [tokenizer.bos_token] + tokens + else: + 
current_line.append(tokenizer.bos_token) + n_tokens_to_add = block_size - len(current_line) + current_line.extend(tokens[:n_tokens_to_add]) + input_ids = tokenizer.convert_tokens_to_ids(current_line) + batch_encoding.append(input_ids) + + tokens = tokens[n_tokens_to_add:] + while len(tokens) >= block_size: + input_ids = tokenizer.convert_tokens_to_ids(tokens[: block_size]) + batch_encoding.append(input_ids) + tokens = tokens[block_size:] + + current_line = tokens + return batch_encoding From ae77dae923c0f8a306ff298a9e0cce7bb799ecf7 Mon Sep 17 00:00:00 2001 From: vitaliy <1999kvo@gmail.com> Date: Thu, 9 Sep 2021 14:56:41 +0300 Subject: [PATCH 17/28] separate MinHashLSHDeduplicator --- src/language_model/data/fuzzy_dedup.py | 132 +++++++++++++++++++++++++ 1 file changed, 132 insertions(+) create mode 100644 src/language_model/data/fuzzy_dedup.py diff --git a/src/language_model/data/fuzzy_dedup.py b/src/language_model/data/fuzzy_dedup.py new file mode 100644 index 0000000..5987c5d --- /dev/null +++ b/src/language_model/data/fuzzy_dedup.py @@ -0,0 +1,132 @@ +import gc +import multiprocessing +from concurrent.futures import ProcessPoolExecutor +from itertools import islice +from typing import Iterable, List, Tuple, Union, Set + +import numpy as np +try: + from lsh import cache, minhash +except ImportError: + cache, minhash = None, None + + +class MinHashLSHDeduplicator: + def __init__(self, seeds: Union[int, np.ndarray], char_ngram: int, bands: int, workers: int = -1): + if cache is None or minhash is None: + raise ImportError( + "It seems like you do not have lsh package. To use 'MinHashLSHDeduplicator' you need install it: " + "$git clone https://github.com/mattilyra/LSH " + "$cd LSH && python setup.py install" + ) + hasher = minhash.MinHasher(seeds=seeds, char_ngram=char_ngram, random_state=42) + self.lsh_cache = cache.Cache(num_bands=bands, hasher=hasher) + self.workers = multiprocessing.cpu_count() if workers == -1 else workers + + def deduplicate(self, docs: List[str], min_jaccard: float, clear: bool = True) -> List[str]: + if clear: + self.lsh_cache.clear() + + duplicate_ids: Set[int] = set() + keep_form_duplicate_ids = set() + for i, j in self.get_all_duplicates(docs, min_jaccard): + if i not in duplicate_ids and j not in duplicate_ids: + keep_form_duplicate_ids.add(i) + elif i in keep_form_duplicate_ids and j in keep_form_duplicate_ids: + keep_form_duplicate_ids.remove(j) + duplicate_ids.add(i) + duplicate_ids.add(j) + + keep = set(range(len(docs))) - duplicate_ids | keep_form_duplicate_ids + + return [docs[i] for i in keep] + + def in_memory_batch_deduplicate(self, docs: Iterable[str], min_jaccard: float, batch_size: int) -> Iterable[str]: + batch_docs = list(islice(docs, batch_size)) + while batch_docs: + batch_docs = self.deduplicate(batch_docs, min_jaccard=min_jaccard, clear=True) + gc.collect() + add_into_batch = batch_size - len(batch_docs) + if add_into_batch > 0: + new_docs = list(islice(docs, add_into_batch)) + if not new_docs: + break + batch_docs.extend(new_docs) + else: + yield from batch_docs + batch_docs = list(islice(docs, batch_size)) + yield from batch_docs + + def lsh_batch_deduplicate( + self, docs: Iterable[str], min_jaccard: float, batch_size: int, clear: bool = True + ) -> Iterable[str]: + """ + batch_size: max size of docs will be deduplicated at time, while `batch_docs` list will be extended + until `batch_size` unique docs will be collected into it + """ + if clear: + self.lsh_cache.clear() + + start_id, end_id = 0, batch_size + keep_form_duplicate_ids: 
Set[int] = set() + duplicate_ids: Set[int] = set() + + batch_docs = list(islice(docs, batch_size)) + + while batch_docs: + + new_duplicate_ids = set() + # batch_docs[start_id:] to process just recently appended docs, + # start_id = start_id to set new ids for recently appended docs + for i, j in self.get_all_duplicates(batch_docs[start_id:], min_jaccard, start_id=start_id): + if i not in duplicate_ids and j not in duplicate_ids: + keep_form_duplicate_ids.add(i) + elif i in keep_form_duplicate_ids and j in keep_form_duplicate_ids: + keep_form_duplicate_ids.remove(j) + duplicate_ids.add(i) + duplicate_ids.add(j) + new_duplicate_ids.add(i) + new_duplicate_ids.add(j) + + # new_duplicate_ids - to clear duplicates from recently appended + # docs, as previous duplicate ids have already cleared + drop_ids = new_duplicate_ids - keep_form_duplicate_ids + + if drop_ids: + for i in drop_ids: + self.lsh_cache.remove_id(i) + + start_id = end_id + end_id = start_id + len(drop_ids) + else: + # all non duplicated + keep_form_duplicate_ids + for i in set(range(len(batch_docs))) - duplicate_ids | keep_form_duplicate_ids: + yield batch_docs[i] + + # new batch + self.lsh_cache.clear() + start_id, end_id = 0, batch_size + duplicate_ids, keep_form_duplicate_ids = set(), set() + batch_docs = [] + + # if drop_ids: we append next len(drop_ids) examples + batch_docs.extend(islice(docs, end_id - start_id)) + + if batch_docs: + # all non duplicated + keep_form_duplicate_ids + for i in set(range(len(batch_docs))) - duplicate_ids | keep_form_duplicate_ids: + yield batch_docs[i] + + def get_all_duplicates(self, docs: Iterable[str], min_jaccard: float, start_id: int = 0) -> Set[Tuple[int, int]]: + self._cache_texts_parallel(docs, start_id=start_id) + return self.lsh_cache.get_all_duplicates(min_jaccard) # type: ignore + + def _cache_texts(self, docs: Iterable[str], start_id: int = 0) -> None: + for i, doc in enumerate(docs, start_id): + self.lsh_cache.add_doc(doc, i) + + def _cache_texts_parallel(self, docs: Iterable[str], start_id: int = 0) -> None: + encoded_docs = (doc.encode("utf8") for doc in docs) + with ProcessPoolExecutor(max_workers=self.workers) as executor: + for i, fingerprint in enumerate(executor.map(self.lsh_cache.hasher.fingerprint, encoded_docs), start_id): + self.lsh_cache.add_fingerprint(fingerprint, i) \ No newline at end of file From 5d0566e5aac2282e1526a052fc164ae21c0fe4ff Mon Sep 17 00:00:00 2001 From: vitaliy <1999kvo@gmail.com> Date: Thu, 9 Sep 2021 15:25:22 +0300 Subject: [PATCH 18/28] convert tokenizer to transformers format --- .isort.cfg | 2 +- .pre-commit-config.yaml | 31 ++----------------- configs/cyr/gpt/README.md | 3 +- .../gpt/extract_vectors/vectorize-train.py | 4 +-- .../extract_vectors/vectorize-validation.py | 4 +-- configs/cyr/gpt/train_model/ukr-gpt.py | 8 ++--- .../convert-to-transformers.py | 22 +++++++++++++ requirements.txt | 1 + src/language_model/data/dataset.py | 7 ++--- src/language_model/data/extract.py | 22 ++++++------- src/language_model/data/fuzzy_dedup.py | 5 +-- src/language_model/tokenization/tasks.py | 15 +++++++++ 12 files changed, 66 insertions(+), 58 deletions(-) create mode 100644 configs/cyr/gpt/train_tokenizer/convert-to-transformers.py diff --git a/.isort.cfg b/.isort.cfg index 615da3c..256241a 100644 --- a/.isort.cfg +++ b/.isort.cfg @@ -6,4 +6,4 @@ use_parentheses=True line_length=119 skip_glob=venv/*,stubs/* known_first_party = language_model -known_third_party = bs4,datasets,ds_shared,numpy,pynlple,setuptools,tokenizers,torch,transformers,wget 
+known_third_party = bs4,datasets,ds_shared,more_itertools,numpy,pynlple,setuptools,tokenizers,torch,transformers,wget diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 8beafd0..745640b 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,41 +1,14 @@ repos: - - repo: https://github.com/asottile/seed-isort-config - rev: v1.9.1 + - repo: git@github.com:youscan/python-codestyle.git + rev: pre_commit_version hooks: - id: seed-isort-config - - repo: https://github.com/pre-commit/mirrors-isort - rev: v4.3.21 - hooks: - id: isort - args: ["-rc"] - - repo: https://github.com/psf/black - rev: 19.3b0 - hooks: - id: black - args: ["--line-length=119"] - - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v2.3.0 - hooks: - id: trailing-whitespace - id: check-yaml - id: check-json - id: end-of-file-fixer - id: requirements-txt-fixer - - repo: https://github.com/pycqa/flake8 - rev: 3.8.2 - hooks: - id: flake8 - additional_dependencies: [ - flake8-bugbear==20.1.4, - flake8-builtins==1.5.3, - flake8-debugger==3.2.1, - flake8-isort==3.0.0, - isort==4.3.21, - ] - args: ["--config=setup.cfg"] - - repo: https://github.com/pre-commit/mirrors-mypy - rev: v0.761 - hooks: - id: mypy - args: ["--config=setup.cfg"] - exclude: configs/ diff --git a/configs/cyr/gpt/README.md b/configs/cyr/gpt/README.md index 7aed7b7..0d91687 100644 --- a/configs/cyr/gpt/README.md +++ b/configs/cyr/gpt/README.md @@ -6,4 +6,5 @@ 4) `python run.py --task configs/cyr/gpt/extract_texts/train-validation-open-data.py` 5) `python run.py --task configs/cyr/gpt/extract_texts/in-house-data.py` 6) `python run.py --task configs/cyr/gpt/train_tokenizer/ukr-gpt.py` -7) `shuf outputs/cyr/gpt/extract_texts/train-validation-open-data/train.txt -o outputs/cyr/gpt/extract_texts/train-validation-open-data/train_shuffled.txt` +7) `python run.py --task configs/cyr/gpt/train_tokenizer/convert-to-transformers.py` +8) `shuf outputs/cyr/gpt/extract_texts/train-validation-open-data/train.txt -o outputs/cyr/gpt/extract_texts/train-validation-open-data/train_shuffled.txt` diff --git a/configs/cyr/gpt/extract_vectors/vectorize-train.py b/configs/cyr/gpt/extract_vectors/vectorize-train.py index f50a010..7b4a9bd 100644 --- a/configs/cyr/gpt/extract_vectors/vectorize-train.py +++ b/configs/cyr/gpt/extract_vectors/vectorize-train.py @@ -2,7 +2,7 @@ from transformers import PreTrainedTokenizerFast -from language_model.data.extract import LineByLineSource, ShuffledSources, ExtractVectorsFromTexts +from language_model.data.extract import ExtractVectorsFromTexts, LineByLineSource, ShuffledSources TOKENIZER_PATH = "outputs/cyr/gpt/train_tokenizer/convert-to-transformers/tokenizer/" @@ -23,5 +23,5 @@ tokenizer=PreTrainedTokenizerFast.from_pretrained(TOKENIZER_PATH), block_size=MODEL_MAX_LENGTH, process_batch_size=100_000, - workers=18 + workers=18, ) diff --git a/configs/cyr/gpt/extract_vectors/vectorize-validation.py b/configs/cyr/gpt/extract_vectors/vectorize-validation.py index 0f08f06..11fcd68 100644 --- a/configs/cyr/gpt/extract_vectors/vectorize-validation.py +++ b/configs/cyr/gpt/extract_vectors/vectorize-validation.py @@ -2,7 +2,7 @@ from transformers import PreTrainedTokenizerFast -from language_model.data.extract import LineByLineSource, ExtractVectorsFromTexts +from language_model.data.extract import ExtractVectorsFromTexts, LineByLineSource TOKENIZER_PATH = "outputs/cyr/gpt/train_tokenizer/convert-to-transformers/tokenizer/" @@ -18,5 +18,5 @@ tokenizer=PreTrainedTokenizerFast.from_pretrained(TOKENIZER_PATH), 
block_size=MODEL_MAX_LENGTH, process_batch_size=100_000, - workers=18 + workers=18, ) diff --git a/configs/cyr/gpt/train_model/ukr-gpt.py b/configs/cyr/gpt/train_model/ukr-gpt.py index d96ad5b..7ac7d3f 100644 --- a/configs/cyr/gpt/train_model/ukr-gpt.py +++ b/configs/cyr/gpt/train_model/ukr-gpt.py @@ -10,13 +10,9 @@ from language_model.data.dataset import DataCollatorForGroupTextForCasualLMDataset, FromInputIdsDataset from language_model.modelling.trainer import TransformersTrainTaskWithTokenizerSaving -TOKENIZER_PATH = ( - f"/mnt/lost+found/language-models/outputs/cyr/gpt/train_tokenizer/convert-to-transformers/tokenizer/" -) +TOKENIZER_PATH = "/mnt/lost+found/language-models/outputs/cyr/gpt/train_tokenizer/convert-to-transformers/tokenizer/" -TRAIN_IDS_PATH = ( - "/mnt/lost+found/language-models/outputs/cyr/gpt/extract_texts/vectorize-train/processed_batch.jsonl" -) +TRAIN_IDS_PATH = "/mnt/lost+found/language-models/outputs/cyr/gpt/extract_texts/vectorize-train/processed_batch.jsonl" VALIDATION_IDS_PATH = ( "/mnt/lost+found/language-models/outputs/cyr/gpt/extract_texts/vectorize-validation/processed_batch.jsonl" ) diff --git a/configs/cyr/gpt/train_tokenizer/convert-to-transformers.py b/configs/cyr/gpt/train_tokenizer/convert-to-transformers.py new file mode 100644 index 0000000..4885e79 --- /dev/null +++ b/configs/cyr/gpt/train_tokenizer/convert-to-transformers.py @@ -0,0 +1,22 @@ +from transformers import PreTrainedTokenizerFast + +from language_model.tokenization.factory import FAST_TOKENIZER_DEFAULT_FILE_NAME +from language_model.tokenization.tasks import PreTrainedTokenizerFastSavingTask + +TOKENIZER_PATH = f"outputs/cyr/gpt/train_tokenizer/ukr-gpt/{FAST_TOKENIZER_DEFAULT_FILE_NAME}" + +IN_HOUSE_TRAIN_DATA_PATH = "outputs/cyr/gpt/extract_texts/in-house-data/texts.txt" +OPEN_TRAIN_DATA_PATH = "outputs/cyr/gpt/extract_texts/train-validation-open-data/train_shuffled.txt" +MODEL_MAX_LENGTH = 1024 + + +# tokenizer +tokenizer = PreTrainedTokenizerFast( + tokenizer_file=TOKENIZER_PATH, model_max_length=MODEL_MAX_LENGTH, padding_side="right" +) +tokenizer.add_special_tokens({"bos_token": "<|endoftext|>"}) +# basically `pad_token` wont be used for training, as DataCollatorForGroupTextForCasualLMDataset pack sequences up to +# max_length but to avoid an error within DataCollatorForGroupTextForCasualLMDataset +tokenizer.pad_token = tokenizer.bos_token + +task = PreTrainedTokenizerFastSavingTask(pretrained_fast_tokenizer=tokenizer) diff --git a/requirements.txt b/requirements.txt index 728fc14..6df5490 100644 --- a/requirements.txt +++ b/requirements.txt @@ -2,6 +2,7 @@ bs4==0.0.1 Cython==3.0.0a9 datasets==1.11.0 lxml==4.6.3 +more-itertools==8.9.0 numpy==1.19.5 pyNlple==0.7.5 tokenizers==0.10.1 diff --git a/src/language_model/data/dataset.py b/src/language_model/data/dataset.py index d37cfbc..4019308 100644 --- a/src/language_model/data/dataset.py +++ b/src/language_model/data/dataset.py @@ -1,10 +1,10 @@ import itertools import json import logging +import math from itertools import chain from typing import Dict, Iterable, Iterator, List, Optional, Sequence -import math import torch from torch._utils import _accumulate from torch.utils.data import Dataset @@ -130,8 +130,7 @@ def split_lazy_dataset(dataset: LazyDataset, portions: Sequence[float]) -> List[ class FromInputIdsDataset(LazyDataset): - - def __init__(self, input_ids_file_path: str, ): + def __init__(self, input_ids_file_path: str): super(FromInputIdsDataset, self).__init__() self.input_ids_file_path = input_ids_file_path @@ -147,7 
+146,7 @@ def _read_input_ids(self) -> List[List[int]]: return input_ids_list def __linit_entries__(self) -> Sequence[T_co]: - logging.info(f"Start reading input ids") + logging.info("Start reading input ids") entries = self._read_input_ids() logging.info("input ids have been read") return entries diff --git a/src/language_model/data/extract.py b/src/language_model/data/extract.py index 7005eb5..1107426 100644 --- a/src/language_model/data/extract.py +++ b/src/language_model/data/extract.py @@ -6,13 +6,13 @@ from collections import Hashable as HashableType from collections import OrderedDict from concurrent.futures import ProcessPoolExecutor +from math import ceil from pathlib import Path from typing import Any, Callable, Dict, Generator, Hashable, Iterable, Iterator, List, Optional, Tuple, Union import numpy as np from bs4 import BeautifulSoup from ds_shared.loading import load_pickle -from math import ceil from more_itertools import chunked from pynlple.data.corpus import FilteringSource, JsonFieldSource, MappingSource, StackingSource from pynlple.data.filesource import FilePathSource @@ -20,8 +20,8 @@ from pynlple.processing.preprocessor import IPreprocessor from transformers import PreTrainedTokenizer, PreTrainedTokenizerFast -from .utils import write_to_texts_file, write_to_train_val_files from ..pipeline import ITask +from .utils import write_to_texts_file, write_to_train_val_files MIN_TEXT_LEN = 10 MIN_TEXT_TOKEN_LENGTH = 2 @@ -237,12 +237,12 @@ def _write_to_file(self, texts: Iterable[str], environment_path: str) -> None: class ExtractVectorsFromTexts(ITask): def __init__( - self, - data_source: Iterable[str], - tokenizer: Union[PreTrainedTokenizer, PreTrainedTokenizerFast], - block_size: int, - workers: int = -1, - process_batch_size: int = 8192, + self, + data_source: Iterable[str], + tokenizer: Union[PreTrainedTokenizer, PreTrainedTokenizerFast], + block_size: int, + workers: int = -1, + process_batch_size: int = 8192, ): self.workers = multiprocessing.cpu_count() if workers == -1 else workers self.tokenizer = tokenizer @@ -251,7 +251,7 @@ def __init__( self.process_batch_size = process_batch_size def execute(self, environment_path: str) -> None: - input_ids_file = os.path.join(environment_path, f"processed_batch.jsonl") + input_ids_file = os.path.join(environment_path, "processed_batch.jsonl") if os.path.exists(input_ids_file): raise FileExistsError(f"{input_ids_file} already exists") @@ -284,7 +284,7 @@ def _read_chunk(self) -> Iterator[List[str]]: @staticmethod def _extract_batch( - args: Tuple[List[str], Union[PreTrainedTokenizer, PreTrainedTokenizerFast], int] + args: Tuple[List[str], Union[PreTrainedTokenizer, PreTrainedTokenizerFast], int] ) -> List[List[int]]: lines, tokenizer, block_size = args batch_encoding: List[List[int]] = [] @@ -307,7 +307,7 @@ def _extract_batch( tokens = tokens[n_tokens_to_add:] while len(tokens) >= block_size: - input_ids = tokenizer.convert_tokens_to_ids(tokens[: block_size]) + input_ids = tokenizer.convert_tokens_to_ids(tokens[:block_size]) batch_encoding.append(input_ids) tokens = tokens[block_size:] diff --git a/src/language_model/data/fuzzy_dedup.py b/src/language_model/data/fuzzy_dedup.py index 5987c5d..5d9ea7d 100644 --- a/src/language_model/data/fuzzy_dedup.py +++ b/src/language_model/data/fuzzy_dedup.py @@ -2,9 +2,10 @@ import multiprocessing from concurrent.futures import ProcessPoolExecutor from itertools import islice -from typing import Iterable, List, Tuple, Union, Set +from typing import Iterable, List, Set, Tuple, Union import 
numpy as np + try: from lsh import cache, minhash except ImportError: @@ -129,4 +130,4 @@ def _cache_texts_parallel(self, docs: Iterable[str], start_id: int = 0) -> None: encoded_docs = (doc.encode("utf8") for doc in docs) with ProcessPoolExecutor(max_workers=self.workers) as executor: for i, fingerprint in enumerate(executor.map(self.lsh_cache.hasher.fingerprint, encoded_docs), start_id): - self.lsh_cache.add_fingerprint(fingerprint, i) \ No newline at end of file + self.lsh_cache.add_fingerprint(fingerprint, i) diff --git a/src/language_model/tokenization/tasks.py b/src/language_model/tokenization/tasks.py index fcc1497..5a784f5 100644 --- a/src/language_model/tokenization/tasks.py +++ b/src/language_model/tokenization/tasks.py @@ -1,6 +1,7 @@ import os from tokenizers import Tokenizer +from transformers import PreTrainedTokenizerFast from ..pipeline import ITask from .factory import FAST_TOKENIZER_DEFAULT_FILE_NAME @@ -13,3 +14,17 @@ def __init__(self, fast_tokenizer: Tokenizer) -> None: def execute(self, environment_path: str) -> None: self.fast_tokenizer.save(os.path.join(environment_path, FAST_TOKENIZER_DEFAULT_FILE_NAME), pretty=True) + + +class PreTrainedTokenizerFastSavingTask(ITask): + def __init__( + self, pretrained_fast_tokenizer: PreTrainedTokenizerFast, tokenizer_folder_name: str = "tokenizer" + ) -> None: + super().__init__() + self.pretrained_fast_tokenizer = pretrained_fast_tokenizer + self.tokenizer_folder_name = tokenizer_folder_name + + def execute(self, environment_path: str) -> None: + self.pretrained_fast_tokenizer.save_pretrained( + os.path.join(environment_path, self.tokenizer_folder_name), legacy_format=False + ) From 2b4103a43cd3d5657f4702af62e9bb68de0d3ce3 Mon Sep 17 00:00:00 2001 From: vitaliy <1999kvo@gmail.com> Date: Fri, 10 Sep 2021 14:13:00 +0300 Subject: [PATCH 19/28] removed unused class --- src/language_model/modelling/trainer.py | 16 ---------------- 1 file changed, 16 deletions(-) diff --git a/src/language_model/modelling/trainer.py b/src/language_model/modelling/trainer.py index aed7ade..782253a 100644 --- a/src/language_model/modelling/trainer.py +++ b/src/language_model/modelling/trainer.py @@ -28,22 +28,6 @@ def execute(self, environment_path: str) -> None: self.trainer.save_model(os.path.join(environment_path, self.model_folder_name)) -class TransformersTrainTaskWithTokenizerSaving(TransformersTrainTask): - def __init__( - self, - trainer: Trainer, - checkpoint_folder: Optional[str] = None, - model_folder_name: str = "model", - tokenizer_folder_name: str = "tokenizer", - ): - super().__init__(trainer, checkpoint_folder, model_folder_name) - self.tokenizer_folder_name = tokenizer_folder_name - - def execute(self, environment_path: str) -> None: - super().execute(environment_path) - self.trainer.tokenizer.save_pretrained(os.path.join(environment_path, self.tokenizer_folder_name)) - - class RobertaForMaskedLMTrainTask(ITask): def __init__( self, From 7a3444842730a1f28a68d2d9db7601c6a08ba18d Mon Sep 17 00:00:00 2001 From: vitaliy <1999kvo@gmail.com> Date: Fri, 10 Sep 2021 14:13:21 +0300 Subject: [PATCH 20/28] updated config --- configs/cyr/gpt/train_model/ukr-gpt.py | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/configs/cyr/gpt/train_model/ukr-gpt.py b/configs/cyr/gpt/train_model/ukr-gpt.py index 7ac7d3f..7eafd42 100644 --- a/configs/cyr/gpt/train_model/ukr-gpt.py +++ b/configs/cyr/gpt/train_model/ukr-gpt.py @@ -8,13 +8,15 @@ ) from language_model.data.dataset import DataCollatorForGroupTextForCasualLMDataset, 
FromInputIdsDataset -from language_model.modelling.trainer import TransformersTrainTaskWithTokenizerSaving +from language_model.modelling.trainer import TransformersTrainTask TOKENIZER_PATH = "/mnt/lost+found/language-models/outputs/cyr/gpt/train_tokenizer/convert-to-transformers/tokenizer/" -TRAIN_IDS_PATH = "/mnt/lost+found/language-models/outputs/cyr/gpt/extract_texts/vectorize-train/processed_batch.jsonl" +TRAIN_IDS_PATH = ( + "/mnt/lost+found/language-models/outputs/cyr/gpt/extract_vectors/vectorize-train/processed_batch.jsonl" +) VALIDATION_IDS_PATH = ( - "/mnt/lost+found/language-models/outputs/cyr/gpt/extract_texts/vectorize-validation/processed_batch.jsonl" + "/mnt/lost+found/language-models/outputs/cyr/gpt/extract_vectors/vectorize-validation/processed_batch.jsonl" ) MODEL_MAX_LENGTH = 1024 @@ -38,8 +40,8 @@ evaluation_strategy=IntervalStrategy.STEPS, eval_steps=250000, num_train_epochs=5, - per_device_train_batch_size=8, # overall bs = 8 * 8 * num_gpus (GPT2 used 512) - gradient_accumulation_steps=8, + per_device_train_batch_size=4, # overall bs = 4 * 16 * num_gpus (GPT2 used 512) + gradient_accumulation_steps=16, per_device_eval_batch_size=4, output_dir="checkpoints", overwrite_output_dir=False, @@ -58,6 +60,7 @@ load_best_model_at_end=True, group_by_length=False, report_to=["mlflow"], + dataloader_num_workers=1 # because of IterableDataset that reads from one opened file ) trainer = Trainer( @@ -68,4 +71,4 @@ data_collator=data_collator, ) -task = TransformersTrainTaskWithTokenizerSaving(trainer=trainer) +task = TransformersTrainTask(trainer=trainer) From ae0a8c9af629185ed7131e1e901a4130faae73d6 Mon Sep 17 00:00:00 2001 From: vitaliy <1999kvo@gmail.com> Date: Fri, 10 Sep 2021 14:35:36 +0300 Subject: [PATCH 21/28] map -> submit & as_completed --- src/language_model/data/extract.py | 21 +++++++++++++-------- 1 file changed, 13 insertions(+), 8 deletions(-) diff --git a/src/language_model/data/extract.py b/src/language_model/data/extract.py index 1107426..8bb5997 100644 --- a/src/language_model/data/extract.py +++ b/src/language_model/data/extract.py @@ -5,14 +5,15 @@ import random from collections import Hashable as HashableType from collections import OrderedDict +from concurrent import futures from concurrent.futures import ProcessPoolExecutor -from math import ceil from pathlib import Path -from typing import Any, Callable, Dict, Generator, Hashable, Iterable, Iterator, List, Optional, Tuple, Union +from typing import Any, Callable, Dict, Generator, Hashable, Iterable, Iterator, List, Optional, Union import numpy as np from bs4 import BeautifulSoup from ds_shared.loading import load_pickle +from math import ceil from more_itertools import chunked from pynlple.data.corpus import FilteringSource, JsonFieldSource, MappingSource, StackingSource from pynlple.data.filesource import FilePathSource @@ -20,8 +21,8 @@ from pynlple.processing.preprocessor import IPreprocessor from transformers import PreTrainedTokenizer, PreTrainedTokenizerFast -from ..pipeline import ITask from .utils import write_to_texts_file, write_to_train_val_files +from ..pipeline import ITask MIN_TEXT_LEN = 10 MIN_TEXT_TOKEN_LENGTH = 2 @@ -259,17 +260,22 @@ def execute(self, environment_path: str) -> None: for lines in self._read_chunk(): batch_size = ceil(len(lines) / self.workers) batched_lines = chunked(lines, batch_size) - extract_batch_args = ((batch_lines, self.tokenizer, self.block_size) for batch_lines in batched_lines) with open(input_ids_file, "a") as fp: with 
ProcessPoolExecutor(max_workers=self.workers) as executor: - for batch in executor.map(self._extract_batch, extract_batch_args): - for input_ids in batch: + tasks = [ + executor.submit(self._extract_batch, batch_lines, self.tokenizer, self.block_size) + for batch_lines in batched_lines + ] + for completed_task in futures.as_completed(tasks): + for input_ids in completed_task.result(): fp.write(json.dumps(input_ids)) fp.write("\n") logging.info(f"Currently extracted {counter} batches of size {self.process_batch_size}") counter += 1 + logging.info(f"Vectors extracted") + def _read_chunk(self) -> Iterator[List[str]]: lines: List[str] = [] for line in self.data_source: @@ -284,9 +290,8 @@ def _read_chunk(self) -> Iterator[List[str]]: @staticmethod def _extract_batch( - args: Tuple[List[str], Union[PreTrainedTokenizer, PreTrainedTokenizerFast], int] + lines: List[str], tokenizer: Union[PreTrainedTokenizer, PreTrainedTokenizerFast], block_size: int ) -> List[List[int]]: - lines, tokenizer, block_size = args batch_encoding: List[List[int]] = [] current_line = [tokenizer.bos_token] for line in lines: From 0b1f91d37fab2d23780c7f89bf0af2a08b80277c Mon Sep 17 00:00:00 2001 From: vitaliy <1999kvo@gmail.com> Date: Fri, 10 Sep 2021 14:37:30 +0300 Subject: [PATCH 22/28] IterableDataset --- src/language_model/data/dataset.py | 38 ++++++++++++++++++------------ 1 file changed, 23 insertions(+), 15 deletions(-) diff --git a/src/language_model/data/dataset.py b/src/language_model/data/dataset.py index 4019308..5419a15 100644 --- a/src/language_model/data/dataset.py +++ b/src/language_model/data/dataset.py @@ -7,7 +7,7 @@ import torch from torch._utils import _accumulate -from torch.utils.data import Dataset +from torch.utils.data import Dataset, IterableDataset from torch.utils.data.dataset import T_co from transformers import PreTrainedTokenizer @@ -129,31 +129,39 @@ def split_lazy_dataset(dataset: LazyDataset, portions: Sequence[float]) -> List[ return [LazySubset(dataset, portions_provider=portions_provider, portion_id=i) for i in range(len(portions))] -class FromInputIdsDataset(LazyDataset): +class FromInputIdsDataset(IterableDataset): def __init__(self, input_ids_file_path: str): super(FromInputIdsDataset, self).__init__() self.input_ids_file_path = input_ids_file_path + self.length = self._get_number_of_valid_lines() - def _read_input_ids(self) -> List[List[int]]: - input_ids_list: List[List[int]] = [] + def _get_number_of_valid_lines(self) -> int: + number_of_valid_lines = 0 + for _ in self._read_lines(): + number_of_valid_lines += 1 + return number_of_valid_lines + + def _read_lines(self) -> Iterable[str]: with open(self.input_ids_file_path, "r") as f: for line in f: line = line.strip() if line: - input_ids = json.loads(line) - if input_ids: - input_ids_list.append(input_ids) - return input_ids_list + yield line - def __linit_entries__(self) -> Sequence[T_co]: - logging.info("Start reading input ids") - entries = self._read_input_ids() - logging.info("input ids have been read") - return entries + def __len__(self) -> int: + return self.length + + def __iter__(self) -> Iterator[List[int]]: + for line in self._read_lines(): + yield self._process(line) + + @staticmethod + def _process(line: str) -> List[int]: + return json.loads(line) # type: ignore class DataCollatorForGroupTextForCasualLMDataset: def __call__(self, examples: List[List[int]]) -> Dict[str, torch.Tensor]: - input_ids = torch.tensor(examples, dtype=torch.int) - labels = torch.tensor(examples, dtype=torch.int) + input_ids = 
torch.tensor(examples, dtype=torch.long) + labels = torch.tensor(examples, dtype=torch.long) return {"input_ids": input_ids, "labels": labels} From 5bd1dc0f1175600e6edcffb929f32ac8dd5a7fd5 Mon Sep 17 00:00:00 2001 From: vitaliy <1999kvo@gmail.com> Date: Fri, 10 Sep 2021 14:39:02 +0300 Subject: [PATCH 23/28] pre-commit fixes --- configs/cyr/gpt/train_model/ukr-gpt.py | 2 +- src/language_model/data/extract.py | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/configs/cyr/gpt/train_model/ukr-gpt.py b/configs/cyr/gpt/train_model/ukr-gpt.py index 7eafd42..09dd9d9 100644 --- a/configs/cyr/gpt/train_model/ukr-gpt.py +++ b/configs/cyr/gpt/train_model/ukr-gpt.py @@ -60,7 +60,7 @@ load_best_model_at_end=True, group_by_length=False, report_to=["mlflow"], - dataloader_num_workers=1 # because of IterableDataset that reads from one opened file + dataloader_num_workers=1, # because of IterableDataset that reads from one opened file ) trainer = Trainer( diff --git a/src/language_model/data/extract.py b/src/language_model/data/extract.py index 8bb5997..3692eee 100644 --- a/src/language_model/data/extract.py +++ b/src/language_model/data/extract.py @@ -7,13 +7,13 @@ from collections import OrderedDict from concurrent import futures from concurrent.futures import ProcessPoolExecutor +from math import ceil from pathlib import Path from typing import Any, Callable, Dict, Generator, Hashable, Iterable, Iterator, List, Optional, Union import numpy as np from bs4 import BeautifulSoup from ds_shared.loading import load_pickle -from math import ceil from more_itertools import chunked from pynlple.data.corpus import FilteringSource, JsonFieldSource, MappingSource, StackingSource from pynlple.data.filesource import FilePathSource @@ -21,8 +21,8 @@ from pynlple.processing.preprocessor import IPreprocessor from transformers import PreTrainedTokenizer, PreTrainedTokenizerFast -from .utils import write_to_texts_file, write_to_train_val_files from ..pipeline import ITask +from .utils import write_to_texts_file, write_to_train_val_files MIN_TEXT_LEN = 10 MIN_TEXT_TOKEN_LENGTH = 2 @@ -274,7 +274,7 @@ def execute(self, environment_path: str) -> None: logging.info(f"Currently extracted {counter} batches of size {self.process_batch_size}") counter += 1 - logging.info(f"Vectors extracted") + logging.info("Vectors extracted") def _read_chunk(self) -> Iterator[List[str]]: lines: List[str] = [] @@ -290,7 +290,7 @@ def _read_chunk(self) -> Iterator[List[str]]: @staticmethod def _extract_batch( - lines: List[str], tokenizer: Union[PreTrainedTokenizer, PreTrainedTokenizerFast], block_size: int + lines: List[str], tokenizer: Union[PreTrainedTokenizer, PreTrainedTokenizerFast], block_size: int ) -> List[List[int]]: batch_encoding: List[List[int]] = [] current_line = [tokenizer.bos_token] From eb8aac7606d9861b4d31a34a096352cf5fb79636 Mon Sep 17 00:00:00 2001 From: vitaliy <1999kvo@gmail.com> Date: Fri, 10 Sep 2021 21:24:31 +0300 Subject: [PATCH 24/28] update paths --- configs/cyr/gpt/train_model/ukr-gpt.py | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/configs/cyr/gpt/train_model/ukr-gpt.py b/configs/cyr/gpt/train_model/ukr-gpt.py index 09dd9d9..27d867d 100644 --- a/configs/cyr/gpt/train_model/ukr-gpt.py +++ b/configs/cyr/gpt/train_model/ukr-gpt.py @@ -10,14 +10,10 @@ from language_model.data.dataset import DataCollatorForGroupTextForCasualLMDataset, FromInputIdsDataset from language_model.modelling.trainer import TransformersTrainTask -TOKENIZER_PATH = 
"/mnt/lost+found/language-models/outputs/cyr/gpt/train_tokenizer/convert-to-transformers/tokenizer/" +TOKENIZER_PATH = "outputs/cyr/gpt/train_tokenizer/convert-to-transformers/tokenizer/" -TRAIN_IDS_PATH = ( - "/mnt/lost+found/language-models/outputs/cyr/gpt/extract_vectors/vectorize-train/processed_batch.jsonl" -) -VALIDATION_IDS_PATH = ( - "/mnt/lost+found/language-models/outputs/cyr/gpt/extract_vectors/vectorize-validation/processed_batch.jsonl" -) +TRAIN_IDS_PATH = "outputs/cyr/gpt/extract_vectors/vectorize-train/processed_batch.jsonl" +VALIDATION_IDS_PATH = "outputs/cyr/gpt/extract_vectors/vectorize-validation/processed_batch.jsonl" MODEL_MAX_LENGTH = 1024 From c3fdeb5456ea608774f520d1c6e64f41c1b91899 Mon Sep 17 00:00:00 2001 From: vitaliy <1999kvo@gmail.com> Date: Mon, 13 Sep 2021 17:33:02 +0300 Subject: [PATCH 25/28] fix max length --- configs/cyr/gpt/train_model/ukr-gpt.py | 2 +- src/language_model/data/dataset.py | 4 ++++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/configs/cyr/gpt/train_model/ukr-gpt.py b/configs/cyr/gpt/train_model/ukr-gpt.py index 27d867d..ed1dedf 100644 --- a/configs/cyr/gpt/train_model/ukr-gpt.py +++ b/configs/cyr/gpt/train_model/ukr-gpt.py @@ -27,7 +27,7 @@ # data train_dataset = FromInputIdsDataset(TRAIN_IDS_PATH) valid_dataset = FromInputIdsDataset(VALIDATION_IDS_PATH) -data_collator = DataCollatorForGroupTextForCasualLMDataset() +data_collator = DataCollatorForGroupTextForCasualLMDataset(MODEL_MAX_LENGTH) training_args = TrainingArguments( diff --git a/src/language_model/data/dataset.py b/src/language_model/data/dataset.py index 5419a15..cfebc49 100644 --- a/src/language_model/data/dataset.py +++ b/src/language_model/data/dataset.py @@ -161,7 +161,11 @@ def _process(line: str) -> List[int]: class DataCollatorForGroupTextForCasualLMDataset: + def __init__(self, max_length: int): + self.max_length = max_length + def __call__(self, examples: List[List[int]]) -> Dict[str, torch.Tensor]: + examples = [ids[: self.max_length] for ids in examples] input_ids = torch.tensor(examples, dtype=torch.long) labels = torch.tensor(examples, dtype=torch.long) return {"input_ids": input_ids, "labels": labels} From ba2569c58c7b94e6f3f2cba35109b61ca0b2b53a Mon Sep 17 00:00:00 2001 From: vitaliy <1999kvo@gmail.com> Date: Mon, 13 Sep 2021 18:21:05 +0300 Subject: [PATCH 26/28] more frequent eval --- configs/cyr/gpt/train_model/ukr-gpt.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/configs/cyr/gpt/train_model/ukr-gpt.py b/configs/cyr/gpt/train_model/ukr-gpt.py index ed1dedf..13fa96b 100644 --- a/configs/cyr/gpt/train_model/ukr-gpt.py +++ b/configs/cyr/gpt/train_model/ukr-gpt.py @@ -34,14 +34,14 @@ do_train=True, do_eval=True, evaluation_strategy=IntervalStrategy.STEPS, - eval_steps=250000, + eval_steps=20_000, num_train_epochs=5, per_device_train_batch_size=4, # overall bs = 4 * 16 * num_gpus (GPT2 used 512) gradient_accumulation_steps=16, per_device_eval_batch_size=4, output_dir="checkpoints", overwrite_output_dir=False, - save_steps=250000, + save_steps=20_000, save_total_limit=10, prediction_loss_only=False, learning_rate=0.0002, # (was manually tuned in GPT2 on held-out validation) From f9f7c43c07733529e0161c85cc608c6ffe8415c2 Mon Sep 17 00:00:00 2001 From: vitaliy <1999kvo@gmail.com> Date: Thu, 16 Sep 2021 08:04:19 +0300 Subject: [PATCH 27/28] make validation dataset not IterableDataset --- configs/cyr/gpt/train_model/ukr-gpt.py | 8 ++++++-- src/language_model/data/dataset.py | 24 ++++++++++++++++++++++-- 2 files changed, 28 
insertions(+), 4 deletions(-) diff --git a/configs/cyr/gpt/train_model/ukr-gpt.py b/configs/cyr/gpt/train_model/ukr-gpt.py index 13fa96b..300fb8d 100644 --- a/configs/cyr/gpt/train_model/ukr-gpt.py +++ b/configs/cyr/gpt/train_model/ukr-gpt.py @@ -7,7 +7,11 @@ TrainingArguments, ) -from language_model.data.dataset import DataCollatorForGroupTextForCasualLMDataset, FromInputIdsDataset +from language_model.data.dataset import ( + DataCollatorForGroupTextForCasualLMDataset, + FromInputIdsDataset, + FromInputIdsIterableDataset, +) from language_model.modelling.trainer import TransformersTrainTask TOKENIZER_PATH = "outputs/cyr/gpt/train_tokenizer/convert-to-transformers/tokenizer/" @@ -25,7 +29,7 @@ # data -train_dataset = FromInputIdsDataset(TRAIN_IDS_PATH) +train_dataset = FromInputIdsIterableDataset(TRAIN_IDS_PATH) valid_dataset = FromInputIdsDataset(VALIDATION_IDS_PATH) data_collator = DataCollatorForGroupTextForCasualLMDataset(MODEL_MAX_LENGTH) diff --git a/src/language_model/data/dataset.py b/src/language_model/data/dataset.py index cfebc49..36ebf58 100644 --- a/src/language_model/data/dataset.py +++ b/src/language_model/data/dataset.py @@ -129,9 +129,9 @@ def split_lazy_dataset(dataset: LazyDataset, portions: Sequence[float]) -> List[ return [LazySubset(dataset, portions_provider=portions_provider, portion_id=i) for i in range(len(portions))] -class FromInputIdsDataset(IterableDataset): +class FromInputIdsIterableDataset(IterableDataset): def __init__(self, input_ids_file_path: str): - super(FromInputIdsDataset, self).__init__() + super(FromInputIdsIterableDataset, self).__init__() self.input_ids_file_path = input_ids_file_path self.length = self._get_number_of_valid_lines() @@ -160,6 +160,26 @@ def _process(line: str) -> List[int]: return json.loads(line) # type: ignore +class FromInputIdsDataset(Dataset): + def __init__(self, input_ids_file_path: str): + self.data = [] + with open(input_ids_file_path, "r") as f: + for line in f: + line = line.strip() + if line: + self.data.append(self._process(line)) + + def __getitem__(self, index: int) -> List[int]: + return self.data[index] + + def __len__(self) -> int: + return len(self.data) + + @staticmethod + def _process(line: str) -> List[int]: + return json.loads(line) # type: ignore + + class DataCollatorForGroupTextForCasualLMDataset: def __init__(self, max_length: int): self.max_length = max_length From 5d9c4d2d352411fe15c71cb13b2b6e77a70bb53d Mon Sep 17 00:00:00 2001 From: vitaliy <1999kvo@gmail.com> Date: Thu, 16 Sep 2021 08:05:52 +0300 Subject: [PATCH 28/28] add logs to .gitignore --- .gitignore | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.gitignore b/.gitignore index 5f3943a..d01c1ab 100644 --- a/.gitignore +++ b/.gitignore @@ -97,3 +97,7 @@ results/ outputs/ lab/ credentials + +# logs +logs/ +mlruns/