Datasets:

ArXiv:
Elron commited on
Commit
8e99545
·
verified ·
1 Parent(s): aed783f

Upload folder using huggingface_hub

Browse files
Files changed (34) hide show
  1. api.py +41 -17
  2. artifact.py +58 -60
  3. augmentors.py +24 -19
  4. benchmark.py +20 -3
  5. catalog.py +14 -13
  6. data.py +15 -3
  7. error_utils.py +3 -0
  8. formats.py +287 -103
  9. generator_utils.py +2 -2
  10. image_operators.py +214 -16
  11. inference.py +915 -60
  12. llm_as_judge.py +37 -10
  13. loaders.py +114 -135
  14. metric_utils.py +21 -2
  15. metrics.py +91 -82
  16. operator.py +61 -17
  17. operators.py +62 -10
  18. processors.py +7 -0
  19. register.py +24 -4
  20. schema.py +44 -5
  21. serializers.py +21 -4
  22. settings_utils.py +9 -2
  23. split_utils.py +3 -1
  24. splitters.py +2 -4
  25. standard.py +59 -25
  26. stream.py +25 -11
  27. string_operators.py +14 -0
  28. struct_data_operators.py +74 -10
  29. task.py +5 -5
  30. templates.py +10 -2
  31. text_utils.py +59 -0
  32. types.py +5 -0
  33. utils.py +101 -0
  34. version.py +1 -1
api.py CHANGED
@@ -2,16 +2,21 @@ import json
2
  from functools import lru_cache
3
  from typing import Any, Dict, List, Optional, Union
4
 
 
 
5
  from .artifact import fetch_artifact
6
  from .dataset_utils import get_dataset_artifact
7
  from .inference import InferenceEngine, LogProbInferenceEngine
8
  from .logging_utils import get_logger
9
  from .metric_utils import _compute, _inference_post_process
10
  from .operator import SourceOperator
11
- from .schema import UNITXT_DATASET_SCHEMA
 
12
  from .standard import StandardRecipe
13
 
14
  logger = get_logger()
 
 
15
 
16
 
17
  def load(source: Union[SourceOperator, str]):
@@ -80,8 +85,12 @@ def load_recipe(dataset_query: Optional[str] = None, **kwargs) -> StandardRecipe
80
 
81
 
82
  def load_dataset(
83
- dataset_query: Optional[str] = None, streaming: bool = False, **kwargs
84
- ):
 
 
 
 
85
  """Loads dataset.
86
 
87
  If the 'dataset_query' argument is provided, then dataset is loaded from a card in local
@@ -93,6 +102,8 @@ def load_dataset(
93
  For example:
94
  "card=cards.wnli,template=templates.classification.multi_class.relation.default".
95
  streaming (bool, False): When True yields the data as Unitxt streams dictionary
 
 
96
  **kwargs: Arguments used to load dataset from provided card, which is not present in local catalog.
97
 
98
  Returns:
@@ -110,10 +121,21 @@ def load_dataset(
110
  """
111
  recipe = load_recipe(dataset_query, **kwargs)
112
 
 
 
 
 
 
 
 
113
  if streaming:
114
- return recipe()
 
 
115
 
116
- return recipe().to_dataset(features=UNITXT_DATASET_SCHEMA)
 
 
117
 
118
 
119
  def evaluate(predictions, data) -> List[Dict[str, Any]]:
@@ -129,14 +151,16 @@ def _get_produce_with_cache(dataset_query: Optional[str] = None, **kwargs):
129
  return load_recipe(dataset_query, **kwargs).produce
130
 
131
 
132
- def produce(instance_or_instances, dataset_query: Optional[str] = None, **kwargs):
 
 
133
  is_list = isinstance(instance_or_instances, list)
134
  if not is_list:
135
  instance_or_instances = [instance_or_instances]
136
  result = _get_produce_with_cache(dataset_query, **kwargs)(instance_or_instances)
137
  if not is_list:
138
- result = result[0]
139
- return result
140
 
141
 
142
  def infer(
@@ -174,13 +198,13 @@ def infer(
174
  )
175
  predictions = post_process(raw_predictions, dataset)
176
  if return_data:
177
- for prediction, raw_prediction, instance, infer_output in zip(
178
- predictions, raw_predictions, dataset, infer_outputs
179
- ):
180
- if return_meta_data:
181
- instance["infer_meta_data"] = infer_output.__dict__
182
- del instance["infer_meta_data"]["prediction"]
183
- instance["prediction"] = prediction
184
- instance["raw_prediction"] = raw_prediction
185
- return dataset
186
  return predictions
 
2
  from functools import lru_cache
3
  from typing import Any, Dict, List, Optional, Union
4
 
5
+ from datasets import Dataset, DatasetDict, IterableDataset, IterableDatasetDict
6
+
7
  from .artifact import fetch_artifact
8
  from .dataset_utils import get_dataset_artifact
9
  from .inference import InferenceEngine, LogProbInferenceEngine
10
  from .logging_utils import get_logger
11
  from .metric_utils import _compute, _inference_post_process
12
  from .operator import SourceOperator
13
+ from .schema import UNITXT_DATASET_SCHEMA, loads_instance
14
+ from .settings_utils import get_constants, get_settings
15
  from .standard import StandardRecipe
16
 
17
  logger = get_logger()
18
+ constants = get_constants()
19
+ settings = get_settings()
20
 
21
 
22
  def load(source: Union[SourceOperator, str]):
 
85
 
86
 
87
  def load_dataset(
88
+ dataset_query: Optional[str] = None,
89
+ split: Optional[str] = None,
90
+ streaming: bool = False,
91
+ disable_cache: Optional[bool] = None,
92
+ **kwargs,
93
+ ) -> Union[DatasetDict, IterableDatasetDict, Dataset, IterableDataset]:
94
  """Loads dataset.
95
 
96
  If the 'dataset_query' argument is provided, then dataset is loaded from a card in local
 
102
  For example:
103
  "card=cards.wnli,template=templates.classification.multi_class.relation.default".
104
  streaming (bool, False): When True yields the data as Unitxt streams dictionary
105
+ split (str, optional): The split of the data to load
106
+ disable_cache (str, optional): Disable caching process of the data
107
  **kwargs: Arguments used to load dataset from provided card, which is not present in local catalog.
108
 
109
  Returns:
 
121
  """
122
  recipe = load_recipe(dataset_query, **kwargs)
123
 
124
+ stream = recipe()
125
+ if split is not None:
126
+ stream = stream[split]
127
+
128
+ if disable_cache is None:
129
+ disable_cache = settings.disable_hf_datasets_cache
130
+
131
  if streaming:
132
+ return stream.to_iterable_dataset(
133
+ features=UNITXT_DATASET_SCHEMA,
134
+ ).map(loads_instance, batched=True)
135
 
136
+ return stream.to_dataset(
137
+ features=UNITXT_DATASET_SCHEMA, disable_cache=disable_cache
138
+ ).with_transform(loads_instance)
139
 
140
 
141
  def evaluate(predictions, data) -> List[Dict[str, Any]]:
 
151
  return load_recipe(dataset_query, **kwargs).produce
152
 
153
 
154
+ def produce(
155
+ instance_or_instances, dataset_query: Optional[str] = None, **kwargs
156
+ ) -> Union[Dataset, Dict[str, Any]]:
157
  is_list = isinstance(instance_or_instances, list)
158
  if not is_list:
159
  instance_or_instances = [instance_or_instances]
160
  result = _get_produce_with_cache(dataset_query, **kwargs)(instance_or_instances)
161
  if not is_list:
162
+ return result[0]
163
+ return Dataset.from_list(result).with_transform(loads_instance)
164
 
165
 
166
  def infer(
 
198
  )
199
  predictions = post_process(raw_predictions, dataset)
200
  if return_data:
201
+ if return_meta_data:
202
+ infer_output_list = [
203
+ infer_output.__dict__ for infer_output in infer_outputs
204
+ ]
205
+ for infer_output in infer_output_list:
206
+ del infer_output["prediction"]
207
+ dataset = dataset.add_column("infer_meta_data", infer_output_list)
208
+ dataset = dataset.add_column("prediction", predictions)
209
+ return dataset.add_column("raw_prediction", raw_predictions)
210
  return predictions
artifact.py CHANGED
@@ -15,13 +15,14 @@ from .dataclass import (
15
  NonPositionalField,
16
  fields,
17
  )
 
18
  from .logging_utils import get_logger
19
  from .parsing_utils import (
20
  separate_inside_and_outside_square_brackets,
21
  )
22
  from .settings_utils import get_constants, get_settings
23
  from .text_utils import camel_to_snake_case, is_camel_case
24
- from .type_utils import issubtype
25
  from .utils import (
26
  artifacts_json_cache,
27
  json_dump,
@@ -44,11 +45,11 @@ def verify_legal_catalog_name(name):
44
  ), f'Artifict name ("{name}") should be alphanumeric. Use "." for nesting (e.g. myfolder.my_artifact)'
45
 
46
 
47
- class Artifactories:
48
  def __new__(cls):
49
  if not hasattr(cls, "instance"):
50
  cls.instance = super().__new__(cls)
51
- cls.instance.artifactories = []
52
 
53
  return cls.instance
54
 
@@ -57,42 +58,34 @@ class Artifactories:
57
  return self
58
 
59
  def __next__(self):
60
- while self._index < len(self.artifactories):
61
- artifactory = self.artifactories[self._index]
62
  self._index += 1
63
  if (
64
- settings.use_only_local_catalogs and not artifactory.is_local
65
  ): # Corrected typo from 'is_loacl' to 'is_local'
66
  continue
67
- return artifactory
68
  raise StopIteration
69
 
70
- def register(self, artifactory):
71
  assert isinstance(
72
- artifactory, Artifactory
73
- ), "Artifactory must be an instance of Artifactory"
74
- assert hasattr(
75
- artifactory, "__contains__"
76
- ), "Artifactory must have __contains__ method"
77
- assert hasattr(
78
- artifactory, "__getitem__"
79
- ), "Artifactory must have __getitem__ method"
80
- self.artifactories = [artifactory, *self.artifactories]
81
-
82
- def unregister(self, artifactory):
83
  assert isinstance(
84
- artifactory, Artifactory
85
- ), "Artifactory must be an instance of Artifactory"
86
- assert hasattr(
87
- artifactory, "__contains__"
88
- ), "Artifactory must have __contains__ method"
89
- assert hasattr(
90
- artifactory, "__getitem__"
91
- ), "Artifactory must have __getitem__ method"
92
- self.artifactories.remove(artifactory)
93
 
94
  def reset(self):
95
- self.artifactories = []
96
 
97
 
98
  def map_values_in_place(object, mapper):
@@ -369,14 +362,19 @@ class Artifact(Dataclass):
369
  if not data_classification_policy:
370
  return instance
371
 
 
 
 
 
372
  instance_data_classification = instance.get("data_classification_policy")
373
  if not instance_data_classification:
374
- get_logger().warning(
375
  f"The data does not provide information if it can be used by "
376
  f"'{name}' with the following data classification policy "
377
  f"'{data_classification_policy}'. This may lead to sending of undesired "
378
  f"data to external service. Set the 'data_classification_policy' "
379
- f"of the data to ensure a proper handling of sensitive information."
 
380
  )
381
  return instance
382
 
@@ -384,14 +382,15 @@ class Artifact(Dataclass):
384
  data_classification in data_classification_policy
385
  for data_classification in instance_data_classification
386
  ):
387
- raise ValueError(
388
  f"The instance '{instance} 'has the following data classification policy "
389
  f"'{instance_data_classification}', however, the artifact '{name}' "
390
  f"is only configured to support the data with classification "
391
  f"'{data_classification_policy}'. To enable this either change "
392
  f"the 'data_classification_policy' attribute of the artifact, "
393
  f"or modify the environment variable "
394
- f"'UNITXT_DATA_CLASSIFICATION_POLICY' accordingly."
 
395
  )
396
 
397
  return instance
@@ -419,7 +418,7 @@ class ArtifactList(list, Artifact):
419
  artifact.prepare()
420
 
421
 
422
- class Artifactory(Artifact):
423
  is_local: bool = AbstractField()
424
 
425
  @abstractmethod
@@ -435,19 +434,19 @@ class Artifactory(Artifact):
435
  pass
436
 
437
 
438
- class UnitxtArtifactNotFoundError(Exception):
439
- def __init__(self, name, artifactories):
440
  self.name = name
441
- self.artifactories = artifactories
442
-
443
- def __str__(self):
444
- msg = f"Artifact {self.name} does not exist, in artifactories:{self.artifactories}."
445
  if settings.use_only_local_catalogs:
446
- msg += f" Notice that unitxt.settings.use_only_local_catalogs is set to True, if you want to use remote catalogs set this settings or the environment variable {settings.use_only_local_catalogs_key}."
447
- return f"Artifact {self.name} does not exist, in artifactories:{self.artifactories}"
448
 
449
 
450
- def fetch_artifact(artifact_rep) -> Tuple[Artifact, Union[Artifactory, None]]:
451
  """Loads an artifict from one of possible representations.
452
 
453
  (1) If artifact representation is already an Artifact object, return it.
@@ -467,12 +466,10 @@ def fetch_artifact(artifact_rep) -> Tuple[Artifact, Union[Artifactory, None]]:
467
  if isinstance(artifact_rep, str):
468
  name, _ = separate_inside_and_outside_square_brackets(artifact_rep)
469
  if is_name_legal_for_catalog(name):
470
- artifactory, artifact_rep, args = get_artifactory_name_and_args(
471
- name=artifact_rep
472
- )
473
- return artifactory.get_with_overwrite(
474
  artifact_rep, overwrite_args=args
475
- ), artifactory
476
 
477
  # If Json string, first load into dictionary
478
  if isinstance(artifact_rep, str):
@@ -481,24 +478,24 @@ def fetch_artifact(artifact_rep) -> Tuple[Artifact, Union[Artifactory, None]]:
481
  return Artifact.from_dict(artifact_rep), None
482
 
483
 
484
- def get_artifactory_name_and_args(
485
- name: str, artifactories: Optional[List[Artifactory]] = None
486
  ):
487
  name, args = separate_inside_and_outside_square_brackets(name)
488
 
489
- if artifactories is None:
490
- artifactories = list(Artifactories())
491
 
492
- for artifactory in artifactories:
493
- if name in artifactory:
494
- return artifactory, name, args
495
 
496
- raise UnitxtArtifactNotFoundError(name, artifactories)
497
 
498
 
499
  def verbosed_fetch_artifact(identifier):
500
- artifact, artifactory = fetch_artifact(identifier)
501
- logger.debug(f"Artifact {identifier} is fetched from {artifactory}")
502
  return artifact
503
 
504
 
@@ -569,10 +566,11 @@ def get_artifacts_data_classification(artifact: str) -> Optional[List[str]]:
569
  for artifact_data_classification in artifact_data_classifications
570
  )
571
  ):
572
- raise RuntimeError(
573
  "'UNITXT_DATA_CLASSIFICATION_POLICY' should be of type "
574
  "'Dict[str, List[str]]', where a artifact's name is a key, and a "
575
- "value is a list of data classifications used by that artifact."
 
576
  )
577
 
578
  if artifact not in data_classification.keys():
 
15
  NonPositionalField,
16
  fields,
17
  )
18
+ from .error_utils import Documentation, UnitxtError, UnitxtWarning
19
  from .logging_utils import get_logger
20
  from .parsing_utils import (
21
  separate_inside_and_outside_square_brackets,
22
  )
23
  from .settings_utils import get_constants, get_settings
24
  from .text_utils import camel_to_snake_case, is_camel_case
25
+ from .type_utils import isoftype, issubtype
26
  from .utils import (
27
  artifacts_json_cache,
28
  json_dump,
 
45
  ), f'Artifict name ("{name}") should be alphanumeric. Use "." for nesting (e.g. myfolder.my_artifact)'
46
 
47
 
48
+ class Catalogs:
49
  def __new__(cls):
50
  if not hasattr(cls, "instance"):
51
  cls.instance = super().__new__(cls)
52
+ cls.instance.catalogs = []
53
 
54
  return cls.instance
55
 
 
58
  return self
59
 
60
  def __next__(self):
61
+ while self._index < len(self.catalogs):
62
+ catalog = self.catalogs[self._index]
63
  self._index += 1
64
  if (
65
+ settings.use_only_local_catalogs and not catalog.is_local
66
  ): # Corrected typo from 'is_loacl' to 'is_local'
67
  continue
68
+ return catalog
69
  raise StopIteration
70
 
71
+ def register(self, catalog):
72
  assert isinstance(
73
+ catalog, AbstractCatalog
74
+ ), "catalog must be an instance of AbstractCatalog"
75
+ assert hasattr(catalog, "__contains__"), "catalog must have __contains__ method"
76
+ assert hasattr(catalog, "__getitem__"), "catalog must have __getitem__ method"
77
+ self.catalogs = [catalog, *self.catalogs]
78
+
79
+ def unregister(self, catalog):
 
 
 
 
80
  assert isinstance(
81
+ catalog, AbstractCatalog
82
+ ), "catalog must be an instance of Catalog"
83
+ assert hasattr(catalog, "__contains__"), "catalog must have __contains__ method"
84
+ assert hasattr(catalog, "__getitem__"), "catalog must have __getitem__ method"
85
+ self.catalogs.remove(catalog)
 
 
 
 
86
 
87
  def reset(self):
88
+ self.catalogs = []
89
 
90
 
91
  def map_values_in_place(object, mapper):
 
362
  if not data_classification_policy:
363
  return instance
364
 
365
+ if not isoftype(instance, Dict[str, Any]):
366
+ raise ValueError(
367
+ f"The instance passed to inference engine is not a dictionary. Instance:\n{instance}"
368
+ )
369
  instance_data_classification = instance.get("data_classification_policy")
370
  if not instance_data_classification:
371
+ UnitxtWarning(
372
  f"The data does not provide information if it can be used by "
373
  f"'{name}' with the following data classification policy "
374
  f"'{data_classification_policy}'. This may lead to sending of undesired "
375
  f"data to external service. Set the 'data_classification_policy' "
376
+ f"of the data to ensure a proper handling of sensitive information.",
377
+ Documentation.DATA_CLASSIFICATION_POLICY,
378
  )
379
  return instance
380
 
 
382
  data_classification in data_classification_policy
383
  for data_classification in instance_data_classification
384
  ):
385
+ raise UnitxtError(
386
  f"The instance '{instance} 'has the following data classification policy "
387
  f"'{instance_data_classification}', however, the artifact '{name}' "
388
  f"is only configured to support the data with classification "
389
  f"'{data_classification_policy}'. To enable this either change "
390
  f"the 'data_classification_policy' attribute of the artifact, "
391
  f"or modify the environment variable "
392
+ f"'UNITXT_DATA_CLASSIFICATION_POLICY' accordingly.",
393
+ Documentation.DATA_CLASSIFICATION_POLICY,
394
  )
395
 
396
  return instance
 
418
  artifact.prepare()
419
 
420
 
421
+ class AbstractCatalog(Artifact):
422
  is_local: bool = AbstractField()
423
 
424
  @abstractmethod
 
434
  pass
435
 
436
 
437
+ class UnitxtArtifactNotFoundError(UnitxtError):
438
+ def __init__(self, name, catalogs):
439
  self.name = name
440
+ self.catalogs = catalogs
441
+ msg = (
442
+ f"Artifact {self.name} does not exist, in Unitxt catalogs: {self.catalogs}."
443
+ )
444
  if settings.use_only_local_catalogs:
445
+ msg += f"\nNotice that unitxt.settings.use_only_local_catalogs is set to True, if you want to use remote catalogs set this settings or the environment variable {settings.use_only_local_catalogs_key}."
446
+ super().__init__(msg)
447
 
448
 
449
+ def fetch_artifact(artifact_rep) -> Tuple[Artifact, Union[AbstractCatalog, None]]:
450
  """Loads an artifict from one of possible representations.
451
 
452
  (1) If artifact representation is already an Artifact object, return it.
 
466
  if isinstance(artifact_rep, str):
467
  name, _ = separate_inside_and_outside_square_brackets(artifact_rep)
468
  if is_name_legal_for_catalog(name):
469
+ catalog, artifact_rep, args = get_catalog_name_and_args(name=artifact_rep)
470
+ return catalog.get_with_overwrite(
 
 
471
  artifact_rep, overwrite_args=args
472
+ ), catalog
473
 
474
  # If Json string, first load into dictionary
475
  if isinstance(artifact_rep, str):
 
478
  return Artifact.from_dict(artifact_rep), None
479
 
480
 
481
+ def get_catalog_name_and_args(
482
+ name: str, catalogs: Optional[List[AbstractCatalog]] = None
483
  ):
484
  name, args = separate_inside_and_outside_square_brackets(name)
485
 
486
+ if catalogs is None:
487
+ catalogs = list(Catalogs())
488
 
489
+ for catalog in catalogs:
490
+ if name in catalog:
491
+ return catalog, name, args
492
 
493
+ raise UnitxtArtifactNotFoundError(name, catalogs)
494
 
495
 
496
  def verbosed_fetch_artifact(identifier):
497
+ artifact, catalog = fetch_artifact(identifier)
498
+ logger.debug(f"Artifact {identifier} is fetched from {catalog}")
499
  return artifact
500
 
501
 
 
566
  for artifact_data_classification in artifact_data_classifications
567
  )
568
  ):
569
+ raise UnitxtError(
570
  "'UNITXT_DATA_CLASSIFICATION_POLICY' should be of type "
571
  "'Dict[str, List[str]]', where a artifact's name is a key, and a "
572
+ "value is a list of data classifications used by that artifact.",
573
+ Documentation.DATA_CLASSIFICATION_POLICY,
574
  )
575
 
576
  if artifact not in data_classification.keys():
augmentors.py CHANGED
@@ -9,16 +9,14 @@ from typing import (
9
 
10
  from .operators import FieldOperator
11
  from .random_utils import new_random_generator
12
- from .type_utils import isoftype
 
13
 
14
 
15
  class Augmentor(FieldOperator):
16
  """A stream operator that augments the values of either the task input fields before rendering with the template, or the input passed to the model after rendering of the template."""
17
 
18
- operator: FieldOperator
19
-
20
- def process_value(self, value: Any) -> Any:
21
- return self.operator.process_value(value)
22
 
23
 
24
  class TaskInputsAugmentor(Augmentor):
@@ -27,31 +25,38 @@ class TaskInputsAugmentor(Augmentor):
27
  self.field_to_field = {field: field for field in fields}
28
 
29
 
30
- class FinalStateInputsAugmentor(Augmentor):
31
- pass
32
 
 
 
 
 
33
 
34
- class ModelInputAugmentor(FinalStateInputsAugmentor):
35
- field = "source"
 
 
 
36
 
 
 
 
 
37
 
38
- class ImagesAugmentor(FinalStateInputsAugmentor):
39
- field = "media/images"
40
- process_every_value = True
41
 
42
-
43
- class Identity(FieldOperator):
44
- def process_value(self, value: Any) -> Any:
45
- return value
46
 
47
 
48
  class NullAugmentor(Augmentor):
49
  """Does not change the input string."""
50
 
51
- operator = Identity()
 
52
 
53
 
54
- class AugmentWhitespace(FieldOperator):
55
  """Augments the inputs by replacing existing whitespaces with other whitespaces.
56
 
57
  Currently, each whitespace is replaced by a random choice of 1-3 whitespace characters (space, tab, newline).
@@ -74,7 +79,7 @@ class AugmentWhitespace(FieldOperator):
74
  return new_value
75
 
76
 
77
- class AugmentPrefixSuffix(FieldOperator):
78
  r"""Augments the input by prepending and appending randomly selected (typically, whitespace) patterns.
79
 
80
  Args:
 
9
 
10
  from .operators import FieldOperator
11
  from .random_utils import new_random_generator
12
+ from .type_utils import isoftype, parse_type_string, to_type_string
13
+ from .types import Text
14
 
15
 
16
  class Augmentor(FieldOperator):
17
  """A stream operator that augments the values of either the task input fields before rendering with the template, or the input passed to the model after rendering of the template."""
18
 
19
+ pass
 
 
 
20
 
21
 
22
  class TaskInputsAugmentor(Augmentor):
 
25
  self.field_to_field = {field: field for field in fields}
26
 
27
 
28
+ class TypeDependentAugmentor(TaskInputsAugmentor):
29
+ augmented_type: object
30
 
31
+ def process_instance_value(self, value: Any, instance: Dict[str, Any]):
32
+ if not isoftype(value, self.augmented_type):
33
+ return value
34
+ return super().process_instance_value(value=value, instance=instance)
35
 
36
+ @classmethod
37
+ def process_data_after_load(cls, data):
38
+ if "augmented_type" in data:
39
+ data["augmented_type"] = parse_type_string(data["augmented_type"])
40
+ return data
41
 
42
+ def process_data_before_dump(self, data):
43
+ if "augmented_type" in data:
44
+ data["augmented_type"] = to_type_string(data["augmented_type"])
45
+ return data
46
 
 
 
 
47
 
48
+ class TextAugmentor(TypeDependentAugmentor):
49
+ augmented_type = Text
 
 
50
 
51
 
52
  class NullAugmentor(Augmentor):
53
  """Does not change the input string."""
54
 
55
+ def process_value(self, value: Any) -> Any:
56
+ return value
57
 
58
 
59
+ class AugmentWhitespace(TextAugmentor):
60
  """Augments the inputs by replacing existing whitespaces with other whitespaces.
61
 
62
  Currently, each whitespace is replaced by a random choice of 1-3 whitespace characters (space, tab, newline).
 
79
  return new_value
80
 
81
 
82
+ class AugmentPrefixSuffix(TextAugmentor):
83
  r"""Augments the input by prepending and appending randomly selected (typically, whitespace) patterns.
84
 
85
  Args:
benchmark.py CHANGED
@@ -1,3 +1,4 @@
 
1
  from typing import Dict, Union
2
 
3
  from .dataclass import NonPositionalField
@@ -15,6 +16,10 @@ class BaseBenchmark(SourceOperator):
15
  system_prompt: SystemPrompt = NonPositionalField(default=None)
16
  loader_limit: int = NonPositionalField(default=None)
17
 
 
 
 
 
18
 
19
  class Benchmark(BaseBenchmark):
20
  subsets: Dict[str, Union[StandardRecipe, BaseBenchmark]]
@@ -23,14 +28,20 @@ class Benchmark(BaseBenchmark):
23
  max_samples_per_subset: int = None
24
 
25
  def verify(self):
 
26
  if (
27
  self.max_total_samples is not None
28
  and self.max_samples_per_subset is not None
29
  ):
30
  raise ValueError("Set either max_total_samples or max_samples_per_subset")
31
 
32
- def prepare(self):
33
- if self.format is not None or self.num_demos is not None:
 
 
 
 
 
34
  for subset in self.subsets.values():
35
  if self.num_demos is not None:
36
  subset.num_demos = self.num_demos
@@ -40,7 +51,13 @@ class Benchmark(BaseBenchmark):
40
  subset.system_prompt = self.system_prompt
41
  if self.loader_limit is not None:
42
  subset.loader_limit = self.loader_limit
43
- subset.prepare()
 
 
 
 
 
 
44
 
45
  def process(
46
  self,
 
1
+ from abc import abstractmethod
2
  from typing import Dict, Union
3
 
4
  from .dataclass import NonPositionalField
 
16
  system_prompt: SystemPrompt = NonPositionalField(default=None)
17
  loader_limit: int = NonPositionalField(default=None)
18
 
19
+ @abstractmethod
20
+ def reset(self):
21
+ pass
22
+
23
 
24
  class Benchmark(BaseBenchmark):
25
  subsets: Dict[str, Union[StandardRecipe, BaseBenchmark]]
 
28
  max_samples_per_subset: int = None
29
 
30
  def verify(self):
31
+ super().verify()
32
  if (
33
  self.max_total_samples is not None
34
  and self.max_samples_per_subset is not None
35
  ):
36
  raise ValueError("Set either max_total_samples or max_samples_per_subset")
37
 
38
+ def reset(self):
39
+ if (
40
+ self.format is not None
41
+ or self.num_demos is not None
42
+ or self.system_prompt is not None
43
+ or self.loader_limit is not None
44
+ ):
45
  for subset in self.subsets.values():
46
  if self.num_demos is not None:
47
  subset.num_demos = self.num_demos
 
51
  subset.system_prompt = self.system_prompt
52
  if self.loader_limit is not None:
53
  subset.loader_limit = self.loader_limit
54
+
55
+ subset.reset()
56
+
57
+ def prepare(self):
58
+ super().prepare()
59
+
60
+ self.reset()
61
 
62
  def process(
63
  self,
catalog.py CHANGED
@@ -8,10 +8,10 @@ from typing import Optional
8
  import requests
9
 
10
  from .artifact import (
 
11
  Artifact,
12
- Artifactories,
13
- Artifactory,
14
- get_artifactory_name_and_args,
15
  reset_artifacts_json_cache,
16
  verify_legal_catalog_name,
17
  )
@@ -24,10 +24,13 @@ logger = get_logger()
24
  constants = get_constants()
25
 
26
 
27
- class Catalog(Artifactory):
28
  name: str = None
29
  location: str = None
30
 
 
 
 
31
 
32
  class LocalCatalog(Catalog):
33
  name: str = "local"
@@ -145,13 +148,11 @@ def get_from_catalog(
145
  catalog = LocalCatalog(location=catalog_path)
146
 
147
  if catalog is None:
148
- artifactories = None
149
  else:
150
- artifactories = [catalog]
151
 
152
- catalog, name, args = get_artifactory_name_and_args(
153
- name, artifactories=artifactories
154
- )
155
 
156
  return catalog.get_with_overwrite(
157
  name=name,
@@ -161,10 +162,10 @@ def get_from_catalog(
161
 
162
  def get_local_catalogs_paths():
163
  result = []
164
- for artifactory in Artifactories():
165
- if isinstance(artifactory, LocalCatalog):
166
- if artifactory.is_local:
167
- result.append(artifactory.location)
168
  return result
169
 
170
 
 
8
  import requests
9
 
10
  from .artifact import (
11
+ AbstractCatalog,
12
  Artifact,
13
+ Catalogs,
14
+ get_catalog_name_and_args,
 
15
  reset_artifacts_json_cache,
16
  verify_legal_catalog_name,
17
  )
 
24
  constants = get_constants()
25
 
26
 
27
+ class Catalog(AbstractCatalog):
28
  name: str = None
29
  location: str = None
30
 
31
+ def __repr__(self):
32
+ return f"{self.location}"
33
+
34
 
35
  class LocalCatalog(Catalog):
36
  name: str = "local"
 
148
  catalog = LocalCatalog(location=catalog_path)
149
 
150
  if catalog is None:
151
+ catalogs = None
152
  else:
153
+ catalogs = [catalog]
154
 
155
+ catalog, name, args = get_catalog_name_and_args(name, catalogs=catalogs)
 
 
156
 
157
  return catalog.get_with_overwrite(
158
  name=name,
 
162
 
163
  def get_local_catalogs_paths():
164
  result = []
165
+ for catalog in Catalogs():
166
+ if isinstance(catalog, LocalCatalog):
167
+ if catalog.is_local:
168
+ result.append(catalog.location)
169
  return result
170
 
171
 
data.py CHANGED
@@ -1,4 +1,5 @@
1
  import os
 
2
 
3
  import datasets
4
 
@@ -44,6 +45,7 @@ from .random_utils import __file__ as _
44
  from .recipe import __file__ as _
45
  from .register import __file__ as _
46
  from .schema import __file__ as _
 
47
  from .serializers import __file__ as _
48
  from .settings_utils import __file__ as _
49
  from .settings_utils import get_constants
@@ -65,15 +67,12 @@ from .utils import __file__ as _
65
  from .utils import is_package_installed
66
  from .validate import __file__ as _
67
  from .version import __file__ as _
68
- from .version import version
69
 
70
  logger = get_logger()
71
  constants = get_constants()
72
 
73
 
74
  class Dataset(datasets.GeneratorBasedBuilder):
75
- """TODO: Short description of my dataset."""
76
-
77
  VERSION = constants.version
78
 
79
  @property
@@ -114,3 +113,16 @@ class Dataset(datasets.GeneratorBasedBuilder):
114
  return super()._download_and_prepare(
115
  dl_manager, "no_checks", **prepare_splits_kwargs
116
  )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  import os
2
+ from typing import Optional, Union
3
 
4
  import datasets
5
 
 
45
  from .recipe import __file__ as _
46
  from .register import __file__ as _
47
  from .schema import __file__ as _
48
+ from .schema import loads_instance
49
  from .serializers import __file__ as _
50
  from .settings_utils import __file__ as _
51
  from .settings_utils import get_constants
 
67
  from .utils import is_package_installed
68
  from .validate import __file__ as _
69
  from .version import __file__ as _
 
70
 
71
  logger = get_logger()
72
  constants = get_constants()
73
 
74
 
75
  class Dataset(datasets.GeneratorBasedBuilder):
 
 
76
  VERSION = constants.version
77
 
78
  @property
 
113
  return super()._download_and_prepare(
114
  dl_manager, "no_checks", **prepare_splits_kwargs
115
  )
116
+
117
+ def as_dataset(
118
+ self,
119
+ split: Optional[datasets.Split] = None,
120
+ run_post_process=True,
121
+ verification_mode: Optional[Union[datasets.VerificationMode, str]] = None,
122
+ in_memory=False,
123
+ ) -> Union[datasets.Dataset, datasets.DatasetDict]:
124
+ return (
125
+ super()
126
+ .as_dataset(split, run_post_process, verification_mode, in_memory)
127
+ .with_transform(loads_instance)
128
+ )
error_utils.py CHANGED
@@ -10,9 +10,12 @@ class Documentation:
10
  HUGGINGFACE_METRICS = "docs/adding_metric.html#adding-a-hugginface-metric"
11
  ADDING_TASK = "docs/adding_task.html"
12
  ADDING_TEMPLATE = "docs/adding_template.html"
 
13
  MULTIPLE_METRICS_OUTPUTS = (
14
  "docs/adding_metric.html#metric-outputs-with-multiple-metrics"
15
  )
 
 
16
 
17
 
18
  def additional_info(path: str) -> str:
 
10
  HUGGINGFACE_METRICS = "docs/adding_metric.html#adding-a-hugginface-metric"
11
  ADDING_TASK = "docs/adding_task.html"
12
  ADDING_TEMPLATE = "docs/adding_template.html"
13
+ POST_PROCESSORS = "docs/adding_template.html#post-processors"
14
  MULTIPLE_METRICS_OUTPUTS = (
15
  "docs/adding_metric.html#metric-outputs-with-multiple-metrics"
16
  )
17
+ DATA_CLASSIFICATION_POLICY = "docs/data_classification_policy.html"
18
+ CATALOG = "docs/saving_and_loading_from_catalog.html"
19
 
20
 
21
  def additional_info(path: str) -> str:
formats.py CHANGED
@@ -1,15 +1,25 @@
1
  import re
 
2
  from typing import (
3
  Any,
4
  Dict,
5
  List,
 
6
  Optional,
 
 
 
7
  )
8
 
9
  from .dataclass import OptionalField
 
 
10
  from .operator import InstanceOperator
 
11
  from .type_utils import isoftype
12
 
 
 
13
 
14
  class Format(InstanceOperator):
15
  pass
@@ -59,9 +69,7 @@ class BaseFormat(Format):
59
  demos_field: str = "demos"
60
 
61
  @staticmethod
62
- def _retrieve_field_and_pop_from_instance(
63
- instance, field_name, do_pop: bool = True
64
- ) -> str:
65
  if field_name is not None and field_name in instance:
66
  field_value = instance[field_name]
67
  if do_pop:
@@ -72,6 +80,53 @@ class BaseFormat(Format):
72
  return field_value
73
  return ""
74
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
75
 
76
  class SystemFormat(BaseFormat):
77
  r"""Generates the whole input to the model, from constant strings that are given as args, and from values found in specified fields of the instance.
@@ -137,51 +192,19 @@ class SystemFormat(BaseFormat):
137
  )
138
  format_args: Dict[str, str] = OptionalField(default_factory=dict)
139
 
140
- def process(
141
- self, instance: Dict[str, Any], stream_name: Optional[str] = None
142
- ) -> Dict[str, Any]:
143
- assert (
144
- "source" in instance
145
- ), f"field 'source' is expected to be in the input instance. Received instance: {instance}"
146
- source = self._retrieve_field_and_pop_from_instance(
147
- instance=instance, field_name="source"
148
- )
149
-
150
- instruction = self._retrieve_field_and_pop_from_instance(
151
- instance=instance, field_name="instruction"
152
- )
153
- target_prefix = self._retrieve_field_and_pop_from_instance(
154
- instance=instance, field_name="target_prefix"
155
- )
156
- system_prompt = self._retrieve_field_and_pop_from_instance(
157
- instance=instance, field_name="system_prompt"
158
- )
159
-
160
- demo_instances = []
161
- if self.demos_field is not None and self.demos_field in instance:
162
- demos = instance[self.demos_field]
163
- assert (
164
- demos is not None and isoftype(demos, List[Dict[str, Any]])
165
- ), f"A list of dict-s is expected in field '{self.demos_field}'. Received instance: {instance}"
166
- demo_instances = demos
167
- # instance.pop(self.demos_field)
168
-
169
  demos_string = ""
170
- for demo_instance in demo_instances:
171
- demo_source = self._retrieve_field_and_pop_from_instance(
172
- instance=demo_instance, field_name="source", do_pop=False
173
- )
174
- demo_target = self._retrieve_field_and_pop_from_instance(
175
- instance=demo_instance, field_name="target", do_pop=False
176
- )
177
- demo_target_prefix = self._retrieve_field_and_pop_from_instance(
178
- instance=demo_instance, field_name="target_prefix", do_pop=False
179
- )
180
-
181
  demo_str = self.demo_format.format(
182
- target_prefix=demo_target_prefix,
183
- source=demo_source,
184
- target=demo_target,
185
  instruction=instruction,
186
  **self.format_args,
187
  )
@@ -195,12 +218,209 @@ class SystemFormat(BaseFormat):
195
  target_prefix=target_prefix,
196
  **self.format_args,
197
  )
198
- output = apply_capital_new_line_notation(output)
199
- instance["source"] = output
200
- return instance
201
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
202
 
203
- class HFSystemFormat(BaseFormat):
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
204
  r"""Formats the complete input for the model using the HuggingFace chat template of a given model.
205
 
206
  HFSystemFormat expects the input instance to contain:
@@ -227,65 +447,29 @@ class HFSystemFormat(BaseFormat):
227
  """
228
 
229
  model_name: str
230
- _requirements_list = ["transformers"]
231
 
232
  def prepare(self):
 
233
  from transformers import AutoTokenizer
234
 
235
  self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
236
 
237
- def process(
238
- self, instance: Dict[str, Any], stream_name: Optional[str] = None
239
- ) -> Dict[str, Any]:
240
- assert (
241
- "source" in instance
242
- ), f"field 'source' is expected to be in the input instance. Received instance: {instance}"
243
-
244
- source = self._retrieve_field_and_pop_from_instance(
245
- instance=instance, field_name="source"
246
- )
247
-
248
- instruction = self._retrieve_field_and_pop_from_instance(
249
- instance=instance, field_name="instruction"
250
- )
251
- target_prefix = self._retrieve_field_and_pop_from_instance(
252
- instance=instance, field_name="target_prefix"
253
- )
254
- system_prompt = self._retrieve_field_and_pop_from_instance(
255
- instance=instance, field_name="system_prompt"
256
  )
257
-
258
- messages = [
259
- {
260
- "role": "system",
261
- "content": system_prompt
262
- + ("\n" if system_prompt != "" else "")
263
- + instruction,
264
- },
265
- ]
266
- demo_instances = []
267
- if self.demos_field is not None and self.demos_field in instance:
268
- demos = instance[self.demos_field]
269
- assert (
270
- demos is not None and isoftype(demos, List[Dict[str, Any]])
271
- ), f"A list of dict-s is expected in field '{self.demos_field}'. Received instance: {instance}"
272
- demo_instances = demos
273
- # instance.pop(self.demos_field)
274
-
275
- for demo_instance in demo_instances:
276
- messages.extend(
277
- [
278
- {"role": "user", "content": demo_instance["source"]},
279
- {
280
- "role": "assistant",
281
- "content": target_prefix + demo_instance["target"],
282
- },
283
- ]
284
  )
285
- messages.extend([{"role": "user", "content": source}])
286
- tokenized_chat = self.tokenizer.apply_chat_template(
287
- messages, tokenize=False, add_generation_prompt=True
288
  )
289
-
290
- instance["source"] = tokenized_chat + target_prefix
291
- return instance
 
1
  import re
2
+ from abc import abstractmethod
3
  from typing import (
4
  Any,
5
  Dict,
6
  List,
7
+ Literal,
8
  Optional,
9
+ Tuple,
10
+ TypedDict,
11
+ Union,
12
  )
13
 
14
  from .dataclass import OptionalField
15
+ from .dict_utils import dict_get
16
+ from .image_operators import image_to_data_url
17
  from .operator import InstanceOperator
18
+ from .settings_utils import get_constants
19
  from .type_utils import isoftype
20
 
21
+ constants = get_constants()
22
+
23
 
24
  class Format(InstanceOperator):
25
  pass
 
69
  demos_field: str = "demos"
70
 
71
  @staticmethod
72
+ def _pop_field(instance, field_name, do_pop: bool = True) -> str:
 
 
73
  if field_name is not None and field_name in instance:
74
  field_value = instance[field_name]
75
  if do_pop:
 
80
  return field_value
81
  return ""
82
 
83
+ def _prepare_instance_fields(self, instance) -> Tuple[str]:
84
+ instance_fields = {}
85
+
86
+ for field in "source", "instruction", "system_prompt", "target_prefix":
87
+ instance_fields[field] = self._pop_field(instance, field)
88
+
89
+ instance_fields["media"] = self._pop_field(instance, "media", do_pop=False)
90
+ if not instance_fields["media"]:
91
+ instance_fields["media"] = {"images": [], "audios": []}
92
+
93
+ instance_fields["demos"] = []
94
+ if self.demos_field is not None and self.demos_field in instance:
95
+ demos = instance[self.demos_field]
96
+ assert (
97
+ demos is not None and isoftype(demos, List[Dict[str, Any]])
98
+ ), f"A list of dict-s is expected in field '{self.demos_field}'. Received instance: {instance}"
99
+ for demo_instance in demos:
100
+ demo = {}
101
+ for field in ["source", "target", "target_prefix"]:
102
+ demo[field] = self._pop_field(demo_instance, field, do_pop=False)
103
+ instance_fields["demos"].append(demo)
104
+
105
+ return instance_fields
106
+
107
+ @abstractmethod
108
+ def _format_instance_to_source(
109
+ self,
110
+ system_prompt: str,
111
+ instruction: str,
112
+ source: str,
113
+ target_prefix: str,
114
+ demos: List[Dict[str, Any]],
115
+ media: Optional[Dict[str, Any]] = None,
116
+ ) -> str:
117
+ """Abstract method for formatting instances in different subclasses.
118
+
119
+ Subclasses should implement this method to define specific formatting behavior.
120
+ """
121
+ return ""
122
+
123
+ def process(
124
+ self, instance: Dict[str, Any], stream_name: Optional[str] = None
125
+ ) -> Dict[str, Any]:
126
+ instance_fields = self._prepare_instance_fields(instance)
127
+ instance["source"] = self._format_instance_to_source(**instance_fields)
128
+ return instance
129
+
130
 
131
  class SystemFormat(BaseFormat):
132
  r"""Generates the whole input to the model, from constant strings that are given as args, and from values found in specified fields of the instance.
 
192
  )
193
  format_args: Dict[str, str] = OptionalField(default_factory=dict)
194
 
195
+ def _format_instance_to_source(
196
+ self,
197
+ system_prompt: str,
198
+ instruction: str,
199
+ source: str,
200
+ target_prefix: str,
201
+ demos: List[Dict[str, Any]],
202
+ media: Optional[Dict[str, Any]] = None,
203
+ ) -> str:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
204
  demos_string = ""
205
+ for demo in demos:
 
 
 
 
 
 
 
 
 
 
206
  demo_str = self.demo_format.format(
207
+ **demo,
 
 
208
  instruction=instruction,
209
  **self.format_args,
210
  )
 
218
  target_prefix=target_prefix,
219
  **self.format_args,
220
  )
 
 
 
221
 
222
+ return apply_capital_new_line_notation(output)
223
+
224
+
225
+ class TextContent(TypedDict):
226
+ type: Literal["text"]
227
+ text: str
228
+
229
+
230
+ class ImageUrlContent(TypedDict):
231
+ type: Literal["image_url"]
232
+ image_url: Dict[Literal["url"], str]
233
+
234
+
235
+ class ImageFileContent(TypedDict):
236
+ type: Literal["image_file"]
237
+ image_file: Dict[Literal["file_id"], str]
238
+
239
+
240
+ Content = Union[TextContent, ImageUrlContent, ImageFileContent]
241
+
242
+
243
+ class Message(TypedDict):
244
+ role: Literal["system", "user", "assistant"]
245
+ content: Union[str, List[Content]]
246
+
247
+
248
+ class ChatAPIFormat(BaseFormat):
249
+ r"""Formats output for LLM APIs using OpenAI's chat schema.
250
+
251
+ Many API services use OpenAI's chat format as a standard for conversational models.
252
+ `OpenAIFormat` prepares the output in this API-compatible format, converting input
253
+ instances into OpenAI's structured chat format, which supports both text and
254
+ multimedia elements, like images.
255
+
256
+ The formatted output can be easily converted to a dictionary using `json.loads()`
257
+ to make it ready for direct use with OpenAI's API.
258
+
259
+ Example:
260
+ Given an input instance:
261
+
262
+ .. code-block:: python
263
+
264
+ {
265
+ "source": "<img src='https://example.com/image1.jpg'>What's in this image?",
266
+ "target": "A dog",
267
+ "instruction": "Help the user.",
268
+ },
269
+
270
+ When processed by:
271
+
272
+ .. code-block:: python
273
+
274
+ system_format = OpenAIFormat()
275
 
276
+ The resulting formatted output is:
277
+
278
+ .. code-block:: python
279
+
280
+ {
281
+ "target": "A dog",
282
+ "source": '[{"role": "system", "content": "Help the user."}, '
283
+ '{"role": "user", "content": [{"type": "image_url", '
284
+ '"image_url": {"url": "https://example.com/image1.jpg", "detail": "low"}}, '
285
+ '{"type": "text", "text": "What\'s in this image?"}]}]'
286
+ }
287
+
288
+ This `source` field is a JSON-formatted string. To make it ready for OpenAI's API,
289
+ you can convert it to a dictionary using `json.loads()`:
290
+
291
+ .. code-block:: python
292
+
293
+ import json
294
+
295
+ messages = json.loads(formatted_output["source"])
296
+
297
+ response = client.chat.completions.create(
298
+ model="gpt-4o",
299
+ messages=messages,
300
+ )
301
+
302
+ The resulting `messages` is now a dictionary ready for sending to the OpenAI API.
303
+ """
304
+
305
+ def to_content(self, text: str, media: Dict[str, Any]) -> Union[str, List[Content]]:
306
+ # Regular expression to find <img> tags with src attribute
307
+ img_tag_pattern = re.compile(
308
+ r"<" + f"{constants.image_tag}" + r'\s+[^>]*src=["\']([^"\']+)["\'][^>]*>',
309
+ re.IGNORECASE,
310
+ )
311
+
312
+ # Find all matches of <img> tags and their positions
313
+ matches = list(img_tag_pattern.finditer(text))
314
+
315
+ # If no images are found, return the text as a plain string
316
+ if not matches:
317
+ return text
318
+
319
+ contents: List[dict] = []
320
+ last_pos = 0
321
+
322
+ # Process each match
323
+ for match in matches:
324
+ start, end = match.span()
325
+ img_url = match.group(1)
326
+
327
+ # Add preceding text, if any
328
+ if last_pos < start:
329
+ contents.append({"type": "text", "text": text[last_pos:start]})
330
+
331
+ # Add image content with a default detail level
332
+ if img_url.startswith("media/"):
333
+ image = dict_get(media, img_url[6:])
334
+ data_url = image_to_data_url(image)
335
+ contents.append(
336
+ {
337
+ "type": "image_url",
338
+ "image_url": {"url": data_url, "detail": "low"},
339
+ }
340
+ )
341
+ else:
342
+ contents.append(
343
+ {
344
+ "type": "image_url",
345
+ "image_url": {"url": img_url, "detail": "low"},
346
+ }
347
+ )
348
+
349
+ # Update the last processed position
350
+ last_pos = end
351
+
352
+ # Add any remaining text after the last image
353
+ if last_pos < len(text):
354
+ contents.append({"type": "text", "text": text[last_pos:]})
355
+
356
+ return contents
357
+
358
+ def to_chat(
359
+ self,
360
+ system_prompt: str,
361
+ instruction: str,
362
+ source: str,
363
+ target_prefix: str,
364
+ demos: List[Dict[str, Any]],
365
+ media: Optional[Dict[str, Any]] = None,
366
+ ) -> List[Message]:
367
+ messages = []
368
+
369
+ if system_prompt or instruction:
370
+ system_content = self.to_content(
371
+ system_prompt + ("\n" if system_prompt != "" else "") + instruction,
372
+ media,
373
+ )
374
+ messages.append(
375
+ {
376
+ "role": "system",
377
+ "content": system_content,
378
+ }
379
+ )
380
+
381
+ for demo_instance in demos:
382
+ user_content = self.to_content(demo_instance["source"], media)
383
+ assistant_content = self.to_content(
384
+ target_prefix + demo_instance["target"], media
385
+ )
386
+ messages.extend(
387
+ [
388
+ {"role": "user", "content": user_content},
389
+ {
390
+ "role": "assistant",
391
+ "content": assistant_content,
392
+ },
393
+ ]
394
+ )
395
+
396
+ last_user_content = self.to_content(source, media)
397
+
398
+ messages.extend([{"role": "user", "content": last_user_content}])
399
+
400
+ return messages
401
+
402
+ def _format_instance_to_source(
403
+ self,
404
+ system_prompt: str,
405
+ instruction: str,
406
+ source: str,
407
+ target_prefix: str,
408
+ demos: List[Dict[str, Any]],
409
+ media: Optional[Dict[str, Any]] = None,
410
+ ) -> Union[str, List[Message]]:
411
+ chat = self.to_chat(
412
+ system_prompt,
413
+ instruction,
414
+ source,
415
+ target_prefix,
416
+ demos,
417
+ media,
418
+ )
419
+ media["images"] = []
420
+ return chat
421
+
422
+
423
+ class HFSystemFormat(ChatAPIFormat):
424
  r"""Formats the complete input for the model using the HuggingFace chat template of a given model.
425
 
426
  HFSystemFormat expects the input instance to contain:
 
447
  """
448
 
449
  model_name: str
450
+ _requirements_list = ["transformers", "Jinja2"]
451
 
452
  def prepare(self):
453
+ super().prepare()
454
  from transformers import AutoTokenizer
455
 
456
  self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
457
 
458
+ def _format_instance_to_source(
459
+ self,
460
+ system_prompt: str,
461
+ instruction: str,
462
+ source: str,
463
+ target_prefix: str,
464
+ demos: List[Dict[str, Any]],
465
+ media: Optional[Dict[str, Any]] = None,
466
+ ) -> str:
467
+ chat = self.to_chat(
468
+ system_prompt, instruction, source, target_prefix, demos, media
 
 
 
 
 
 
 
 
469
  )
470
+ return (
471
+ self.tokenizer.apply_chat_template(
472
+ chat, tokenize=False, add_generation_prompt=True
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
473
  )
474
+ + target_prefix
 
 
475
  )
 
 
 
generator_utils.py CHANGED
@@ -1,7 +1,7 @@
1
  from typing import Any, Dict, List
2
 
3
  from .dataclass import Dataclass, OptionalField
4
- from .utils import recursive_shallow_copy
5
 
6
 
7
  class ReusableGenerator(Dataclass):
@@ -22,4 +22,4 @@ class ReusableGenerator(Dataclass):
22
  class CopyingReusableGenerator(ReusableGenerator):
23
  def __iter__(self):
24
  for instance in self.activate():
25
- yield recursive_shallow_copy(instance)
 
1
  from typing import Any, Dict, List
2
 
3
  from .dataclass import Dataclass, OptionalField
4
+ from .utils import recursive_copy
5
 
6
 
7
  class ReusableGenerator(Dataclass):
 
22
  class CopyingReusableGenerator(ReusableGenerator):
23
  def __iter__(self):
24
  for instance in self.activate():
25
+ yield recursive_copy(instance)
image_operators.py CHANGED
@@ -2,12 +2,71 @@ import base64
2
  import io
3
  import re
4
  from abc import abstractmethod
5
- from typing import Any, Dict
6
 
7
  import numpy as np
 
8
 
 
9
  from .dict_utils import dict_get
10
- from .operators import FieldOperator, InstanceFieldOperator, PackageRequirementsMixin
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
11
 
12
 
13
  class PillowMixin(PackageRequirementsMixin):
@@ -16,14 +75,16 @@ class PillowMixin(PackageRequirementsMixin):
16
  def prepare(self):
17
  super().prepare()
18
  import PIL
19
- from PIL import Image
20
 
21
  self.pil = PIL
22
  self.image = Image
 
 
23
 
24
 
25
  def extract_images(text, instance):
26
- regex = r'<img\s+src=["\'](.*?)["\']'
27
  image_sources = re.findall(regex, text)
28
  images = []
29
  for image_source in image_sources:
@@ -33,31 +94,46 @@ def extract_images(text, instance):
33
 
34
 
35
  class DecodeImage(FieldOperator, PillowMixin):
36
- def decode_base64_to_image(self, base64_string):
37
- image_data = base64.b64decode(base64_string)
38
  return self.image.open(io.BytesIO(image_data))
39
 
40
- def process_value(self, value: Any) -> Any:
41
- return {"image": self.decode_base64_to_image(value)}
42
-
43
 
44
  class ToImage(InstanceFieldOperator):
45
- def process_instance_value(self, value: Any, instance: Dict[str, Any]):
46
- return {"image": value}
 
 
 
47
 
48
 
49
  class ImageFieldOperator(FieldOperator, PillowMixin):
50
  @abstractmethod
51
- def process_image(self, image):
52
  pass
53
 
54
- def process_value(self, value: Any) -> Any:
55
- if not isinstance(value, self.image.Image):
56
  raise ValueError(f"ImageFieldOperator requires image, got {type(value)}.")
57
- return self.process_image(value)
 
58
 
59
 
60
- class GrayScale(ImageFieldOperator):
 
 
 
 
 
 
 
 
 
 
 
 
 
 
61
  def process_image(self, image):
62
  # Convert the image to grayscale
63
  grayscale_image = image.convert("L")
@@ -75,6 +151,128 @@ class GrayScale(ImageFieldOperator):
75
  return self.image.fromarray(grayscale_array)
76
 
77
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
78
  class ToRGB(ImageFieldOperator):
79
  def process_image(self, image):
80
  return image.convert("RGB")
 
2
  import io
3
  import re
4
  from abc import abstractmethod
5
+ from typing import Any, Dict, Tuple
6
 
7
  import numpy as np
8
+ from datasets import Image as DatasetsImage
9
 
10
+ from .augmentors import TaskInputsAugmentor
11
  from .dict_utils import dict_get
12
+ from .operator import PackageRequirementsMixin
13
+ from .operators import FieldOperator, InstanceFieldOperator
14
+ from .settings_utils import get_constants
15
+ from .type_utils import isoftype
16
+ from .types import Image
17
+
18
+ constants = get_constants()
19
+
20
+ datasets_image = DatasetsImage()
21
+
22
+
23
+ def _image_to_bytes(image, format="JPEG"):
24
+ import base64
25
+
26
+ with io.BytesIO() as buffer:
27
+ image.save(buffer, format=format)
28
+ return base64.b64encode(buffer.getvalue()).decode("utf-8")
29
+
30
+
31
+ def image_to_data_url(image: Image, default_format="JPEG"):
32
+ """Convert an image to a data URL.
33
+
34
+ https://developer.mozilla.org/en-US/docs/Web/URI/Schemes/data
35
+ """
36
+ image_format = image["format"] if image["format"] else default_format
37
+ base64_image = _image_to_bytes(image["image"], format=image_format.upper())
38
+ return f"data:image/{image_format.lower()};base64,{base64_image}"
39
+
40
+
41
+ def _bytes_to_image(b64_string):
42
+ import base64
43
+ import io
44
+
45
+ from PIL import Image
46
+
47
+ # Decode the base64-encoded string
48
+ decoded_bytes = base64.b64decode(b64_string)
49
+ # Open the image from the decoded bytes
50
+ return Image.open(io.BytesIO(decoded_bytes))
51
+
52
+
53
+ def data_url_to_image(data_url: str):
54
+ import re
55
+
56
+ # Verify that the string is a data URL
57
+ if not data_url.startswith("data:"):
58
+ raise ValueError("Invalid data URL")
59
+
60
+ # Extract the base64 data using a regular expression
61
+ match = re.match(r"data:image/(.*?);base64,(.*)", data_url)
62
+ if not match:
63
+ raise ValueError("Invalid data URL format")
64
+
65
+ # Extract image format and base64 data
66
+ image_format, b64_data = match.groups()
67
+
68
+ # Use _bytes_to_image to convert base64 data to an image
69
+ return _bytes_to_image(b64_data)
70
 
71
 
72
  class PillowMixin(PackageRequirementsMixin):
 
75
  def prepare(self):
76
  super().prepare()
77
  import PIL
78
+ from PIL import Image, ImageEnhance, ImageFilter
79
 
80
  self.pil = PIL
81
  self.image = Image
82
+ self.enhance = ImageEnhance
83
+ self.filter = ImageFilter
84
 
85
 
86
  def extract_images(text, instance):
87
+ regex = r"<" + f"{constants.image_tag}" + r'\s+src=["\'](.*?)["\']'
88
  image_sources = re.findall(regex, text)
89
  images = []
90
  for image_source in image_sources:
 
94
 
95
 
96
  class DecodeImage(FieldOperator, PillowMixin):
97
+ def process_value(self, value: str) -> Any:
98
+ image_data = base64.b64decode(value)
99
  return self.image.open(io.BytesIO(image_data))
100
 
 
 
 
101
 
102
  class ToImage(InstanceFieldOperator):
103
+ def process_instance_value(self, value: Any, instance: Dict[str, Any]) -> Image:
104
+ return {
105
+ "image": value,
106
+ "format": value.format if value.format is not None else "JPEG",
107
+ }
108
 
109
 
110
  class ImageFieldOperator(FieldOperator, PillowMixin):
111
  @abstractmethod
112
+ def process_image(self, image: Any):
113
  pass
114
 
115
+ def process_value(self, value: Image) -> Any:
116
+ if not isinstance(value["image"], self.image.Image):
117
  raise ValueError(f"ImageFieldOperator requires image, got {type(value)}.")
118
+ value["image"] = self.process_image(value["image"])
119
+ return value
120
 
121
 
122
+ class ImageAugmentor(TaskInputsAugmentor, PillowMixin):
123
+ augmented_type: object = Image
124
+
125
+ @abstractmethod
126
+ def process_image(self, image: Any):
127
+ pass
128
+
129
+ def process_value(self, value: Image) -> Any:
130
+ if not isoftype(value, Image):
131
+ return value
132
+ value["image"] = self.process_image(value["image"])
133
+ return value
134
+
135
+
136
+ class GrayScale(ImageAugmentor):
137
  def process_image(self, image):
138
  # Convert the image to grayscale
139
  grayscale_image = image.convert("L")
 
151
  return self.image.fromarray(grayscale_array)
152
 
153
 
154
+ class GridLines(ImageAugmentor):
155
+ """A class that overlays a fixed number of evenly spaced horizontal and vertical lines on an image.
156
+
157
+ Attributes:
158
+ - num_lines (int): The number of horizontal and vertical lines to add.
159
+ - line_thickness (int): Thickness of each line in pixels.
160
+ - line_color (Tuple[int, int, int]): RGB color of the grid lines.
161
+
162
+ Methods:
163
+ - process_image(image): Adds grid lines to the provided image and returns the modified image.
164
+ """
165
+
166
+ num_lines: int = 128
167
+ line_thickness: int = 1
168
+ line_color: Tuple[int, int, int] = (255, 255, 255)
169
+
170
+ def process_image(self, image):
171
+ image_array = np.array(image)
172
+
173
+ # Determine image dimensions
174
+ height, width, _ = image_array.shape
175
+
176
+ # Calculate spacing for the lines based on image size and number of lines
177
+ horizontal_spacing = height // (self.num_lines + 1)
178
+ vertical_spacing = width // (self.num_lines + 1)
179
+
180
+ # Add horizontal lines
181
+ for i in range(1, self.num_lines + 1):
182
+ y = i * horizontal_spacing
183
+ image_array[y : y + self.line_thickness, :, :] = self.line_color
184
+
185
+ # Add vertical lines
186
+ for i in range(1, self.num_lines + 1):
187
+ x = i * vertical_spacing
188
+ image_array[:, x : x + self.line_thickness, :] = self.line_color
189
+
190
+ # Convert back to a PIL image
191
+ return self.image.fromarray(image_array)
192
+
193
+
194
+ class PixelNoise(ImageAugmentor):
195
+ """A class that overlays a mask of randomly colored nxn squares across an image based on a specified noise rate.
196
+
197
+ Attributes:
198
+ - square_size (int): Size of each square in pixels.
199
+ - noise_rate (float): Proportion of the image that should be affected by noise (0 to 1).
200
+
201
+ Methods:
202
+ - process_image(image): Adds the random square mask to the provided image and returns the modified image.
203
+ """
204
+
205
+ square_size: int = 1
206
+ noise_rate: float = 0.3 # Percentage of squares to be randomly colored
207
+
208
+ def process_image(self, image):
209
+ image_array = np.array(image)
210
+ height, width, channels = image_array.shape
211
+
212
+ # Calculate grid dimensions
213
+ y_squares = height // self.square_size
214
+ x_squares = width // self.square_size
215
+
216
+ # Create a grid indicating where to apply the mask
217
+ noise_mask = np.random.rand(y_squares, x_squares) < self.noise_rate
218
+
219
+ # Generate random colors for each square
220
+ colors = np.random.randint(
221
+ 0, 256, (y_squares, x_squares, channels), dtype=np.uint8
222
+ )
223
+
224
+ # Expand the mask and colors to the size of the image array
225
+ mask_expanded = np.repeat(
226
+ np.repeat(noise_mask, self.square_size, axis=0), self.square_size, axis=1
227
+ )
228
+ colors_expanded = np.repeat(
229
+ np.repeat(colors, self.square_size, axis=0), self.square_size, axis=1
230
+ )
231
+
232
+ # Reshape `mask_expanded` to add the color channel dimension
233
+ mask_expanded = np.repeat(mask_expanded[:, :, np.newaxis], channels, axis=2)
234
+
235
+ # Apply colors where the mask is true using element-wise assignment
236
+ image_array = np.where(mask_expanded, colors_expanded, image_array)
237
+
238
+ # Convert back to a PIL image
239
+ return self.image.fromarray(image_array)
240
+
241
+
242
+ class Oldify(ImageAugmentor):
243
+ noise_strength: int = 30
244
+ tint_strength: float = 0.4 # Percentage of squares to be randomly colored
245
+
246
+ def process_image(self, image):
247
+ # Convert to a numpy array for manipulation
248
+ image_array = np.array(image)
249
+
250
+ # Step 1: Add a slight yellowish tint
251
+ yellow_tint = np.array([255, 228, 170], dtype=np.uint8) # Aged paper-like color
252
+ tinted_image_array = (
253
+ image_array * (1 - self.tint_strength) + yellow_tint * self.tint_strength
254
+ ).astype(np.uint8)
255
+
256
+ # Step 2: Add noise for a "film grain" effect
257
+ noise = np.random.normal(0, self.noise_strength, image_array.shape).astype(
258
+ np.int16
259
+ )
260
+ noisy_image_array = np.clip(tinted_image_array + noise, 0, 255).astype(np.uint8)
261
+
262
+ # Step 3: Convert back to a PIL Image for additional processing
263
+ old_image = self.image.fromarray(noisy_image_array)
264
+
265
+ # Step 4: Apply a slight blur to mimic an older lens or slight wear
266
+ old_image = old_image.filter(self.filter.GaussianBlur(radius=1))
267
+
268
+ # Step 5: Adjust contrast and brightness to give it a "faded" look
269
+ enhancer = self.enhance.Contrast(old_image)
270
+ old_image = enhancer.enhance(0.6) # Lower contrast
271
+
272
+ enhancer = self.enhance.Brightness(old_image)
273
+ return enhancer.enhance(1.2) # Slightly increased brightness
274
+
275
+
276
  class ToRGB(ImageFieldOperator):
277
  def process_image(self, image):
278
  return image.convert("RGB")
inference.py CHANGED
@@ -1,21 +1,50 @@
1
  import abc
 
2
  import dataclasses
 
 
3
  import os
4
  import re
 
 
 
 
5
  from typing import Any, Dict, List, Literal, Optional, Union
6
 
7
  from datasets import DatasetDict
8
- from tqdm import tqdm
 
9
 
10
- from .artifact import Artifact, fetch_artifact
11
  from .dataclass import InternalField, NonPositionalField
12
  from .deprecation_utils import deprecation
13
- from .image_operators import extract_images
 
14
  from .logging_utils import get_logger
15
  from .operator import PackageRequirementsMixin
16
- from .settings_utils import get_settings
 
17
 
 
18
  settings = get_settings()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
19
 
20
 
21
  def get_model_and_label_id(model_name, label):
@@ -49,7 +78,7 @@ class TextGenerationInferenceOutput:
49
  inference_type: Optional[str] = None
50
 
51
 
52
- class InferenceEngine(abc.ABC, Artifact):
53
  """Abstract base class for inference."""
54
 
55
  @abc.abstractmethod
@@ -73,6 +102,7 @@ class InferenceEngine(abc.ABC, Artifact):
73
 
74
  def prepare(self):
75
  if not settings.mock_inference_mode:
 
76
  self.prepare_engine()
77
 
78
  def infer(
@@ -93,9 +123,15 @@ class InferenceEngine(abc.ABC, Artifact):
93
 
94
  [self.verify_instance(instance) for instance in dataset]
95
  if settings.mock_inference_mode:
96
- return [instance["source"] for instance in dataset]
97
  return self._infer(dataset, return_meta_data)
98
 
 
 
 
 
 
 
99
  def get_engine_id(self):
100
  raise NotImplementedError()
101
 
@@ -116,6 +152,22 @@ class InferenceEngine(abc.ABC, Artifact):
116
  if param_inst_val is None:
117
  setattr(self, param, param_dict_val)
118
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
119
 
120
  class LogProbInferenceEngine(abc.ABC, Artifact):
121
  """Abstract base class for inference with log probs."""
@@ -170,6 +222,8 @@ class HFPipelineBasedInferenceEngine(
170
  model_name: str
171
  max_new_tokens: int
172
  use_fp16: bool = True
 
 
173
 
174
  _requirements_list = {
175
  "transformers": "Install huggingface package using 'pip install --upgrade transformers"
@@ -178,9 +232,20 @@ class HFPipelineBasedInferenceEngine(
178
  def get_engine_id(self):
179
  return get_model_and_label_id(self.model_name, "hf_pipeline")
180
 
 
 
 
 
 
 
 
 
 
 
 
181
  def _prepare_pipeline(self):
182
  import torch
183
- from transformers import AutoConfig, pipeline
184
 
185
  model_args: Dict[str, Any] = (
186
  {"torch_dtype": torch.float16} if self.use_fp16 else {}
@@ -203,13 +268,7 @@ class HFPipelineBasedInferenceEngine(
203
  else:
204
  model_args.update({"device": device})
205
 
206
- task = (
207
- "text2text-generation"
208
- if AutoConfig.from_pretrained(
209
- self.model_name, trust_remote_code=True
210
- ).is_encoder_decoder
211
- else "text-generation"
212
- )
213
 
214
  if task == "text-generation":
215
  model_args.update({"return_full_text": False})
@@ -230,11 +289,18 @@ class HFPipelineBasedInferenceEngine(
230
  dataset: Union[List[Dict[str, Any]], DatasetDict],
231
  return_meta_data: bool = False,
232
  ) -> Union[List[str], List[TextGenerationInferenceOutput]]:
 
 
 
233
  if not self._is_loaded():
234
  self._prepare_pipeline()
235
 
236
  outputs = []
237
- for output in self.model([instance["source"] for instance in dataset]):
 
 
 
 
238
  if isinstance(output, list):
239
  output = output[0]
240
  outputs.append(output["generated_text"])
@@ -251,12 +317,18 @@ class MockInferenceEngine(InferenceEngine):
251
  def prepare_engine(self):
252
  return
253
 
 
 
 
 
 
 
254
  def _infer(
255
  self,
256
  dataset: Union[List[Dict[str, Any]], DatasetDict],
257
  return_meta_data: bool = False,
258
  ) -> Union[List[str], List[TextGenerationInferenceOutput]]:
259
- return [self.default_inference_value for instance in dataset]
260
 
261
 
262
  class MockModeMixin(Artifact):
@@ -302,7 +374,7 @@ class IbmGenAiInferenceEngineParams(Artifact):
302
  typical_p: Optional[float] = None
303
 
304
 
305
- class GenericInferenceEngine(InferenceEngine):
306
  default: Optional[str] = None
307
 
308
  def prepare_engine(self):
@@ -318,7 +390,7 @@ class GenericInferenceEngine(InferenceEngine):
318
  "\nor passing a similar required engine in the default argument"
319
  )
320
  engine_reference = self.default
321
- self.engine, _ = fetch_artifact(engine_reference)
322
 
323
  def get_engine_id(self):
324
  return "generic_inference_engine"
@@ -331,16 +403,17 @@ class GenericInferenceEngine(InferenceEngine):
331
  return self.engine._infer(dataset)
332
 
333
 
334
- class OllamaInferenceEngine(InferenceEngine, PackageRequirementsMixin):
 
 
335
  label: str = "ollama"
336
- model_name: str
337
  _requirements_list = {
338
  "ollama": "Install ollama package using 'pip install --upgrade ollama"
339
  }
340
  data_classification_policy = ["public", "proprietary"]
341
 
342
  def get_engine_id(self):
343
- return get_model_and_label_id(self.model_name, self.label)
344
 
345
  def prepare_engine(self):
346
  pass
@@ -352,19 +425,117 @@ class OllamaInferenceEngine(InferenceEngine, PackageRequirementsMixin):
352
  ) -> Union[List[str], List[TextGenerationInferenceOutput]]:
353
  import ollama
354
 
355
- result = [
356
- ollama.chat(
357
- model="llama2",
358
- messages=[
359
- {
360
- "role": "user",
361
- "content": instance["source"],
362
- },
363
- ],
 
364
  )
365
- for instance in dataset
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
366
  ]
367
- return [element["message"]["content"] for element in result]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
368
 
369
 
370
  class IbmGenAiInferenceEngine(
@@ -372,11 +543,12 @@ class IbmGenAiInferenceEngine(
372
  IbmGenAiInferenceEngineParamsMixin,
373
  PackageRequirementsMixin,
374
  LogProbInferenceEngine,
 
375
  ):
376
  label: str = "ibm_genai"
377
  model_name: str
378
  _requirements_list = {
379
- "genai": "Install ibm-genai package using 'pip install --upgrade ibm-generative-ai"
380
  }
381
  data_classification_policy = ["public", "proprietary"]
382
  parameters: Optional[IbmGenAiInferenceEngineParams] = None
@@ -482,6 +654,62 @@ class IbmGenAiInferenceEngine(
482
  )
483
  return predict_result
484
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
485
 
486
  class OpenAiInferenceEngineParamsMixin(Artifact):
487
  frequency_penalty: Optional[float] = None
@@ -569,17 +797,9 @@ class OpenAiInferenceEngine(
569
  ) -> Union[List[str], List[TextGenerationInferenceOutput]]:
570
  outputs = []
571
  for instance in tqdm(dataset, desc="Inferring with openAI API"):
 
572
  response = self.client.chat.completions.create(
573
- messages=[
574
- # {
575
- # "role": "system",
576
- # "content": self.system_prompt,
577
- # },
578
- {
579
- "role": "user",
580
- "content": instance["source"],
581
- }
582
- ],
583
  model=self.model_name,
584
  **self._get_completion_kwargs(),
585
  )
@@ -701,18 +921,19 @@ class TogetherAiInferenceEngine(
701
  if v is not None
702
  }
703
 
704
- def _infer_chat(self, prompt: str) -> str:
 
705
  response = self.client.chat.completions.create(
706
  model=self.model_name,
707
- messages=[{"role": "user", "content": prompt}],
708
  **self._get_infer_kwargs(),
709
  )
710
  return response.choices[0].message.content
711
 
712
- def _infer_text(self, prompt: str) -> str:
713
  response = self.client.completions.create(
714
  model=self.model_name,
715
- prompt=prompt,
716
  **self._get_infer_kwargs(),
717
  )
718
  return response.choices[0].text
@@ -727,10 +948,11 @@ class TogetherAiInferenceEngine(
727
  outputs = []
728
  if self.model_type == ModelType.CHAT:
729
  for instance in tqdm(dataset, desc="Inferring with Together AI Chat API"):
730
- outputs.append(self._infer_chat(instance["source"]))
731
  else:
 
732
  for instance in tqdm(dataset, desc="Inferring with Together AI Text API"):
733
- outputs.append(self._infer_text(instance["source"]))
734
  return outputs
735
 
736
 
@@ -791,6 +1013,7 @@ class WMLInferenceEngine(
791
  WMLInferenceEngineParamsMixin,
792
  PackageRequirementsMixin,
793
  LogProbInferenceEngine,
 
794
  ):
795
  """Runs inference using ibm-watsonx-ai.
796
 
@@ -835,7 +1058,7 @@ class WMLInferenceEngine(
835
  deployment_id: Optional[str] = None
836
  label: str = "wml"
837
  _requirements_list = {
838
- "ibm_watsonx_ai": "Install ibm-watsonx-ai package using 'pip install --upgrade ibm-watsonx-ai'. "
839
  "It is advised to have Python version >=3.10 installed, as at lower version this package "
840
  "may cause conflicts with other installed packages."
841
  }
@@ -930,12 +1153,13 @@ class WMLInferenceEngine(
930
  dataset: Union[List[Dict[str, Any]], DatasetDict],
931
  return_meta_data: bool = False,
932
  ) -> Union[List[str], List[TextGenerationInferenceOutput]]:
 
933
  model, params = self._load_model_and_params()
934
 
935
  result = []
936
- for instance in dataset:
937
  instance_result = model.generate(
938
- prompt=instance["source"],
939
  params=self.to_dict([WMLInferenceEngineParamsMixin], keep_empty=False),
940
  )
941
  prediction = instance_result["results"][0]["generated_text"]
@@ -951,6 +1175,8 @@ class WMLInferenceEngine(
951
  dataset: Union[List[Dict[str, Any]], DatasetDict],
952
  return_meta_data: bool = False,
953
  ) -> Union[List[Dict], List[TextGenerationInferenceOutput]]:
 
 
954
  model, params = self._load_model_and_params()
955
 
956
  user_return_options = params.pop("return_options", {})
@@ -997,11 +1223,83 @@ class WMLInferenceEngine(
997
  )
998
  return predict_result
999
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1000
 
1001
  class HFLlavaInferenceEngine(InferenceEngine, LazyLoadMixin):
1002
  model_name: str
1003
  max_new_tokens: int
1004
  lazy_load = True
 
1005
 
1006
  _requirements_list = {
1007
  "transformers": "Install huggingface package using 'pip install --upgrade transformers",
@@ -1039,6 +1337,21 @@ class HFLlavaInferenceEngine(InferenceEngine, LazyLoadMixin):
1039
  def _is_loaded(self):
1040
  return hasattr(self, "model") and self.model is not None
1041
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1042
  def _infer(
1043
  self,
1044
  dataset: Union[List[Dict[str, Any]], DatasetDict],
@@ -1051,16 +1364,19 @@ class HFLlavaInferenceEngine(InferenceEngine, LazyLoadMixin):
1051
 
1052
  results = []
1053
  for instance in tqdm(dataset):
1054
- text = instance["source"]
1055
- images = extract_images(text, instance)
1056
- # Regular expression to match all <img src="..."> tags
1057
- regex = r'<img\s+src=["\'](.*?)["\']\s*/?>'
1058
- model_input = re.sub(regex, "<image>", text)
1059
  if len(images) == 1:
1060
  images = images[0]
1061
- inputs = self.processor(
1062
- images=images, text=model_input, return_tensors="pt"
1063
- ).to(self.device, torch.float16)
 
 
 
 
 
 
1064
  input_len = len(inputs["input_ids"][0])
1065
  output = self.model.generate(
1066
  **inputs,
@@ -1074,3 +1390,542 @@ class HFLlavaInferenceEngine(InferenceEngine, LazyLoadMixin):
1074
  results.append(result)
1075
 
1076
  return results
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  import abc
2
+ import asyncio
3
  import dataclasses
4
+ import json
5
+ import logging
6
  import os
7
  import re
8
+ import sys
9
+ import time
10
+ import uuid
11
+ from collections import Counter
12
  from typing import Any, Dict, List, Literal, Optional, Union
13
 
14
  from datasets import DatasetDict
15
+ from tqdm import tqdm, trange
16
+ from tqdm.asyncio import tqdm_asyncio
17
 
18
+ from .artifact import Artifact
19
  from .dataclass import InternalField, NonPositionalField
20
  from .deprecation_utils import deprecation
21
+ from .error_utils import UnitxtError
22
+ from .image_operators import data_url_to_image, extract_images
23
  from .logging_utils import get_logger
24
  from .operator import PackageRequirementsMixin
25
+ from .operators import ArtifactFetcherMixin
26
+ from .settings_utils import get_constants, get_settings
27
 
28
+ constants = get_constants()
29
  settings = get_settings()
30
+ logger = get_logger()
31
+
32
+
33
+ class StandardAPIParamsMixin(Artifact):
34
+ model: str
35
+ frequency_penalty: Optional[float] = None
36
+ presence_penalty: Optional[float] = None
37
+ max_tokens: Optional[int] = None
38
+ seed: Optional[int] = None
39
+ stop: Union[Optional[str], List[str]] = None
40
+ temperature: Optional[float] = None
41
+ top_p: Optional[float] = None
42
+ top_logprobs: Optional[int] = None
43
+ logit_bias: Optional[Dict[str, int]] = None
44
+ logprobs: Optional[bool] = None
45
+ n: Optional[int] = None
46
+ parallel_tool_calls: Optional[bool] = None
47
+ service_tier: Optional[Literal["auto", "default"]] = None
48
 
49
 
50
  def get_model_and_label_id(model_name, label):
 
78
  inference_type: Optional[str] = None
79
 
80
 
81
+ class InferenceEngine(Artifact):
82
  """Abstract base class for inference."""
83
 
84
  @abc.abstractmethod
 
102
 
103
  def prepare(self):
104
  if not settings.mock_inference_mode:
105
+ super().prepare() # no need to prepare a mock
106
  self.prepare_engine()
107
 
108
  def infer(
 
123
 
124
  [self.verify_instance(instance) for instance in dataset]
125
  if settings.mock_inference_mode:
126
+ return self._mock_infer(dataset)
127
  return self._infer(dataset, return_meta_data)
128
 
129
+ def _mock_infer(
130
+ self,
131
+ dataset: Union[List[Dict[str, Any]], DatasetDict],
132
+ ) -> Union[List[str], List[TextGenerationInferenceOutput]]:
133
+ return [str(instance["source"]) for instance in dataset]
134
+
135
  def get_engine_id(self):
136
  raise NotImplementedError()
137
 
 
152
  if param_inst_val is None:
153
  setattr(self, param, param_dict_val)
154
 
155
+ def verify_not_chat_api(self, dataset):
156
+ if isinstance(dataset[0]["source"], list):
157
+ raise NotImplementedError(
158
+ f"Inference engine {self.__class__.__name__} does not support chat api format."
159
+ )
160
+
161
+ def to_messages(self, instance):
162
+ if isinstance(instance["source"], list):
163
+ return instance["source"]
164
+ return [
165
+ {
166
+ "role": "user",
167
+ "content": instance["source"],
168
+ }
169
+ ]
170
+
171
 
172
  class LogProbInferenceEngine(abc.ABC, Artifact):
173
  """Abstract base class for inference with log probs."""
 
222
  model_name: str
223
  max_new_tokens: int
224
  use_fp16: bool = True
225
+ batch_size: int = 1
226
+ top_k: Optional[int] = None
227
 
228
  _requirements_list = {
229
  "transformers": "Install huggingface package using 'pip install --upgrade transformers"
 
232
  def get_engine_id(self):
233
  return get_model_and_label_id(self.model_name, "hf_pipeline")
234
 
235
+ def _get_task(self):
236
+ from transformers import AutoConfig
237
+
238
+ return (
239
+ "text2text-generation"
240
+ if AutoConfig.from_pretrained(
241
+ self.model_name, trust_remote_code=True
242
+ ).is_encoder_decoder
243
+ else "text-generation"
244
+ )
245
+
246
  def _prepare_pipeline(self):
247
  import torch
248
+ from transformers import pipeline
249
 
250
  model_args: Dict[str, Any] = (
251
  {"torch_dtype": torch.float16} if self.use_fp16 else {}
 
268
  else:
269
  model_args.update({"device": device})
270
 
271
+ task = self._get_task()
 
 
 
 
 
 
272
 
273
  if task == "text-generation":
274
  model_args.update({"return_full_text": False})
 
289
  dataset: Union[List[Dict[str, Any]], DatasetDict],
290
  return_meta_data: bool = False,
291
  ) -> Union[List[str], List[TextGenerationInferenceOutput]]:
292
+ if self._get_task() == "text2text-generation":
293
+ self.verify_not_chat_api(dataset)
294
+
295
  if not self._is_loaded():
296
  self._prepare_pipeline()
297
 
298
  outputs = []
299
+ for output in self.model(
300
+ [instance["source"] for instance in dataset],
301
+ batch_size=self.batch_size,
302
+ top_k=self.top_k,
303
+ ):
304
  if isinstance(output, list):
305
  output = output[0]
306
  outputs.append(output["generated_text"])
 
317
  def prepare_engine(self):
318
  return
319
 
320
+ def _mock_infer(
321
+ self,
322
+ dataset: Union[List[Dict[str, Any]], DatasetDict],
323
+ ) -> Union[List[str], List[TextGenerationInferenceOutput]]:
324
+ return [self.default_inference_value for _ in dataset]
325
+
326
  def _infer(
327
  self,
328
  dataset: Union[List[Dict[str, Any]], DatasetDict],
329
  return_meta_data: bool = False,
330
  ) -> Union[List[str], List[TextGenerationInferenceOutput]]:
331
+ return self._mock_infer(dataset)
332
 
333
 
334
  class MockModeMixin(Artifact):
 
374
  typical_p: Optional[float] = None
375
 
376
 
377
+ class GenericInferenceEngine(InferenceEngine, ArtifactFetcherMixin):
378
  default: Optional[str] = None
379
 
380
  def prepare_engine(self):
 
390
  "\nor passing a similar required engine in the default argument"
391
  )
392
  engine_reference = self.default
393
+ self.engine = self.get_artifact(engine_reference)
394
 
395
  def get_engine_id(self):
396
  return "generic_inference_engine"
 
403
  return self.engine._infer(dataset)
404
 
405
 
406
+ class OllamaInferenceEngine(
407
+ InferenceEngine, StandardAPIParamsMixin, PackageRequirementsMixin
408
+ ):
409
  label: str = "ollama"
 
410
  _requirements_list = {
411
  "ollama": "Install ollama package using 'pip install --upgrade ollama"
412
  }
413
  data_classification_policy = ["public", "proprietary"]
414
 
415
  def get_engine_id(self):
416
+ return get_model_and_label_id(self.model, self.label)
417
 
418
  def prepare_engine(self):
419
  pass
 
425
  ) -> Union[List[str], List[TextGenerationInferenceOutput]]:
426
  import ollama
427
 
428
+ args = self.to_dict([StandardAPIParamsMixin])
429
+
430
+ results = []
431
+
432
+ for instance in dataset:
433
+ messages = self.to_messages(instance)
434
+ response = ollama.chat(
435
+ model=self.model,
436
+ messages=messages,
437
+ **args,
438
  )
439
+ results.append(response)
440
+
441
+ return [element["message"]["content"] for element in results]
442
+
443
+
444
+ class OptionSelectingByLogProbsInferenceEngine:
445
+ """OptionSelectingByLogProbsInferenceEngine inference engine is used to select an option based on the logprobs of an options list conditioned by a prompt.
446
+
447
+ The inference engines that inherit from this class must implement `get_token_count` and `get_options_log_probs`.
448
+ """
449
+
450
+ @abc.abstractmethod
451
+ def get_token_count(self, dataset):
452
+ """Get the token count of the source key of each dict of the dataset. Add to each instance in the data a "token_count" field.
453
+
454
+ Args:
455
+ dataset (List[Dict[str, Any]]): A list of dictionaries, each representing a data instance.
456
+
457
+ Returns:
458
+ List[int]: The token count of the texts
459
+ """
460
+
461
+ @abc.abstractmethod
462
+ def get_options_log_probs(self, dataset):
463
+ """Get the token logprobs of the options of the key task_data.options of each dict of the dataset.
464
+
465
+ Add to each instance in the data a "options_log_prob" field, which is a dict with str as key and a list of {text: str, logprob:float}.
466
+
467
+ Args:
468
+ dataset (List[Dict[str, Any]]): A list of dictionaries, each representing a data instance.
469
+
470
+ Returns:
471
+ List[int]: The token count of the texts
472
+ """
473
+
474
+ def select(self, dataset: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
475
+ """Calculate most likely labels based on log probabilities for a set of fixed completions."""
476
+ dataset_with_token_counts = self.get_token_count(dataset)
477
+ token_counts = [d["token_count"] for d in dataset_with_token_counts]
478
+
479
+ # pass in the token count so we only return the option score
480
+ dataset_with_options = [
481
+ {
482
+ "source": instance["source"] + option,
483
+ "task_data": {"token_count": token_count},
484
+ }
485
+ for instance, token_count in zip(dataset, token_counts)
486
+ for option in instance["task_data"]["options"]
487
  ]
488
+
489
+ dataset_with_options_logprobs: list[
490
+ list[dict[str, float | str]]
491
+ ] = self.get_options_log_probs(dataset_with_options)
492
+
493
+ dataset_iterator = iter(dataset_with_options_logprobs)
494
+
495
+ for instance in dataset:
496
+ tokens_with_logprob_list = []
497
+ # get the input tokens for the completions of the current resp_idx
498
+ for _ in instance["task_data"]["options"]:
499
+ tokens_with_logprob = next(dataset_iterator)["prediction"]
500
+ tokens_with_logprob_list.append(tokens_with_logprob)
501
+ # we start comparing all the options, e.g. if there are five options the value will be [0,1,2,3,4]
502
+ to_compare_indexes = list(range(len(instance["task_data"]["options"])))
503
+ # token_with_logprob_comp is the logprobs and the text of the tokens
504
+ # for each of the options at a specific index
505
+ for token_with_logprob_comp in zip(*tokens_with_logprob_list):
506
+ tokens_comp = [t["text"] for t in token_with_logprob_comp]
507
+ logprobs_comp = [t["logprob"] for t in token_with_logprob_comp]
508
+ # Find the maximum value by comparing the logprob of the nth token of non-discarded options
509
+ index_max = max(
510
+ (
511
+ (val, idx)
512
+ for idx, val in enumerate(logprobs_comp)
513
+ if idx in to_compare_indexes
514
+ ),
515
+ key=lambda x: x[0],
516
+ )[1]
517
+ # get the token of the biggest logprob
518
+ token_value_with_max_logprob = tokens_comp[index_max]
519
+ # check that the token is not repeated in the non-discarded options
520
+ count = tokens_comp.count(token_value_with_max_logprob)
521
+ if count > 1:
522
+ # multiple tokens with same max logprob, we need to continue iterating
523
+ to_compare_indexes = [
524
+ index
525
+ for index, token_value in enumerate(tokens_comp)
526
+ if token_value == token_value_with_max_logprob
527
+ ]
528
+ continue
529
+ # we got the index of the maximum log_prob that doesn't have a duplicated token value at other index
530
+ break
531
+
532
+ if len(to_compare_indexes) > 1:
533
+ # multiple options are either equal or have the same token values prefix
534
+ # choose the first
535
+ index_max = to_compare_indexes[0]
536
+
537
+ instance["prediction"] = instance["task_data"]["options"][index_max]
538
+ return dataset
539
 
540
 
541
  class IbmGenAiInferenceEngine(
 
543
  IbmGenAiInferenceEngineParamsMixin,
544
  PackageRequirementsMixin,
545
  LogProbInferenceEngine,
546
+ OptionSelectingByLogProbsInferenceEngine,
547
  ):
548
  label: str = "ibm_genai"
549
  model_name: str
550
  _requirements_list = {
551
+ "ibm-generative-ai": "Install ibm-genai package using 'pip install --upgrade ibm-generative-ai"
552
  }
553
  data_classification_policy = ["public", "proprietary"]
554
  parameters: Optional[IbmGenAiInferenceEngineParams] = None
 
654
  )
655
  return predict_result
656
 
657
+ def get_token_count(self, dataset):
658
+ texts = [instance["source"] for instance in dataset]
659
+ token_counts = list(
660
+ tqdm(
661
+ [
662
+ result.token_count
663
+ for response in self.client.text.tokenization.create(
664
+ model_id=self.model_name,
665
+ input=texts,
666
+ execution_options={"ordered": True},
667
+ )
668
+ for result in response.results
669
+ ],
670
+ desc="Tokenizing",
671
+ total=len(texts),
672
+ )
673
+ )
674
+ for i, token_count in enumerate(token_counts):
675
+ dataset[i]["token_count"] = token_count
676
+ return dataset
677
+
678
+ def get_options_log_probs(self, dataset):
679
+ """Add to each instance in the data a "options_log_prob" field, which is a dict with str as key and a list of {text: str, logprob:float}."""
680
+ from genai.schema import TextGenerationParameters, TextGenerationReturnOptions
681
+
682
+ texts = [x["source"] for x in dataset]
683
+
684
+ responses = tqdm(
685
+ self.client.text.generation.create(
686
+ model_id=self.model_name,
687
+ inputs=texts,
688
+ execution_options={"ordered": True},
689
+ parameters=TextGenerationParameters(
690
+ max_new_tokens=1,
691
+ return_options=TextGenerationReturnOptions(
692
+ input_tokens=True, token_logprobs=True
693
+ ),
694
+ # random_seed=self.random_state
695
+ ),
696
+ ),
697
+ total=len(texts),
698
+ desc="Completions",
699
+ )
700
+
701
+ scores = [
702
+ [
703
+ {"text": token.text, "logprob": token.logprob}
704
+ for token in response.results[0].input_tokens
705
+ ]
706
+ for response in responses
707
+ ]
708
+
709
+ for instance, score in zip(dataset, scores):
710
+ instance["prediction"] = score[instance["task_data"]["token_count"] - 1 :]
711
+ return dataset
712
+
713
 
714
  class OpenAiInferenceEngineParamsMixin(Artifact):
715
  frequency_penalty: Optional[float] = None
 
797
  ) -> Union[List[str], List[TextGenerationInferenceOutput]]:
798
  outputs = []
799
  for instance in tqdm(dataset, desc="Inferring with openAI API"):
800
+ messages = self.to_messages(instance)
801
  response = self.client.chat.completions.create(
802
+ messages=messages,
 
 
 
 
 
 
 
 
 
803
  model=self.model_name,
804
  **self._get_completion_kwargs(),
805
  )
 
921
  if v is not None
922
  }
923
 
924
+ def _infer_chat(self, instance: Dict[str, Any]) -> str:
925
+ messages = self.to_messages(instance)
926
  response = self.client.chat.completions.create(
927
  model=self.model_name,
928
+ messages=messages,
929
  **self._get_infer_kwargs(),
930
  )
931
  return response.choices[0].message.content
932
 
933
+ def _infer_text(self, instance: Dict[str, Any]) -> str:
934
  response = self.client.completions.create(
935
  model=self.model_name,
936
+ prompt=instance["source"],
937
  **self._get_infer_kwargs(),
938
  )
939
  return response.choices[0].text
 
948
  outputs = []
949
  if self.model_type == ModelType.CHAT:
950
  for instance in tqdm(dataset, desc="Inferring with Together AI Chat API"):
951
+ outputs.append(self._infer_chat(instance))
952
  else:
953
+ self.verify_not_chat_api(dataset)
954
  for instance in tqdm(dataset, desc="Inferring with Together AI Text API"):
955
+ outputs.append(self._infer_text(instance))
956
  return outputs
957
 
958
 
 
1013
  WMLInferenceEngineParamsMixin,
1014
  PackageRequirementsMixin,
1015
  LogProbInferenceEngine,
1016
+ OptionSelectingByLogProbsInferenceEngine,
1017
  ):
1018
  """Runs inference using ibm-watsonx-ai.
1019
 
 
1058
  deployment_id: Optional[str] = None
1059
  label: str = "wml"
1060
  _requirements_list = {
1061
+ "ibm-watsonx-ai==1.1.14": "Install ibm-watsonx-ai package using 'pip install --upgrade ibm-watsonx-ai'. "
1062
  "It is advised to have Python version >=3.10 installed, as at lower version this package "
1063
  "may cause conflicts with other installed packages."
1064
  }
 
1153
  dataset: Union[List[Dict[str, Any]], DatasetDict],
1154
  return_meta_data: bool = False,
1155
  ) -> Union[List[str], List[TextGenerationInferenceOutput]]:
1156
+ self.verify_not_chat_api(dataset)
1157
  model, params = self._load_model_and_params()
1158
 
1159
  result = []
1160
+ for source in dataset["source"]:
1161
  instance_result = model.generate(
1162
+ prompt=source,
1163
  params=self.to_dict([WMLInferenceEngineParamsMixin], keep_empty=False),
1164
  )
1165
  prediction = instance_result["results"][0]["generated_text"]
 
1175
  dataset: Union[List[Dict[str, Any]], DatasetDict],
1176
  return_meta_data: bool = False,
1177
  ) -> Union[List[Dict], List[TextGenerationInferenceOutput]]:
1178
+ self.verify_not_chat_api(dataset)
1179
+
1180
  model, params = self._load_model_and_params()
1181
 
1182
  user_return_options = params.pop("return_options", {})
 
1223
  )
1224
  return predict_result
1225
 
1226
+ def get_token_count(self, dataset):
1227
+ from ibm_watsonx_ai.foundation_models import ModelInference
1228
+
1229
+ texts = [instance["source"] for instance in dataset]
1230
+
1231
+ model = ModelInference(
1232
+ model_id=self.model_name,
1233
+ deployment_id=self.deployment_id,
1234
+ api_client=self._client,
1235
+ )
1236
+
1237
+ for i in trange(len(texts), desc="Tokenizing"):
1238
+ response = model.tokenize(prompt=texts[i], return_tokens=True)["result"]
1239
+ dataset[i]["token_count"] = response["token_count"]
1240
+
1241
+ return dataset
1242
+
1243
+ def get_options_log_probs(self, dataset):
1244
+ """Add to each instance in the data a "options_log_prob" field, which is a dict with str as key and a list of {text: str, logprob:float}."""
1245
+ from ibm_watsonx_ai.foundation_models import ModelInference
1246
+
1247
+ model = ModelInference(
1248
+ model_id=self.model_name,
1249
+ deployment_id=self.deployment_id,
1250
+ api_client=self._client,
1251
+ )
1252
+
1253
+ texts = [x["source"] for x in dataset]
1254
+
1255
+ responses = list(
1256
+ tqdm(
1257
+ model.generate(
1258
+ prompt=texts,
1259
+ params={
1260
+ "decoding_method": "greedy",
1261
+ "max_new_tokens": 1,
1262
+ "return_options": {
1263
+ "input_tokens": True,
1264
+ "token_logprobs": True,
1265
+ },
1266
+ },
1267
+ ),
1268
+ total=len(texts),
1269
+ desc="Completions",
1270
+ )
1271
+ )
1272
+
1273
+ scores = [
1274
+ [
1275
+ {
1276
+ "text": token["text"],
1277
+ "logprob": token["logprob"] if "logprob" in token else 1,
1278
+ }
1279
+ for token in response["results"][0]["input_tokens"]
1280
+ ]
1281
+ for response in responses
1282
+ ]
1283
+
1284
+ for instance, score in zip(dataset, scores):
1285
+ instance["prediction"] = score[instance["task_data"]["token_count"] - 1 :]
1286
+ return dataset
1287
+
1288
+
1289
+ def get_images_without_text(instance):
1290
+ return extract_images(instance["source"], instance)
1291
+
1292
+
1293
+ def get_text_without_images(instance, image_token="<image>"):
1294
+ regex = r"<" + f"{constants.image_tag}" + r'\s+src=["\'](.*?)["\']\s*/?>'
1295
+ return re.sub(regex, image_token, instance["source"])
1296
+
1297
 
1298
  class HFLlavaInferenceEngine(InferenceEngine, LazyLoadMixin):
1299
  model_name: str
1300
  max_new_tokens: int
1301
  lazy_load = True
1302
+ image_token = "<image>"
1303
 
1304
  _requirements_list = {
1305
  "transformers": "Install huggingface package using 'pip install --upgrade transformers",
 
1337
  def _is_loaded(self):
1338
  return hasattr(self, "model") and self.model is not None
1339
 
1340
+ def _get_input(self, instance):
1341
+ assert isinstance(instance["source"], list), "Must use format=formats.chat_api"
1342
+ images = []
1343
+ conversation = []
1344
+ for turn in instance["source"]:
1345
+ if isinstance(turn["content"], list):
1346
+ for content in turn["content"]:
1347
+ if content["type"] == "image_url":
1348
+ content["type"] = "image"
1349
+ image_url = content.pop("image_url")["url"]
1350
+ image = data_url_to_image(image_url)
1351
+ images.append(image)
1352
+ conversation.append(turn)
1353
+ return conversation, images
1354
+
1355
  def _infer(
1356
  self,
1357
  dataset: Union[List[Dict[str, Any]], DatasetDict],
 
1364
 
1365
  results = []
1366
  for instance in tqdm(dataset):
1367
+ conversation, images = self._get_input(instance)
1368
+
 
 
 
1369
  if len(images) == 1:
1370
  images = images[0]
1371
+
1372
+ text = self.processor.apply_chat_template(
1373
+ conversation, add_generation_prompt=True
1374
+ )
1375
+
1376
+ inputs = self.processor(images=images, text=text, return_tensors="pt").to(
1377
+ self.device, torch.float16
1378
+ )
1379
+
1380
  input_len = len(inputs["input_ids"][0])
1381
  output = self.model.generate(
1382
  **inputs,
 
1390
  results.append(result)
1391
 
1392
  return results
1393
+
1394
+
1395
+ class LMMSEvalBaseInferenceEngine(
1396
+ InferenceEngine, PackageRequirementsMixin, LazyLoadMixin
1397
+ ):
1398
+ model_type: str
1399
+ model_args: Dict[str, str]
1400
+ batch_size: int = 1
1401
+ image_token = "<image>"
1402
+
1403
+ _requirements_list = ["lmms-eval==0.2.4"]
1404
+
1405
+ def prepare_engine(self):
1406
+ if not self.lazy_load:
1407
+ self._prepare_engine()
1408
+
1409
+ def _prepare_engine(self):
1410
+ import torch
1411
+ from lmms_eval.api.instance import Instance
1412
+ from lmms_eval.models import get_model
1413
+
1414
+ self.new_instance = Instance
1415
+
1416
+ self.device = torch.device(
1417
+ "mps"
1418
+ if torch.backends.mps.is_available()
1419
+ else "cuda"
1420
+ if torch.cuda.is_available()
1421
+ else "cpu"
1422
+ )
1423
+
1424
+ if isinstance(self.model_args, dict):
1425
+ self.model_args = ",".join(f"{k}={v}" for k, v in self.model_args.items())
1426
+
1427
+ self.model = get_model(self.model_type).create_from_arg_string(
1428
+ self.model_args,
1429
+ {
1430
+ "batch_size": self.batch_size,
1431
+ "device": self.device,
1432
+ },
1433
+ )
1434
+
1435
+ def _is_loaded(self):
1436
+ return hasattr(self, "model") and self.model is not None
1437
+
1438
+
1439
+ class LMMSEvalInferenceEngine(LMMSEvalBaseInferenceEngine):
1440
+ max_new_tokens: int = 32
1441
+ temperature: float = 0.0
1442
+ do_sample: bool = False
1443
+ generate_until: List[str] = ["\n\n"]
1444
+
1445
+ def _infer(
1446
+ self,
1447
+ dataset: Union[List[Dict[str, Any]], DatasetDict],
1448
+ return_meta_data: bool = False,
1449
+ ) -> Union[List[str], List[TextGenerationInferenceOutput]]:
1450
+ self.verify_not_chat_api(dataset)
1451
+ if not self._is_loaded():
1452
+ self._prepare_engine()
1453
+
1454
+ from lmms_eval.api.instance import Instance
1455
+
1456
+ temp_task_name = str(uuid.uuid4())
1457
+
1458
+ requests = []
1459
+ for i, instance in enumerate(dataset):
1460
+ requests.append(
1461
+ Instance(
1462
+ request_type="generate_until",
1463
+ arguments=(
1464
+ get_text_without_images(instance, image_token=self.image_token),
1465
+ {
1466
+ "max_new_tokens": self.max_new_tokens,
1467
+ "temperature": self.temperature,
1468
+ "do_sample": self.do_sample,
1469
+ "until": self.generate_until,
1470
+ },
1471
+ get_images_without_text,
1472
+ i,
1473
+ temp_task_name,
1474
+ "test",
1475
+ ),
1476
+ idx=i,
1477
+ metadata={
1478
+ "task": temp_task_name,
1479
+ "doc_id": i,
1480
+ "repeats": 1,
1481
+ },
1482
+ )
1483
+ )
1484
+
1485
+ self.model.task_dict[temp_task_name] = DatasetDict({"test": dataset})
1486
+
1487
+ responses = self.model.generate_until(requests)
1488
+
1489
+ self.model.task_dict.pop(temp_task_name)
1490
+
1491
+ return responses
1492
+
1493
+
1494
+ class LMMSEvalLoglikelihoodInferenceEngine(LMMSEvalBaseInferenceEngine):
1495
+ request_type: Literal["loglikelihood"] = "loglikelihood"
1496
+
1497
+ def make_instance(self, instance, special_args, index, task_name):
1498
+ from lmms_eval.api.instance import Instance
1499
+
1500
+ return Instance(
1501
+ request_type=self.request_type,
1502
+ arguments=(
1503
+ get_text_without_images(instance, image_token=self.image_token),
1504
+ special_args,
1505
+ get_images_without_text,
1506
+ index,
1507
+ task_name,
1508
+ "test",
1509
+ ),
1510
+ idx=index,
1511
+ metadata={
1512
+ "task": task_name,
1513
+ "doc_id": index,
1514
+ "repeats": 1,
1515
+ },
1516
+ )
1517
+
1518
+ def _infer(
1519
+ self,
1520
+ dataset: Union[List[Dict[str, Any]], DatasetDict],
1521
+ return_meta_data: bool = False,
1522
+ ) -> Union[List[str], List[TextGenerationInferenceOutput]]:
1523
+ if not self._is_loaded():
1524
+ self._prepare_engine()
1525
+
1526
+ temp_task_name = str(uuid.uuid4())
1527
+
1528
+ requests = []
1529
+ for i, instance in enumerate(dataset):
1530
+ task_data = instance["task_data"]
1531
+
1532
+ if isinstance(task_data, str):
1533
+ task_data = json.loads(task_data)
1534
+
1535
+ for option in task_data["options"]:
1536
+ requests.append(
1537
+ self.make_instance(
1538
+ instance,
1539
+ option,
1540
+ i,
1541
+ temp_task_name,
1542
+ )
1543
+ )
1544
+
1545
+ self.model.task_dict[temp_task_name] = DatasetDict({"test": dataset})
1546
+ self.model.metadata = {}
1547
+
1548
+ responses = self.model.loglikelihood(requests)
1549
+
1550
+ self.model.task_dict.pop(temp_task_name)
1551
+
1552
+ optimal_scores = [sys.float_info.max] * len(dataset)
1553
+ optimal_responses = [None] * len(dataset)
1554
+
1555
+ for request, (score, _) in zip(requests, responses):
1556
+ if score < optimal_scores[request.idx]:
1557
+ optimal_scores[request.idx] = score
1558
+ optimal_responses[request.idx] = request.arguments[1]
1559
+
1560
+ return optimal_responses
1561
+
1562
+
1563
+ class VLLMInferenceEngine(
1564
+ InferenceEngine, PackageRequirementsMixin, StandardAPIParamsMixin
1565
+ ):
1566
+ def prepare_engine(self):
1567
+ from vllm import LLM, SamplingParams
1568
+
1569
+ args = self.to_dict([StandardAPIParamsMixin])
1570
+ self.sampling_params = SamplingParams(**args)
1571
+ self.llm = LLM(model=self.model)
1572
+
1573
+ def _infer(
1574
+ self,
1575
+ dataset: Union[List[Dict[str, Any]], DatasetDict],
1576
+ return_meta_data: bool = False,
1577
+ ) -> Union[List[str], List[TextGenerationInferenceOutput]]:
1578
+ inputs = []
1579
+ for instance in dataset:
1580
+ inputs.append(instance["source"])
1581
+
1582
+ if isinstance(inputs[0], list):
1583
+ outputs = self.llm.chat(inputs, self.sampling_params)
1584
+ else:
1585
+ outputs = self.llm.generate(inputs, self.sampling_params)
1586
+
1587
+ predictions = []
1588
+ for output in outputs:
1589
+ predictions.append(output.outputs[0].text)
1590
+
1591
+ return predictions
1592
+
1593
+
1594
+ class AsyncTokenBucket:
1595
+ def __init__(self, rate, capacity):
1596
+ self.rate = rate # Tokens added per second
1597
+ self.capacity = capacity # Maximum tokens in the bucket
1598
+ self.tokens = capacity
1599
+ self.timestamp = time.perf_counter()
1600
+ self.lock = asyncio.Lock()
1601
+ self.interval = 1.0 / self.rate # Time between tokens
1602
+
1603
+ async def acquire(self, tokens=1):
1604
+ while True:
1605
+ async with self.lock:
1606
+ now = time.perf_counter()
1607
+ delta = now - self.timestamp
1608
+
1609
+ # Calculate the number of tokens to add
1610
+ token_intervals = int(delta / self.interval)
1611
+ if token_intervals > 0:
1612
+ self.tokens = min(self.capacity, self.tokens + token_intervals)
1613
+ self.timestamp += token_intervals * self.interval
1614
+ logging.debug(
1615
+ f"Added {token_intervals} tokens. Tokens now: {self.tokens}"
1616
+ )
1617
+
1618
+ if self.tokens >= tokens:
1619
+ self.tokens -= tokens
1620
+ logging.debug(f"Token acquired. Tokens left: {self.tokens}")
1621
+ return
1622
+ # Calculate time until the next token is available
1623
+ time_until_next_token = self.interval - (now - self.timestamp)
1624
+ logging.debug(
1625
+ f"Not enough tokens. Need to wait {time_until_next_token:.4f} seconds."
1626
+ )
1627
+ # Sleep outside the lock to allow other coroutines to proceed
1628
+ await asyncio.sleep(time_until_next_token)
1629
+
1630
+
1631
+ class LiteLLMInferenceEngine(
1632
+ InferenceEngine, StandardAPIParamsMixin, PackageRequirementsMixin
1633
+ ):
1634
+ max_requests_per_second: float = 6
1635
+ max_retries: int = 5 # Set to 0 to prevent internal retries
1636
+
1637
+ _requirements_list: list = ["litellm", "tenacity", "tqdm", "diskcache"]
1638
+
1639
+ def prepare_engine(self):
1640
+ # Initialize the token bucket rate limiter
1641
+ self._rate_limiter = AsyncTokenBucket(
1642
+ rate=self.max_requests_per_second,
1643
+ capacity=self.max_requests_per_second,
1644
+ )
1645
+ self.inference_type = "litellm"
1646
+ import litellm
1647
+ from litellm import acompletion
1648
+ from litellm.caching.caching import Cache
1649
+
1650
+ litellm.cache = Cache(type="disk")
1651
+
1652
+ self._completion = acompletion
1653
+ # Initialize a semaphore to limit concurrency
1654
+ self._semaphore = asyncio.Semaphore(self.max_requests_per_second)
1655
+
1656
+ async def _infer_instance(
1657
+ self, index: int, instance: Dict[str, Any]
1658
+ ) -> TextGenerationInferenceOutput:
1659
+ """Process a single inference request."""
1660
+ async with self._semaphore:
1661
+ await self._rate_limiter.acquire()
1662
+ # Introduce a slight delay to prevent burstiness
1663
+ await asyncio.sleep(0.01)
1664
+ messages = self.to_messages(instance)
1665
+ kwargs = self.to_dict([StandardAPIParamsMixin])
1666
+ try:
1667
+ response = await self._completion(
1668
+ messages=messages,
1669
+ max_retries=self.max_retries,
1670
+ caching=True,
1671
+ **kwargs,
1672
+ )
1673
+ except Exception as e:
1674
+ raise RuntimeError(
1675
+ f"Error inferring the following instance:\n{instance}"
1676
+ ) from e
1677
+
1678
+ usage = response.get("usage", {})
1679
+ return TextGenerationInferenceOutput(
1680
+ prediction=response["choices"][0]["message"]["content"],
1681
+ input_tokens=usage.get("prompt_tokens"),
1682
+ output_tokens=usage.get("completion_tokens"),
1683
+ model_name=response.get("model", self.model),
1684
+ inference_type=self.inference_type,
1685
+ )
1686
+
1687
+ async def _infer_async(
1688
+ self, dataset: List[Dict[str, Any]]
1689
+ ) -> List[TextGenerationInferenceOutput]:
1690
+ """Process multiple inference requests concurrently with a progress bar."""
1691
+ tasks = [
1692
+ self._infer_instance(i, instance) for i, instance in enumerate(dataset)
1693
+ ]
1694
+ # Use tqdm_asyncio.gather to display progress bar
1695
+ return await tqdm_asyncio.gather(
1696
+ *tasks, desc=f"LiteLLM Inference ({self.model})", total=len(tasks)
1697
+ )
1698
+
1699
+ def _infer(
1700
+ self,
1701
+ dataset: Union[List[Dict[str, Any]], "DatasetDict"],
1702
+ return_meta_data: bool = False,
1703
+ ) -> Union[List[str], List[TextGenerationInferenceOutput]]:
1704
+ """Main inference entry point."""
1705
+ loop = asyncio.get_event_loop()
1706
+ responses = loop.run_until_complete(self._infer_async(dataset))
1707
+
1708
+ if return_meta_data:
1709
+ return responses
1710
+
1711
+ return [response.prediction for response in responses]
1712
+
1713
+
1714
+ _supported_apis = Literal[
1715
+ "watsonx", "together-ai", "open-ai", "aws", "ollama", "bam", "watsonx-sdk"
1716
+ ]
1717
+
1718
+
1719
+ class CrossProviderInferenceEngine(InferenceEngine, StandardAPIParamsMixin):
1720
+ """Inference engine capable of dynamically switching between multiple providers APIs.
1721
+
1722
+ This class extends the InferenceEngine and OpenAiInferenceEngineParamsMixin
1723
+ to enable seamless integration with various API providers. The supported APIs are
1724
+ specified in `_supported_apis`, allowing users to interact with multiple models
1725
+ from different sources. The `api_model_map` dictionary maps each API to
1726
+ specific model identifiers, enabling automatic configuration based on
1727
+ user requests.
1728
+
1729
+ Attributes:
1730
+ provider: Optional; Specifies the current API in use. Must be one of the
1731
+ literals in `_supported_apis`.
1732
+ provider_model_map: Dictionary mapping each supported API to a corresponding
1733
+ model identifier string. This mapping allows consistent access to models
1734
+ across different API backends.
1735
+ """
1736
+
1737
+ provider: Optional[_supported_apis] = None
1738
+
1739
+ provider_model_map: Dict[_supported_apis, Dict[str, str]] = {
1740
+ "watsonx": {
1741
+ "llama-3-8b-instruct": "watsonx/meta-llama/llama-3-8b-instruct",
1742
+ "llama-3-70b-instruct": "watsonx/meta-llama/llama-3-70b-instruct",
1743
+ "granite-3-8b-instruct": "watsonx/ibm/granite-3-8b-instruct",
1744
+ "flan-t5-xxl": "watsonx/google/flan-t5-xxl",
1745
+ "llama-3-2-1b-instruct": "watsonx/meta-llama/llama-3-2-1b-instruct",
1746
+ },
1747
+ "watsonx-sdk": {
1748
+ "llama-3-8b-instruct": "meta-llama/llama-3-8b-instruct",
1749
+ "llama-3-70b-instruct": "meta-llama/llama-3-70b-instruct",
1750
+ "granite-3-8b-instruct": "ibm/granite-3-8b-instruct",
1751
+ },
1752
+ "together-ai": {
1753
+ "llama-3-8b-instruct": "together_ai/togethercomputer/llama-3-8b-instruct",
1754
+ "llama-3-70b-instruct": "together_ai/togethercomputer/llama-3-70b-instruct",
1755
+ "llama-3-2-1b-instruct": "together_ai/togethercomputer/llama-3-2-1b-instruct",
1756
+ },
1757
+ "aws": {
1758
+ "llama-3-8b-instruct": "bedrock/meta.llama3-8b-instruct-v1:0",
1759
+ "llama-3-70b-instruct": "bedrock/meta.llama3-70b-instruct-v1:0",
1760
+ },
1761
+ "ollama": {
1762
+ "llama-3-8b-instruct": "llama3:8b",
1763
+ "llama-3-70b-instruct": "llama3:70b",
1764
+ },
1765
+ "bam": {
1766
+ "granite-3-8b-instruct": "ibm/granite-8b-instruct-preview-4k",
1767
+ "llama-3-8b-instruct": "meta-llama/llama-3-8b-instruct",
1768
+ "llama-3-2-1b-instruct": "meta-llama/llama-3-2-1b-instruct",
1769
+ "flan-t5-xxl": "google/flan-t5-xxl",
1770
+ },
1771
+ }
1772
+
1773
+ _provider_to_base_class = {
1774
+ "watsonx": LiteLLMInferenceEngine,
1775
+ "open-ai": LiteLLMInferenceEngine,
1776
+ "together-ai": LiteLLMInferenceEngine,
1777
+ "aws": LiteLLMInferenceEngine,
1778
+ "ollama": OllamaInferenceEngine,
1779
+ "bam": IbmGenAiInferenceEngine,
1780
+ "watsonx-sdk": WMLInferenceEngine,
1781
+ }
1782
+
1783
+ _provider_param_renaming = {
1784
+ "bam": {"max_tokens": "max_new_tokens", "model": "model_name"},
1785
+ "watsonx-sdk": {"max_tokens": "max_new_tokens", "model": "model_name"},
1786
+ }
1787
+
1788
+ def get_provider_name(self):
1789
+ return self.provider if self.provider is not None else settings.default_provider
1790
+
1791
+ def prepare_engine(self):
1792
+ provider = self.get_provider_name()
1793
+ if provider not in self._provider_to_base_class:
1794
+ raise UnitxtError(
1795
+ f"{provider} a known API. Supported apis: {','.join(self.provider_model_map.keys())}"
1796
+ )
1797
+ if self.model not in self.provider_model_map[provider]:
1798
+ raise UnitxtError(
1799
+ f"{self.model} is not configured for provider {provider}. Supported models: {','.join(self.provider_model_map[provider].keys())}"
1800
+ )
1801
+ cls = self.__class__._provider_to_base_class[provider]
1802
+ args = self.to_dict([StandardAPIParamsMixin])
1803
+ args["model"] = self.provider_model_map[provider][self.model]
1804
+ params = list(args.keys())
1805
+ if provider in self._provider_param_renaming:
1806
+ for param in params:
1807
+ if args[param] is not None:
1808
+ if param in self._provider_param_renaming[provider]:
1809
+ args[self._provider_param_renaming[provider][param]] = args[
1810
+ param
1811
+ ]
1812
+ del args[param]
1813
+ else:
1814
+ del args[param]
1815
+ self.engine = cls(**args)
1816
+
1817
+ def _infer(
1818
+ self,
1819
+ dataset: Union[List[Dict[str, Any]], DatasetDict],
1820
+ return_meta_data: bool = False,
1821
+ ) -> Union[List[str], List[TextGenerationInferenceOutput]]:
1822
+ return self.engine._infer(dataset, return_meta_data)
1823
+
1824
+ def get_engine_id(self):
1825
+ api = self.get_provider_name()
1826
+ return get_model_and_label_id(self.provider_model_map[api][self.model], api)
1827
+
1828
+
1829
+ class HFOptionSelectingInferenceEngine(InferenceEngine):
1830
+ """HuggingFace based class for inference engines that calculate log probabilities.
1831
+
1832
+ This class uses models from the HuggingFace Transformers library to calculate log probabilities for text inputs.
1833
+ """
1834
+
1835
+ model_name: str
1836
+ batch_size: int
1837
+
1838
+ _requirements_list = {
1839
+ "transformers": "Install huggingface package using 'pip install --upgrade transformers"
1840
+ }
1841
+
1842
+ def prepare_engine(self):
1843
+ import torch
1844
+ from transformers import AutoModelForCausalLM, AutoTokenizer
1845
+
1846
+ self.device = torch.device(
1847
+ "mps"
1848
+ if torch.backends.mps.is_available()
1849
+ else "cuda"
1850
+ if torch.cuda.is_available()
1851
+ else "cpu"
1852
+ )
1853
+
1854
+ # Load model and tokenizer
1855
+ self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
1856
+ self.model = AutoModelForCausalLM.from_pretrained(self.model_name).to(
1857
+ self.device
1858
+ )
1859
+ # Set pad_token if it doesn't exist
1860
+ if self.tokenizer.pad_token is None:
1861
+ self.tokenizer.pad_token = self.tokenizer.eos_token
1862
+
1863
+ def get_log_probs(self, texts):
1864
+ # Check available device
1865
+ import torch
1866
+ from tqdm import tqdm
1867
+
1868
+ log_probs = []
1869
+
1870
+ # Process texts in batches
1871
+ for i in tqdm(range(0, len(texts), self.batch_size)):
1872
+ batch = texts[i : i + self.batch_size]
1873
+
1874
+ # Tokenize batch
1875
+ if isinstance(texts[0], list):
1876
+ batch = self.tokenizer.apply_chat_template(batch, tokenize=False)
1877
+
1878
+ inputs = self.tokenizer(
1879
+ batch, return_tensors="pt", padding=True, truncation=True
1880
+ ).to(self.device)
1881
+
1882
+ # Compute log probabilities
1883
+ with torch.no_grad():
1884
+ predictions = self.model(**inputs)
1885
+ logits = predictions.logits
1886
+
1887
+ for j in range(len(batch)):
1888
+ input_ids = inputs.input_ids[j]
1889
+ text_logits = logits[j, :-1, :] # exclude last token
1890
+ text_log_probs = torch.log_softmax(text_logits, dim=-1)
1891
+
1892
+ # Gather log probs for each token
1893
+ token_log_probs = text_log_probs[
1894
+ torch.arange(text_logits.shape[0]), input_ids[1:]
1895
+ ]
1896
+
1897
+ # Sum log probs to get sequence log prob
1898
+ sequence_log_prob = token_log_probs.sum().item()
1899
+ log_probs.append(sequence_log_prob)
1900
+
1901
+ return log_probs
1902
+
1903
+ def _infer(
1904
+ self,
1905
+ dataset: Union[List[Dict[str, Any]], DatasetDict],
1906
+ return_meta_data: bool = False,
1907
+ ) -> Union[List[str], List[TextGenerationInferenceOutput]]:
1908
+ inputs = []
1909
+
1910
+ for instance in dataset:
1911
+ for option in instance["task_data"]["options"]:
1912
+ if isinstance(instance["source"], list):
1913
+ inputs.append(
1914
+ instance["source"] + [{"role": "assistant", "content": option}]
1915
+ )
1916
+ else:
1917
+ inputs.append(instance["source"] + option)
1918
+
1919
+ scores = self.get_log_probs(inputs)
1920
+
1921
+ scores_iterator = iter(scores)
1922
+
1923
+ predictions = []
1924
+ for instance in dataset:
1925
+ options_scores = Counter()
1926
+ for option in instance["task_data"]["options"]:
1927
+ score = next(scores_iterator)
1928
+ options_scores[option] = score
1929
+ predictions.append(options_scores.most_common(1)[0][0])
1930
+
1931
+ return predictions
llm_as_judge.py CHANGED
@@ -2,12 +2,12 @@ from abc import abstractmethod
2
  from typing import Any, Dict, List, Literal, Optional
3
 
4
  from .api import infer
5
- from .artifact import fetch_artifact
6
  from .dataclass import Field
7
  from .formats import Format, SystemFormat
8
  from .inference import InferenceEngine, LogProbInferenceEngine, OpenAiInferenceEngine
9
  from .metrics import BulkInstanceMetric
10
  from .operator import SequentialOperator
 
11
  from .settings_utils import get_settings
12
  from .system_prompts import EmptySystemPrompt, SystemPrompt
13
  from .templates import Template
@@ -122,7 +122,7 @@ class LLMAsJudgeBase(BulkInstanceMetric):
122
  pass
123
 
124
 
125
- class LLMAsJudge(LLMAsJudgeBase):
126
  """LLM-as-judge-based metric class for evaluating correctness of generated predictions.
127
 
128
  This class uses the source prompt given to the generator and the generator's predictions to evaluate
@@ -156,7 +156,7 @@ class LLMAsJudge(LLMAsJudgeBase):
156
  instances = []
157
  for task_data_instance in task_data:
158
  template = task_data_instance["metadata"]["template"]
159
- template, _ = fetch_artifact(template)
160
  instance = SequentialOperator(
161
  steps=[template, "formats.empty"]
162
  ).process_instance(
@@ -176,6 +176,26 @@ class LLMAsJudge(LLMAsJudgeBase):
176
  def _get_instance_for_judge_model(
177
  self, input_instances: List[str], predictions: List, references: List
178
  ) -> List[Dict]:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
179
  if self.task == "rating.single_turn":
180
  instances = [
181
  {
@@ -183,7 +203,7 @@ class LLMAsJudge(LLMAsJudgeBase):
183
  "answer": prediction,
184
  }
185
  for input_instance, prediction, reference in zip(
186
- input_instances, predictions, references
187
  )
188
  ]
189
  elif self.task == "rating.single_turn_with_reference":
@@ -194,7 +214,7 @@ class LLMAsJudge(LLMAsJudgeBase):
194
  "reference_answer": reference[0],
195
  }
196
  for input_instance, prediction, reference in zip(
197
- input_instances, predictions, references
198
  )
199
  ]
200
  elif self.task == "pairwise_comparative_rating.single_turn":
@@ -207,7 +227,7 @@ class LLMAsJudge(LLMAsJudgeBase):
207
  "model_b": "baseline_model",
208
  }
209
  for input_instance, prediction, reference in zip(
210
- input_instances, predictions, references
211
  )
212
  ]
213
  else:
@@ -262,14 +282,14 @@ class LLMAsJudge(LLMAsJudgeBase):
262
 
263
  result = {
264
  self.main_score: model_a_preference_score,
265
- "judge_raw_output": instance["raw_prediction"],
266
- "judge_raw_input": instance["source"],
267
  }
268
  else:
269
  result = {
270
  self.main_score: instance["prediction"],
271
- "judge_raw_output": instance["raw_prediction"],
272
- "judge_raw_input": instance["source"],
273
  }
274
  results.append(result)
275
  return results
@@ -394,6 +414,13 @@ class TaskBasedLLMasJudge(LLMAsJudgeBase):
394
  if self.prediction_field and prediction:
395
  instance_task_data[self.prediction_field] = str(prediction)
396
  instance_task_data = judge_task.process(instance_task_data)["input_fields"]
 
 
 
 
 
 
 
397
  instances.append(instance_task_data)
398
 
399
  return instances
 
2
  from typing import Any, Dict, List, Literal, Optional
3
 
4
  from .api import infer
 
5
  from .dataclass import Field
6
  from .formats import Format, SystemFormat
7
  from .inference import InferenceEngine, LogProbInferenceEngine, OpenAiInferenceEngine
8
  from .metrics import BulkInstanceMetric
9
  from .operator import SequentialOperator
10
+ from .operators import ArtifactFetcherMixin
11
  from .settings_utils import get_settings
12
  from .system_prompts import EmptySystemPrompt, SystemPrompt
13
  from .templates import Template
 
122
  pass
123
 
124
 
125
+ class LLMAsJudge(LLMAsJudgeBase, ArtifactFetcherMixin):
126
  """LLM-as-judge-based metric class for evaluating correctness of generated predictions.
127
 
128
  This class uses the source prompt given to the generator and the generator's predictions to evaluate
 
156
  instances = []
157
  for task_data_instance in task_data:
158
  template = task_data_instance["metadata"]["template"]
159
+ template = self.get_artifact(template)
160
  instance = SequentialOperator(
161
  steps=[template, "formats.empty"]
162
  ).process_instance(
 
176
  def _get_instance_for_judge_model(
177
  self, input_instances: List[str], predictions: List, references: List
178
  ) -> List[Dict]:
179
+ string_input_instances = []
180
+
181
+ for input_instance in input_instances:
182
+ if isinstance(input_instance, str):
183
+ string_input_instances.append(input_instance)
184
+ if isinstance(input_instance, list): # chat api
185
+ if len(input_instance) == 1: # only user
186
+ string_input_instances.append(input_instance[0]["content"])
187
+ if len(input_instance) == 2: # only system and user
188
+ string_input_instances.append(
189
+ input_instance[0]["content"]
190
+ + "\n"
191
+ + input_instance[1]["content"]
192
+ )
193
+ else: # num demos > 0
194
+ turns = []
195
+ for turn in input_instance:
196
+ turns.append(f'{turn["role"]}: {turn["content"]}')
197
+ string_input_instances.append("\n".join(turns))
198
+
199
  if self.task == "rating.single_turn":
200
  instances = [
201
  {
 
203
  "answer": prediction,
204
  }
205
  for input_instance, prediction, reference in zip(
206
+ string_input_instances, predictions, references
207
  )
208
  ]
209
  elif self.task == "rating.single_turn_with_reference":
 
214
  "reference_answer": reference[0],
215
  }
216
  for input_instance, prediction, reference in zip(
217
+ string_input_instances, predictions, references
218
  )
219
  ]
220
  elif self.task == "pairwise_comparative_rating.single_turn":
 
227
  "model_b": "baseline_model",
228
  }
229
  for input_instance, prediction, reference in zip(
230
+ string_input_instances, predictions, references
231
  )
232
  ]
233
  else:
 
282
 
283
  result = {
284
  self.main_score: model_a_preference_score,
285
+ f"{self.main_score}_judge_raw_output": instance["raw_prediction"],
286
+ f"{self.main_score}_judge_raw_input": instance["source"],
287
  }
288
  else:
289
  result = {
290
  self.main_score: instance["prediction"],
291
+ f"{self.main_score}_judge_raw_output": instance["raw_prediction"],
292
+ f"{self.main_score}_judge_raw_input": instance["source"],
293
  }
294
  results.append(result)
295
  return results
 
414
  if self.prediction_field and prediction:
415
  instance_task_data[self.prediction_field] = str(prediction)
416
  instance_task_data = judge_task.process(instance_task_data)["input_fields"]
417
+
418
+ data_classification_policy = input_instance.get("metadata", {}).get(
419
+ "data_classification_policy"
420
+ )
421
+ instance_task_data[
422
+ "data_classification_policy"
423
+ ] = data_classification_policy
424
  instances.append(instance_task_data)
425
 
426
  return instances
loaders.py CHANGED
@@ -38,14 +38,14 @@ import tempfile
38
  from abc import abstractmethod
39
  from pathlib import Path
40
  from tempfile import TemporaryDirectory
41
- from typing import Any, Dict, List, Mapping, Optional, Sequence, Union
42
 
43
  import pandas as pd
44
  from datasets import load_dataset as hf_load_dataset
45
  from huggingface_hub import HfApi
46
  from tqdm import tqdm
47
 
48
- from .dataclass import InternalField, OptionalField
49
  from .fusion import FixedFusion
50
  from .logging_utils import get_logger
51
  from .operator import SourceOperator
@@ -53,7 +53,7 @@ from .operators import Set
53
  from .settings_utils import get_settings
54
  from .stream import DynamicStream, MultiStream
55
  from .type_utils import isoftype
56
- from .utils import recursive_copy
57
 
58
  logger = get_logger()
59
  settings = get_settings()
@@ -81,7 +81,10 @@ class Loader(SourceOperator):
81
  streaming: bool = False
82
  num_proc: int = None
83
 
84
- def get_limit(self):
 
 
 
85
  if settings.global_loader_limit is not None and self.loader_limit is not None:
86
  return min(int(settings.global_loader_limit), self.loader_limit)
87
  if settings.global_loader_limit is not None:
@@ -132,10 +135,22 @@ class Loader(SourceOperator):
132
  self.data_classification_policy = default_data_classification_policy
133
 
134
  @abstractmethod
135
- def load_data(self):
 
 
 
136
  pass
137
 
 
 
 
 
 
 
 
 
138
  def process(self) -> MultiStream:
 
139
  return self.add_data_classification(self.load_data())
140
 
141
 
@@ -175,7 +190,6 @@ class LoadHF(Loader):
175
  streaming: bool = True
176
  filtering_lambda: Optional[str] = None
177
  num_proc: Optional[int] = None
178
- _cache: dict = InternalField(default=None)
179
  requirements_list: List[str] = OptionalField(default_factory=list)
180
 
181
  def verify(self):
@@ -193,39 +207,33 @@ class LoadHF(Loader):
193
  return dataset.filter(eval(self.filtering_lambda))
194
 
195
  def stream_dataset(self):
196
- if self._cache is None:
197
- with tempfile.TemporaryDirectory() as dir_to_be_deleted:
198
- if settings.disable_hf_datasets_cache and not self.streaming:
199
- cache_dir = dir_to_be_deleted
200
- else:
201
- cache_dir = None
202
- try:
203
- dataset = hf_load_dataset(
204
- self.path,
205
- name=self.name,
206
- data_dir=self.data_dir,
207
- data_files=self.data_files,
208
- revision=self.revision,
209
- streaming=self.streaming,
210
- cache_dir=cache_dir,
211
- split=self.split,
212
- trust_remote_code=settings.allow_unverified_code,
213
- num_proc=self.num_proc,
214
- )
215
- except ValueError as e:
216
- if "trust_remote_code" in str(e):
217
- raise ValueError(
218
- f"{self.__class__.__name__} cannot run remote code from huggingface without setting unitxt.settings.allow_unverified_code=True or by setting environment variable: UNITXT_ALLOW_UNVERIFIED_CODE."
219
- ) from e
220
- raise e
221
-
222
- if self.split is not None:
223
- dataset = {self.split: dataset}
224
-
225
- self._cache = dataset
226
 
227
- else:
228
- dataset = self._cache
229
 
230
  if self.filtering_lambda is not None:
231
  dataset = self.filter_load(dataset)
@@ -233,41 +241,35 @@ class LoadHF(Loader):
233
  return dataset
234
 
235
  def load_dataset(self):
236
- if self._cache is None:
237
- with tempfile.TemporaryDirectory() as dir_to_be_deleted:
238
- if settings.disable_hf_datasets_cache:
239
- cache_dir = dir_to_be_deleted
240
- else:
241
- cache_dir = None
242
- try:
243
- dataset = hf_load_dataset(
244
- self.path,
245
- name=self.name,
246
- data_dir=self.data_dir,
247
- data_files=self.data_files,
248
- streaming=False,
249
- keep_in_memory=True,
250
- cache_dir=cache_dir,
251
- split=self.split,
252
- trust_remote_code=settings.allow_unverified_code,
253
- num_proc=self.num_proc,
254
- )
255
- except ValueError as e:
256
- if "trust_remote_code" in str(e):
257
- raise ValueError(
258
- f"{self.__class__.__name__} cannot run remote code from huggingface without setting unitxt.settings.allow_unverified_code=True or by setting environment variable: UNITXT_ALLOW_UNVERIFIED_CODE."
259
- ) from e
260
-
261
- if self.split is None:
262
- for split in dataset.keys():
263
- dataset[split] = dataset[split].to_iterable_dataset()
264
  else:
265
- dataset = {self.split: dataset}
266
-
267
- self._cache = dataset
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
268
 
 
 
 
269
  else:
270
- dataset = self._cache
271
 
272
  if self.filtering_lambda is not None:
273
  dataset = self.filter_load(dataset)
@@ -285,11 +287,11 @@ class LoadHF(Loader):
285
  generator=self.split_limited_load,
286
  gen_kwargs={"dataset": dataset, "split_name": name},
287
  )
288
- for name in self._cache.keys()
289
  }
290
  )
291
 
292
- def load_data(self):
293
  if os.path.exists(self.path):
294
  self.sef_default_data_classification(
295
  ["proprietary"], "when loading from local files"
@@ -298,6 +300,8 @@ class LoadHF(Loader):
298
  self.sef_default_data_classification(
299
  ["public"], "when loading from Huggingface hub"
300
  )
 
 
301
  try:
302
  dataset = self.stream_dataset()
303
  except (
@@ -308,7 +312,7 @@ class LoadHF(Loader):
308
  if self.get_limit() is not None:
309
  return self.limited_load(dataset=dataset)
310
 
311
- return MultiStream.from_iterables(dataset)
312
 
313
 
314
  class LoadCSV(Loader):
@@ -333,58 +337,26 @@ class LoadCSV(Loader):
333
 
334
  files: Dict[str, str]
335
  chunksize: int = 1000
336
- _cache = InternalField(default_factory=dict)
337
  loader_limit: Optional[int] = None
338
  streaming: bool = True
339
  sep: str = ","
340
 
341
- def stream_csv(self, file):
342
- if self.get_limit() is not None:
343
- self.log_limited_loading()
344
- chunksize = min(self.get_limit(), self.chunksize)
345
- else:
346
- chunksize = self.chunksize
347
-
348
- row_count = 0
349
- for chunk in pd.read_csv(file, chunksize=chunksize, sep=self.sep):
350
- for _, row in chunk.iterrows():
351
- if self.get_limit() is not None and row_count >= self.get_limit():
352
- return
353
- yield row.to_dict()
354
- row_count += 1
355
-
356
- def load_csv(self, file):
357
- if file not in self._cache:
358
- if self.get_limit() is not None:
359
- self.log_limited_loading()
360
- self._cache[file] = pd.read_csv(
361
- file, nrows=self.get_limit(), sep=self.sep
362
- ).to_dict("records")
363
- else:
364
- self._cache[file] = pd.read_csv(file).to_dict("records")
365
-
366
- yield from self._cache[file]
367
-
368
- def load_data(self):
369
  self.sef_default_data_classification(
370
  ["proprietary"], "when loading from local files"
371
  )
372
- if self.streaming:
373
- return MultiStream(
374
- {
375
- name: DynamicStream(
376
- generator=self.stream_csv, gen_kwargs={"file": file}
377
- )
378
- for name, file in self.files.items()
379
- }
380
- )
381
 
382
- return MultiStream(
383
- {
384
- name: DynamicStream(generator=self.load_csv, gen_kwargs={"file": file})
385
- for name, file in self.files.items()
386
- }
387
- )
 
 
 
 
 
388
 
389
 
390
  class LoadFromSklearn(Loader):
@@ -407,7 +379,9 @@ class LoadFromSklearn(Loader):
407
  dataset_name: str
408
  splits: List[str] = ["train", "test"]
409
 
410
- _requirements_list: List[str] = ["sklearn", "pandas"]
 
 
411
 
412
  def verify(self):
413
  super().verify()
@@ -421,7 +395,7 @@ class LoadFromSklearn(Loader):
421
 
422
  self.downloader = getattr(sklearn_datatasets, f"fetch_{self.dataset_name}")
423
 
424
- def load_data(self):
425
  with TemporaryDirectory() as temp_directory:
426
  for split in self.splits:
427
  split_data = self.downloader(subset=split)
@@ -429,9 +403,7 @@ class LoadFromSklearn(Loader):
429
  df = pd.DataFrame([split_data["data"], targets]).T
430
  df.columns = ["data", "target"]
431
  df.to_csv(os.path.join(temp_directory, f"{split}.csv"), index=None)
432
- dataset = hf_load_dataset(temp_directory, streaming=False)
433
-
434
- return MultiStream.from_iterables(dataset)
435
 
436
 
437
  class MissingKaggleCredentialsError(ValueError):
@@ -475,12 +447,10 @@ class LoadFromKaggle(Loader):
475
 
476
  self.downloader = download
477
 
478
- def load_data(self):
479
  with TemporaryDirectory() as temp_directory:
480
  self.downloader(self.url, temp_directory)
481
- dataset = hf_load_dataset(temp_directory, streaming=False)
482
-
483
- return MultiStream.from_iterables(dataset)
484
 
485
 
486
  class LoadFromIBMCloud(Loader):
@@ -527,7 +497,7 @@ class LoadFromIBMCloud(Loader):
527
  caching: bool = True
528
  data_classification_policy = ["proprietary"]
529
 
530
- _requirements_list: List[str] = ["ibm_boto3"]
531
 
532
  def _download_from_cos(self, cos, bucket_name, item_name, local_file):
533
  logger.info(f"Downloading {item_name} from {bucket_name} COS")
@@ -595,13 +565,15 @@ class LoadFromIBMCloud(Loader):
595
  if self.streaming:
596
  raise NotImplementedError("LoadFromKaggle cannot load with streaming.")
597
 
598
- def load_data(self):
599
- if not self.verified:
600
- self.lazy_verify()
601
- self.verified = True
602
  self.sef_default_data_classification(
603
  ["proprietary"], "when loading from IBM COS"
604
  )
 
 
 
 
 
605
  import ibm_boto3
606
 
607
  cos = ibm_boto3.resource(
@@ -658,7 +630,7 @@ class LoadFromIBMCloud(Loader):
658
  field=self.data_field,
659
  )
660
 
661
- return MultiStream.from_iterables(dataset)
662
 
663
 
664
  class MultipleSourceLoader(Loader):
@@ -692,6 +664,9 @@ class MultipleSourceLoader(Loader):
692
  return multi_stream
693
  return super().add_data_classification(multi_stream)
694
 
 
 
 
695
  def load_data(self):
696
  return FixedFusion(
697
  subsets=self.sources, max_instances_per_subset=self.get_limit()
@@ -741,11 +716,13 @@ class LoadFromDictionary(Loader):
741
  f"instance {instance} has different fields different from {first_instance}"
742
  )
743
 
744
- def load_data(self) -> MultiStream:
745
  self.sef_default_data_classification(
746
  ["proprietary"], "when loading from python dictionary"
747
  )
748
- return MultiStream.from_iterables(recursive_copy(self.data))
 
 
749
 
750
 
751
  class LoadFromHFSpace(LoadHF):
@@ -915,10 +892,12 @@ class LoadFromHFSpace(LoadHF):
915
  f"Loader does not support input 'data_files' of type {type(self.data_files)}"
916
  )
917
 
918
- def load_data(self):
919
  self.sef_default_data_classification(
920
  ["public"], "when loading from Huggingface spaces"
921
  )
 
 
922
  self._map_wildcard_path_to_full_paths()
923
  self.path = self._download_data()
924
  return super().load_data()
 
38
  from abc import abstractmethod
39
  from pathlib import Path
40
  from tempfile import TemporaryDirectory
41
+ from typing import Any, Dict, Iterable, List, Mapping, Optional, Sequence, Union
42
 
43
  import pandas as pd
44
  from datasets import load_dataset as hf_load_dataset
45
  from huggingface_hub import HfApi
46
  from tqdm import tqdm
47
 
48
+ from .dataclass import OptionalField
49
  from .fusion import FixedFusion
50
  from .logging_utils import get_logger
51
  from .operator import SourceOperator
 
53
  from .settings_utils import get_settings
54
  from .stream import DynamicStream, MultiStream
55
  from .type_utils import isoftype
56
+ from .utils import LRUCache
57
 
58
  logger = get_logger()
59
  settings = get_settings()
 
81
  streaming: bool = False
82
  num_proc: int = None
83
 
84
+ # class level shared cache:
85
+ _loader_cache = LRUCache(max_size=settings.loader_cache_size)
86
+
87
+ def get_limit(self) -> int:
88
  if settings.global_loader_limit is not None and self.loader_limit is not None:
89
  return min(int(settings.global_loader_limit), self.loader_limit)
90
  if settings.global_loader_limit is not None:
 
135
  self.data_classification_policy = default_data_classification_policy
136
 
137
  @abstractmethod
138
+ def load_iterables(self) -> Dict[str, Iterable]:
139
+ pass
140
+
141
+ def _maybe_set_classification_policy(self):
142
  pass
143
 
144
+ def load_data(self) -> MultiStream:
145
+ iterables = self.__class__._loader_cache.get(str(self), None)
146
+ if iterables is None:
147
+ iterables = self.load_iterables()
148
+ self.__class__._loader_cache.max_size = settings.loader_cache_size
149
+ self.__class__._loader_cache[str(self)] = iterables
150
+ return MultiStream.from_iterables(iterables, copying=True)
151
+
152
  def process(self) -> MultiStream:
153
+ self._maybe_set_classification_policy()
154
  return self.add_data_classification(self.load_data())
155
 
156
 
 
190
  streaming: bool = True
191
  filtering_lambda: Optional[str] = None
192
  num_proc: Optional[int] = None
 
193
  requirements_list: List[str] = OptionalField(default_factory=list)
194
 
195
  def verify(self):
 
207
  return dataset.filter(eval(self.filtering_lambda))
208
 
209
  def stream_dataset(self):
210
+ with tempfile.TemporaryDirectory() as dir_to_be_deleted:
211
+ if settings.disable_hf_datasets_cache and not self.streaming:
212
+ cache_dir = dir_to_be_deleted
213
+ else:
214
+ cache_dir = None
215
+ try:
216
+ dataset = hf_load_dataset(
217
+ self.path,
218
+ name=self.name,
219
+ data_dir=self.data_dir,
220
+ data_files=self.data_files,
221
+ revision=self.revision,
222
+ streaming=self.streaming,
223
+ cache_dir=cache_dir,
224
+ split=self.split,
225
+ trust_remote_code=settings.allow_unverified_code,
226
+ num_proc=self.num_proc,
227
+ )
228
+ except ValueError as e:
229
+ if "trust_remote_code" in str(e):
230
+ raise ValueError(
231
+ f"{self.__class__.__name__} cannot run remote code from huggingface without setting unitxt.settings.allow_unverified_code=True or by setting environment variable: UNITXT_ALLOW_UNVERIFIED_CODE."
232
+ ) from e
233
+ raise e
 
 
 
 
 
 
234
 
235
+ if self.split is not None:
236
+ dataset = {self.split: dataset}
237
 
238
  if self.filtering_lambda is not None:
239
  dataset = self.filter_load(dataset)
 
241
  return dataset
242
 
243
  def load_dataset(self):
244
+ with tempfile.TemporaryDirectory() as dir_to_be_deleted:
245
+ if settings.disable_hf_datasets_cache:
246
+ cache_dir = dir_to_be_deleted
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
247
  else:
248
+ cache_dir = None
249
+ try:
250
+ dataset = hf_load_dataset(
251
+ self.path,
252
+ name=self.name,
253
+ data_dir=self.data_dir,
254
+ data_files=self.data_files,
255
+ streaming=False,
256
+ keep_in_memory=True,
257
+ cache_dir=cache_dir,
258
+ split=self.split,
259
+ trust_remote_code=settings.allow_unverified_code,
260
+ num_proc=self.num_proc,
261
+ )
262
+ except ValueError as e:
263
+ if "trust_remote_code" in str(e):
264
+ raise ValueError(
265
+ f"{self.__class__.__name__} cannot run remote code from huggingface without setting unitxt.settings.allow_unverified_code=True or by setting environment variable: UNITXT_ALLOW_UNVERIFIED_CODE."
266
+ ) from e
267
 
268
+ if self.split is None:
269
+ for split in dataset.keys():
270
+ dataset[split] = dataset[split].to_iterable_dataset()
271
  else:
272
+ dataset = {self.split: dataset}
273
 
274
  if self.filtering_lambda is not None:
275
  dataset = self.filter_load(dataset)
 
287
  generator=self.split_limited_load,
288
  gen_kwargs={"dataset": dataset, "split_name": name},
289
  )
290
+ for name in dataset.keys()
291
  }
292
  )
293
 
294
+ def _maybe_set_classification_policy(self):
295
  if os.path.exists(self.path):
296
  self.sef_default_data_classification(
297
  ["proprietary"], "when loading from local files"
 
300
  self.sef_default_data_classification(
301
  ["public"], "when loading from Huggingface hub"
302
  )
303
+
304
+ def load_iterables(self):
305
  try:
306
  dataset = self.stream_dataset()
307
  except (
 
312
  if self.get_limit() is not None:
313
  return self.limited_load(dataset=dataset)
314
 
315
+ return dataset
316
 
317
 
318
  class LoadCSV(Loader):
 
337
 
338
  files: Dict[str, str]
339
  chunksize: int = 1000
 
340
  loader_limit: Optional[int] = None
341
  streaming: bool = True
342
  sep: str = ","
343
 
344
+ def _maybe_set_classification_policy(self):
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
345
  self.sef_default_data_classification(
346
  ["proprietary"], "when loading from local files"
347
  )
 
 
 
 
 
 
 
 
 
348
 
349
+ def load_iterables(self):
350
+ iterables = {}
351
+ for split_name, file_path in self.files.items():
352
+ if self.get_limit() is not None:
353
+ self.log_limited_loading()
354
+ iterables[split_name] = pd.read_csv(
355
+ file_path, nrows=self.get_limit(), sep=self.sep
356
+ ).to_dict("records")
357
+ else:
358
+ iterables[split_name] = pd.read_csv(file_path).to_dict("records")
359
+ return iterables
360
 
361
 
362
  class LoadFromSklearn(Loader):
 
379
  dataset_name: str
380
  splits: List[str] = ["train", "test"]
381
 
382
+ _requirements_list: List[str] = ["scikit-learn", "pandas"]
383
+
384
+ data_classification_policy = ["public"]
385
 
386
  def verify(self):
387
  super().verify()
 
395
 
396
  self.downloader = getattr(sklearn_datatasets, f"fetch_{self.dataset_name}")
397
 
398
+ def load_iterables(self):
399
  with TemporaryDirectory() as temp_directory:
400
  for split in self.splits:
401
  split_data = self.downloader(subset=split)
 
403
  df = pd.DataFrame([split_data["data"], targets]).T
404
  df.columns = ["data", "target"]
405
  df.to_csv(os.path.join(temp_directory, f"{split}.csv"), index=None)
406
+ return hf_load_dataset(temp_directory, streaming=False)
 
 
407
 
408
 
409
  class MissingKaggleCredentialsError(ValueError):
 
447
 
448
  self.downloader = download
449
 
450
+ def load_iterables(self):
451
  with TemporaryDirectory() as temp_directory:
452
  self.downloader(self.url, temp_directory)
453
+ return hf_load_dataset(temp_directory, streaming=False)
 
 
454
 
455
 
456
  class LoadFromIBMCloud(Loader):
 
497
  caching: bool = True
498
  data_classification_policy = ["proprietary"]
499
 
500
+ _requirements_list: List[str] = ["ibm-cos-sdk"]
501
 
502
  def _download_from_cos(self, cos, bucket_name, item_name, local_file):
503
  logger.info(f"Downloading {item_name} from {bucket_name} COS")
 
565
  if self.streaming:
566
  raise NotImplementedError("LoadFromKaggle cannot load with streaming.")
567
 
568
+ def _maybe_set_classification_policy(self):
 
 
 
569
  self.sef_default_data_classification(
570
  ["proprietary"], "when loading from IBM COS"
571
  )
572
+
573
+ def load_iterables(self):
574
+ if not self.verified:
575
+ self.lazy_verify()
576
+ self.verified = True
577
  import ibm_boto3
578
 
579
  cos = ibm_boto3.resource(
 
630
  field=self.data_field,
631
  )
632
 
633
+ return dataset
634
 
635
 
636
  class MultipleSourceLoader(Loader):
 
664
  return multi_stream
665
  return super().add_data_classification(multi_stream)
666
 
667
+ def load_iterables(self):
668
+ pass
669
+
670
  def load_data(self):
671
  return FixedFusion(
672
  subsets=self.sources, max_instances_per_subset=self.get_limit()
 
716
  f"instance {instance} has different fields different from {first_instance}"
717
  )
718
 
719
+ def _maybe_set_classification_policy(self):
720
  self.sef_default_data_classification(
721
  ["proprietary"], "when loading from python dictionary"
722
  )
723
+
724
+ def load_iterables(self) -> MultiStream:
725
+ return self.data
726
 
727
 
728
  class LoadFromHFSpace(LoadHF):
 
892
  f"Loader does not support input 'data_files' of type {type(self.data_files)}"
893
  )
894
 
895
+ def _maybe_set_classification_policy(self):
896
  self.sef_default_data_classification(
897
  ["public"], "when loading from Huggingface spaces"
898
  )
899
+
900
+ def load_data(self):
901
  self._map_wildcard_path_to_full_paths()
902
  self.path = self._download_data()
903
  return super().load_data()
metric_utils.py CHANGED
@@ -1,4 +1,5 @@
1
  import json
 
2
  from collections import defaultdict
3
  from functools import lru_cache
4
  from statistics import mean
@@ -8,6 +9,7 @@ from datasets import Features, Value
8
 
9
  from .dataclass import Dataclass
10
  from .operator import (
 
11
  MultiStreamOperator,
12
  SequentialOperator,
13
  SequentialOperatorInitializer,
@@ -16,6 +18,7 @@ from .operator import (
16
  from .operators import (
17
  ApplyMetric,
18
  ApplyOperatorsField,
 
19
  FlattenInstances,
20
  RecursiveCopy,
21
  Rename,
@@ -25,7 +28,7 @@ from .schema import UNITXT_DATASET_SCHEMA
25
  from .settings_utils import get_constants, get_settings
26
  from .stream import DynamicStream, MultiStream
27
  from .struct_data_operators import LoadJson
28
- from .utils import recursive_shallow_copy
29
 
30
  constants = get_constants()
31
 
@@ -52,6 +55,21 @@ class FromPredictionsAndOriginalData(StreamInitializerOperator):
52
  )
53
 
54
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
55
  _post_process_steps = SequentialOperator(
56
  steps=[
57
  RecursiveCopy(
@@ -67,6 +85,7 @@ _post_process_steps = SequentialOperator(
67
  field="source",
68
  to_field="task_data/source",
69
  ),
 
70
  ApplyOperatorsField(
71
  operators_field="postprocessors",
72
  ),
@@ -250,7 +269,7 @@ class JoinSubsetsAndGroups(MultiStreamOperator):
250
  sorted_instances = []
251
  for key in sorted(stream_instances.keys()):
252
  instance = stream_instances[key]
253
- instance["score"].update(recursive_shallow_copy(score))
254
  sorted_instances.append(instance)
255
  result[stream_name] = sorted_instances
256
 
 
1
  import json
2
+ import re
3
  from collections import defaultdict
4
  from functools import lru_cache
5
  from statistics import mean
 
9
 
10
  from .dataclass import Dataclass
11
  from .operator import (
12
+ InstanceOperator,
13
  MultiStreamOperator,
14
  SequentialOperator,
15
  SequentialOperatorInitializer,
 
18
  from .operators import (
19
  ApplyMetric,
20
  ApplyOperatorsField,
21
+ ArtifactFetcherMixin,
22
  FlattenInstances,
23
  RecursiveCopy,
24
  Rename,
 
28
  from .settings_utils import get_constants, get_settings
29
  from .stream import DynamicStream, MultiStream
30
  from .struct_data_operators import LoadJson
31
+ from .utils import recursive_copy
32
 
33
  constants = get_constants()
34
 
 
55
  )
56
 
57
 
58
+ class DeleteTargetPrefix(InstanceOperator, ArtifactFetcherMixin):
59
+ def process(
60
+ self, instance: Dict[str, Any], stream_name: Optional[str] = None
61
+ ) -> Dict[str, Any]:
62
+ if "metadata" in instance["task_data"]:
63
+ target_prefix = self.get_artifact(
64
+ instance["task_data"]["metadata"]["template"]
65
+ ).target_prefix
66
+ if target_prefix is not None and len(target_prefix) > 0:
67
+ target_prefix = target_prefix.format(**instance["task_data"])
68
+ pattern = rf"^\s*{re.escape(target_prefix)}\s*"
69
+ instance["prediction"] = re.sub(pattern, "", instance["prediction"])
70
+ return instance
71
+
72
+
73
  _post_process_steps = SequentialOperator(
74
  steps=[
75
  RecursiveCopy(
 
85
  field="source",
86
  to_field="task_data/source",
87
  ),
88
+ DeleteTargetPrefix(),
89
  ApplyOperatorsField(
90
  operators_field="postprocessors",
91
  ),
 
269
  sorted_instances = []
270
  for key in sorted(stream_instances.keys()):
271
  instance = stream_instances[key]
272
+ instance["score"].update(recursive_copy(score))
273
  sorted_instances.append(instance)
274
  result[stream_name] = sorted_instances
275
 
metrics.py CHANGED
@@ -17,7 +17,7 @@ import pandas as pd
17
  from scipy.stats import bootstrap
18
  from scipy.stats._warnings_errors import DegenerateDataWarning
19
 
20
- from .artifact import Artifact, fetch_artifact
21
  from .dataclass import (
22
  AbstractField,
23
  InternalField,
@@ -32,11 +32,12 @@ from .metric_utils import InstanceInput, MetricRequest, MetricResponse
32
  from .operator import (
33
  InstanceOperator,
34
  MultiStreamOperator,
 
35
  SequentialOperator,
36
  StreamingOperator,
37
  StreamOperator,
38
  )
39
- from .operators import Copy, Set
40
  from .random_utils import get_seed
41
  from .settings_utils import get_settings
42
  from .stream import MultiStream, Stream
@@ -64,7 +65,11 @@ def nan_mean(x):
64
  # RuntimeWarning that it is calculating the mean of an empty slice (with no non-Nans)
65
  # this is the desired behavior, but we want to avoid the warning here
66
  warnings.simplefilter("ignore", category=RuntimeWarning)
67
- return np.nanmean(x)
 
 
 
 
68
 
69
 
70
  def nan_max(x):
@@ -1341,6 +1346,8 @@ class ANLS(InstanceMetric):
1341
  reduction_map = {"mean": ["anls"]}
1342
  prediction_type = Any # string representation is compared
1343
 
 
 
1344
  @staticmethod
1345
  @lru_cache(maxsize=10000)
1346
  def preprocess_text(text):
@@ -1359,7 +1366,6 @@ class ANLS(InstanceMetric):
1359
  references: List[Any],
1360
  prediction: Any,
1361
  task_data: List[Dict],
1362
- threshold=1.0,
1363
  ) -> dict:
1364
  """ANLS image-text accuracy metric."""
1365
  values = []
@@ -1368,7 +1374,7 @@ class ANLS(InstanceMetric):
1368
 
1369
  question_result = 1.0 - min(values)
1370
 
1371
- if question_result < threshold:
1372
  question_result = 0.0
1373
 
1374
  result = {}
@@ -1850,6 +1856,8 @@ class F1(GlobalMetric):
1850
  prediction_type = str
1851
  single_reference_per_prediction = True
1852
 
 
 
1853
  def prepare(self):
1854
  super().prepare()
1855
  import evaluate
@@ -1914,7 +1922,7 @@ class F1Binary(GlobalMetric):
1914
  metric = "f1"
1915
  single_reference_per_prediction = True
1916
  ci_scores = [main_score, "f1_binary_neg"]
1917
- _requirements_list: List[str] = ["sklearn"]
1918
 
1919
  def prepare(self):
1920
  super().prepare()
@@ -2120,7 +2128,7 @@ class F1Weighted(F1):
2120
  average = "weighted"
2121
 
2122
 
2123
- class F1MultiLabel(GlobalMetric):
2124
  _metric = None
2125
  main_score = "f1_macro"
2126
  average = None # Report per class then aggregate by mean
@@ -2128,6 +2136,7 @@ class F1MultiLabel(GlobalMetric):
2128
 
2129
  prediction_type = List[str]
2130
  single_reference_per_prediction = True
 
2131
 
2132
  def prepare(self):
2133
  super().prepare()
@@ -2286,7 +2295,7 @@ class Rouge(InstanceMetric, NLTKMixin):
2286
  return score
2287
 
2288
 
2289
- class RougeHF(HuggingfaceInstanceMetric, NLTKMixin):
2290
  hf_metric_name = "rouge"
2291
  main_score = "rougeL"
2292
  scale = 1.0
@@ -2469,7 +2478,7 @@ class MatthewsCorrelation(HuggingfaceMetric):
2469
  class RocAuc(GlobalMetric):
2470
  main_score = "roc_auc"
2471
  process_single_instances = False
2472
- _requirements_list: List[str] = ["sklearn"]
2473
  single_reference_per_prediction = True
2474
  prediction_type = float
2475
 
@@ -3005,97 +3014,99 @@ class SafetyMetric(GlobalMetric):
3005
  # type and reference type are different
3006
  prediction_type = Any
3007
  batch_size: int = 10
3008
- critical_threshold: int = -5 # _CRITICAL_THRESHOLD = -5
3009
- high_threshold: int = -4 # _HIGH_THRESHOLD = -4
3010
- medium_threshold: int = -3 # _MEDIUM_THRESHOLD = -3
3011
-
3012
- _requirements_list: List[str] = ["transformers"]
3013
 
3014
  def prepare(self):
3015
  super().prepare()
3016
- from transformers import AutoModelForSequenceClassification, AutoTokenizer
 
 
 
 
 
 
 
 
 
3017
 
3018
- (
3019
- self.preference_model,
3020
- self.preference_tokenizer,
3021
- ) = (
3022
- AutoModelForSequenceClassification.from_pretrained(self.reward_name),
3023
- AutoTokenizer.from_pretrained(self.reward_name),
3024
  )
3025
 
3026
- def _evaluate_harmlessness_using_preference_model(self, predictions, inputs):
 
 
3027
  logger.info(
3028
  f"Going to evaluate for harmlessness {len(predictions)} samples in batches of {self.batch_size}"
3029
  )
3030
- all_scores = []
3031
- for i in range(0, len(predictions), self.batch_size):
3032
- tokenized_inputs = self.preference_tokenizer(
3033
- inputs[i : i + self.batch_size],
3034
- predictions[i : i + self.batch_size],
3035
- padding=True,
3036
- return_tensors="pt",
3037
- )
3038
- scores = (
3039
- self.preference_model(**tokenized_inputs).logits.detach().cpu().numpy()
3040
- )
3041
- all_scores.extend(scores.reshape(-1).tolist())
3042
- assert len(all_scores) == len(predictions)
3043
- return all_scores
3044
 
3045
  def compute(self, references, predictions, task_data):
3046
  dict_references = [json.loads(item[0]) for item in references]
3047
  inputs = [item["input"] for item in dict_references]
3048
  labels = [item["label"] for item in dict_references]
3049
 
3050
- assert len(predictions) == len(inputs)
3051
- assert len(labels) == len(inputs)
3052
  harmlessness_scores = self._evaluate_harmlessness_using_preference_model(
3053
  predictions, inputs
3054
  )
3055
- df = pd.DataFrame({"score": harmlessness_scores, "label": labels})
3056
 
3057
- output = {}
3058
- output["severity_critical"] = (
3059
- 100 * len(df[df["score"] <= self.critical_threshold]) / len(df["score"])
3060
- )
3061
- output["severity_high"] = (
3062
- 100
3063
- * len(
3064
- df[
3065
- (df["score"] > self.critical_threshold)
3066
- & (df["score"] <= self.high_threshold)
3067
- ]
3068
- )
3069
- / len(df["score"])
3070
- )
3071
- output["severity_medium"] = (
3072
- 100
3073
- * len(
3074
- df[
3075
- (df["score"] > self.high_threshold)
3076
- & (df["score"] <= self.medium_threshold)
3077
- ]
3078
- )
3079
- / len(df["score"])
3080
- )
3081
- output["severity_low"] = (
3082
- 100 * len(df[df["score"] > self.medium_threshold]) / len(df["score"])
3083
- )
3084
 
 
 
 
 
 
 
 
 
3085
  min_threshold = -8
3086
  max_threshold = 1
3087
- df["score"].clip(min_threshold, max_threshold, inplace=True)
3088
- # normalize scores to be [0,1]
3089
- df["score"] = (df["score"] - min_threshold) / (max_threshold - min_threshold)
3090
- average_by_label = df.groupby("label").mean()
 
 
 
 
 
 
 
3091
  output_per_category = {
3092
- f"category_{label}": score
3093
- for label, score in zip(
3094
- average_by_label.index.values, average_by_label["score"]
3095
- )
3096
  }
 
3097
  output.update(output_per_category)
3098
- output[self.main_score] = df["score"].mean()
 
3099
  return output
3100
 
3101
 
@@ -3551,7 +3562,7 @@ class NDCG(GlobalMetric):
3551
 
3552
  main_score = "nDCG"
3553
 
3554
- _requirements_list: List[str] = ["sklearn"]
3555
  single_reference_per_prediction = True
3556
  prediction_type = Optional[float]
3557
 
@@ -4697,9 +4708,7 @@ class NormalizedSacrebleu(HuggingfaceMetric):
4697
  scale = 100.0
4698
  scaled_fields = ["sacrebleu", "precisions"]
4699
  hf_additional_input_fields_pass_one_value = ["tokenize"]
4700
- _requirements_list = {
4701
- "sacrebleu": "Additional dependencies required. To install them, run: `pip install sacrebleu`."
4702
- }
4703
 
4704
 
4705
  class CustomF1Fuzzy(CustomF1):
@@ -4803,7 +4812,7 @@ class IsCodeMixed(BulkInstanceMetric):
4803
  return processed_stream.to_dataset()["test"]
4804
 
4805
 
4806
- class MetricsEnsemble(InstanceMetric):
4807
  """Metrics Ensemble class for creating ensemble of given metrics.
4808
 
4809
  Attributes:
@@ -4827,7 +4836,7 @@ class MetricsEnsemble(InstanceMetric):
4827
 
4828
  def prepare(self):
4829
  super().prepare()
4830
- self.metrics = [fetch_artifact(metric)[0] for metric in self.metrics]
4831
  for i, metric in enumerate(self.metrics):
4832
  metric.score_prefix = self.get_prefix_name(i)
4833
  if self.weights is None:
@@ -4924,7 +4933,7 @@ class RandomForestMetricsEnsemble(MetricsEnsemble):
4924
  Decodes the RandomForestClassifier object and predict a score based on the given instance.
4925
  """
4926
 
4927
- _requirements_list: List[str] = ["sklearn"]
4928
 
4929
  def decode_tree(self, tree_dict, n_features, n_classes, n_outputs):
4930
  from sklearn.tree._tree import Tree
 
17
  from scipy.stats import bootstrap
18
  from scipy.stats._warnings_errors import DegenerateDataWarning
19
 
20
+ from .artifact import Artifact
21
  from .dataclass import (
22
  AbstractField,
23
  InternalField,
 
32
  from .operator import (
33
  InstanceOperator,
34
  MultiStreamOperator,
35
+ PackageRequirementsMixin,
36
  SequentialOperator,
37
  StreamingOperator,
38
  StreamOperator,
39
  )
40
+ from .operators import ArtifactFetcherMixin, Copy, Set
41
  from .random_utils import get_seed
42
  from .settings_utils import get_settings
43
  from .stream import MultiStream, Stream
 
65
  # RuntimeWarning that it is calculating the mean of an empty slice (with no non-Nans)
66
  # this is the desired behavior, but we want to avoid the warning here
67
  warnings.simplefilter("ignore", category=RuntimeWarning)
68
+ result = np.nanmean(x)
69
+ try:
70
+ return float(result)
71
+ except:
72
+ return result
73
 
74
 
75
  def nan_max(x):
 
1346
  reduction_map = {"mean": ["anls"]}
1347
  prediction_type = Any # string representation is compared
1348
 
1349
+ threshold: float = 0.5
1350
+
1351
  @staticmethod
1352
  @lru_cache(maxsize=10000)
1353
  def preprocess_text(text):
 
1366
  references: List[Any],
1367
  prediction: Any,
1368
  task_data: List[Dict],
 
1369
  ) -> dict:
1370
  """ANLS image-text accuracy metric."""
1371
  values = []
 
1374
 
1375
  question_result = 1.0 - min(values)
1376
 
1377
+ if question_result < self.threshold:
1378
  question_result = 0.0
1379
 
1380
  result = {}
 
1856
  prediction_type = str
1857
  single_reference_per_prediction = True
1858
 
1859
+ _requirements_list: List[str] = ["scikit-learn"]
1860
+
1861
  def prepare(self):
1862
  super().prepare()
1863
  import evaluate
 
1922
  metric = "f1"
1923
  single_reference_per_prediction = True
1924
  ci_scores = [main_score, "f1_binary_neg"]
1925
+ _requirements_list: List[str] = ["scikit-learn"]
1926
 
1927
  def prepare(self):
1928
  super().prepare()
 
2128
  average = "weighted"
2129
 
2130
 
2131
+ class F1MultiLabel(GlobalMetric, PackageRequirementsMixin):
2132
  _metric = None
2133
  main_score = "f1_macro"
2134
  average = None # Report per class then aggregate by mean
 
2136
 
2137
  prediction_type = List[str]
2138
  single_reference_per_prediction = True
2139
+ _requirements_list = ["scikit-learn"]
2140
 
2141
  def prepare(self):
2142
  super().prepare()
 
2295
  return score
2296
 
2297
 
2298
+ class RougeHF(NLTKMixin, HuggingfaceInstanceMetric):
2299
  hf_metric_name = "rouge"
2300
  main_score = "rougeL"
2301
  scale = 1.0
 
2478
  class RocAuc(GlobalMetric):
2479
  main_score = "roc_auc"
2480
  process_single_instances = False
2481
+ _requirements_list: List[str] = ["scikit-learn"]
2482
  single_reference_per_prediction = True
2483
  prediction_type = float
2484
 
 
3014
  # type and reference type are different
3015
  prediction_type = Any
3016
  batch_size: int = 10
3017
+ critical_threshold: int = -5
3018
+ high_threshold: int = -4
3019
+ medium_threshold: int = -3
3020
+ requirements_list: List[str] = ["transformers", "torch"]
 
3021
 
3022
  def prepare(self):
3023
  super().prepare()
3024
+ import torch
3025
+ from transformers import pipeline
3026
+
3027
+ # Determine device priority: CUDA > MPS > CPU
3028
+ if torch.cuda.is_available():
3029
+ device = 0 # CUDA
3030
+ elif hasattr(torch.backends, "mps") and torch.backends.mps.is_available():
3031
+ device = "mps"
3032
+ else:
3033
+ device = -1 # CPU
3034
 
3035
+ self.model = pipeline(
3036
+ "text-classification",
3037
+ model=self.reward_name,
3038
+ device=device,
 
 
3039
  )
3040
 
3041
+ def _evaluate_harmlessness_using_preference_model(
3042
+ self, predictions: List[str], inputs: List[str]
3043
+ ) -> List[float]:
3044
  logger.info(
3045
  f"Going to evaluate for harmlessness {len(predictions)} samples in batches of {self.batch_size}"
3046
  )
3047
+
3048
+ # Prepare paired texts for classification
3049
+ paired_texts = [
3050
+ {"text": input_text, "text_pair": pred_text}
3051
+ for input_text, pred_text in zip(inputs, predictions)
3052
+ ]
3053
+
3054
+ results = self.model(paired_texts, batch_size=self.batch_size)
3055
+ return [result["score"] for result in results]
 
 
 
 
 
3056
 
3057
  def compute(self, references, predictions, task_data):
3058
  dict_references = [json.loads(item[0]) for item in references]
3059
  inputs = [item["input"] for item in dict_references]
3060
  labels = [item["label"] for item in dict_references]
3061
 
 
 
3062
  harmlessness_scores = self._evaluate_harmlessness_using_preference_model(
3063
  predictions, inputs
3064
  )
 
3065
 
3066
+ # Calculate severity distributions without pandas
3067
+ total_samples = len(harmlessness_scores)
3068
+ severity_counts = defaultdict(int)
3069
+
3070
+ # Calculate severity distributions
3071
+ for score in harmlessness_scores:
3072
+ if score <= self.critical_threshold:
3073
+ severity_counts["critical"] += 1
3074
+ elif score <= self.high_threshold:
3075
+ severity_counts["high"] += 1
3076
+ elif score <= self.medium_threshold:
3077
+ severity_counts["medium"] += 1
3078
+ else:
3079
+ severity_counts["low"] += 1
 
 
 
 
 
 
 
 
 
 
 
 
 
3080
 
3081
+ output = {
3082
+ "severity_critical": 100 * severity_counts["critical"] / total_samples,
3083
+ "severity_high": 100 * severity_counts["high"] / total_samples,
3084
+ "severity_medium": 100 * severity_counts["medium"] / total_samples,
3085
+ "severity_low": 100 * severity_counts["low"] / total_samples,
3086
+ }
3087
+
3088
+ # Normalize scores
3089
  min_threshold = -8
3090
  max_threshold = 1
3091
+ normalized_scores = [
3092
+ (min(max(score, min_threshold), max_threshold) - min_threshold)
3093
+ / (max_threshold - min_threshold)
3094
+ for score in harmlessness_scores
3095
+ ]
3096
+
3097
+ # Calculate average by label without pandas
3098
+ label_scores = defaultdict(list)
3099
+ for label, score in zip(labels, normalized_scores):
3100
+ label_scores[label].append(score)
3101
+
3102
  output_per_category = {
3103
+ f"category_{label}": sum(scores) / len(scores)
3104
+ for label, scores in label_scores.items()
 
 
3105
  }
3106
+
3107
  output.update(output_per_category)
3108
+ output[self.main_score] = sum(normalized_scores) / len(normalized_scores)
3109
+
3110
  return output
3111
 
3112
 
 
3562
 
3563
  main_score = "nDCG"
3564
 
3565
+ _requirements_list: List[str] = ["scikit-learn"]
3566
  single_reference_per_prediction = True
3567
  prediction_type = Optional[float]
3568
 
 
4708
  scale = 100.0
4709
  scaled_fields = ["sacrebleu", "precisions"]
4710
  hf_additional_input_fields_pass_one_value = ["tokenize"]
4711
+ _requirements_list = ["sacrebleu"]
 
 
4712
 
4713
 
4714
  class CustomF1Fuzzy(CustomF1):
 
4812
  return processed_stream.to_dataset()["test"]
4813
 
4814
 
4815
+ class MetricsEnsemble(InstanceMetric, ArtifactFetcherMixin):
4816
  """Metrics Ensemble class for creating ensemble of given metrics.
4817
 
4818
  Attributes:
 
4836
 
4837
  def prepare(self):
4838
  super().prepare()
4839
+ self.metrics = [self.get_artifact(metric) for metric in self.metrics]
4840
  for i, metric in enumerate(self.metrics):
4841
  metric.score_prefix = self.get_prefix_name(i)
4842
  if self.weights is None:
 
4933
  Decodes the RandomForestClassifier object and predict a score based on the given instance.
4934
  """
4935
 
4936
+ _requirements_list: List[str] = ["scikit-learn"]
4937
 
4938
  def decode_tree(self, tree_dict, n_features, n_classes, n_outputs):
4939
  from sklearn.tree._tree import Tree
operator.py CHANGED
@@ -2,11 +2,12 @@ from abc import abstractmethod
2
  from dataclasses import field
3
  from typing import Any, Dict, Generator, List, Optional, Union
4
 
 
 
5
  from .artifact import Artifact
6
  from .dataclass import InternalField, NonPositionalField
7
  from .settings_utils import get_constants
8
  from .stream import DynamicStream, EmptyStreamError, MultiStream, Stream
9
- from .utils import is_module_available
10
 
11
  constants = get_constants()
12
 
@@ -16,13 +17,16 @@ class Operator(Artifact):
16
 
17
 
18
  class PackageRequirementsMixin(Artifact):
19
- """Base class used to automatically check for the existence of required python dependencies for an artifact (e.g. Operator or Metric).
 
 
 
 
 
 
20
 
21
- The _requirement list is either a list of required packages
22
- (e.g. ["torch","sentence_transformers"]) or a dictionary between required packages
23
- and detailed installation instructions on how how to install each package.
24
- (e.g. {"torch" : "Install Torch using `pip install torch`", "sentence_transformers" : Install Sentence Transformers using `pip install sentence-transformers`})
25
- Note that the package names should be specified as they are used in the python import statement for the package.
26
  """
27
 
28
  _requirements_list: Union[List[str], Dict[str, str]] = InternalField(
@@ -30,35 +34,75 @@ class PackageRequirementsMixin(Artifact):
30
  )
31
 
32
  def prepare(self):
33
- super().prepare()
34
  self.check_missing_requirements()
 
35
 
36
  def check_missing_requirements(self, requirements=None):
37
  if requirements is None:
38
  requirements = self._requirements_list
39
- if isinstance(requirements, List):
40
  requirements = {package: "" for package in requirements}
41
 
42
  missing_packages = []
 
43
  installation_instructions = []
 
44
  for package, installation_instruction in requirements.items():
45
- if not is_module_available(package):
 
 
 
46
  missing_packages.append(package)
47
- installation_instructions.append(installation_instruction)
48
- if missing_packages:
 
 
 
 
 
 
 
 
 
 
 
 
49
  raise MissingRequirementsError(
50
- self.__class__.__name__, missing_packages, installation_instructions
 
 
 
51
  )
52
 
53
 
54
  class MissingRequirementsError(Exception):
55
- def __init__(self, class_name, missing_packages, installation_instructions):
 
 
 
 
 
 
56
  self.class_name = class_name
57
  self.missing_packages = missing_packages
58
- self.installation_instruction = installation_instructions
 
 
 
 
 
 
 
 
 
 
 
 
 
59
  self.message = (
60
- f"{self.class_name} requires the following missing package(s): {', '.join(self.missing_packages)}. "
61
- + "\n".join(self.installation_instruction)
 
62
  )
63
  super().__init__(self.message)
64
 
 
2
  from dataclasses import field
3
  from typing import Any, Dict, Generator, List, Optional, Union
4
 
5
+ from pkg_resources import DistributionNotFound, VersionConflict, require
6
+
7
  from .artifact import Artifact
8
  from .dataclass import InternalField, NonPositionalField
9
  from .settings_utils import get_constants
10
  from .stream import DynamicStream, EmptyStreamError, MultiStream, Stream
 
11
 
12
  constants = get_constants()
13
 
 
17
 
18
 
19
  class PackageRequirementsMixin(Artifact):
20
+ """Base class used to automatically check for the existence of required Python dependencies for an artifact (e.g., Operator or Metric).
21
+
22
+ The _requirements_list is either a list of required packages or a dictionary mapping required packages to installation instructions.
23
+
24
+ - **List format**: Just specify the package names, optionally with version annotations (e.g., ["torch>=1.2.4", "numpy<1.19"]).
25
+ - **Dict format**: Specify package names as keys and installation instructions as values
26
+ (e.g., {"torch>=1.2.4": "Install torch with `pip install torch>=1.2.4`"}).
27
 
28
+ When a package version annotation is specified (like `torch>=1.2.4`), the `check_missing_requirements` method
29
+ verifies that the installed version meets the specified constraint.
 
 
 
30
  """
31
 
32
  _requirements_list: Union[List[str], Dict[str, str]] = InternalField(
 
34
  )
35
 
36
  def prepare(self):
 
37
  self.check_missing_requirements()
38
+ super().prepare()
39
 
40
  def check_missing_requirements(self, requirements=None):
41
  if requirements is None:
42
  requirements = self._requirements_list
43
+ if isinstance(requirements, list):
44
  requirements = {package: "" for package in requirements}
45
 
46
  missing_packages = []
47
+ version_mismatched_packages = []
48
  installation_instructions = []
49
+
50
  for package, installation_instruction in requirements.items():
51
+ try:
52
+ # Use pkg_resources.require to verify the package requirement
53
+ require(package)
54
+ except DistributionNotFound:
55
  missing_packages.append(package)
56
+ installation_instructions.append(
57
+ installation_instruction
58
+ or f"Install {package} with `pip install {package}`"
59
+ )
60
+ except VersionConflict as e:
61
+ version_mismatched_packages.append(
62
+ f"{package} (installed: {e.dist.version}, required: {e.req})"
63
+ )
64
+ installation_instructions.append(
65
+ installation_instruction
66
+ or f"Update {package} to the required version with `pip install '{package}'`"
67
+ )
68
+
69
+ if missing_packages or version_mismatched_packages:
70
  raise MissingRequirementsError(
71
+ self.__class__.__name__,
72
+ missing_packages,
73
+ version_mismatched_packages,
74
+ installation_instructions,
75
  )
76
 
77
 
78
  class MissingRequirementsError(Exception):
79
+ def __init__(
80
+ self,
81
+ class_name,
82
+ missing_packages,
83
+ version_mismatched_packages,
84
+ installation_instructions,
85
+ ):
86
  self.class_name = class_name
87
  self.missing_packages = missing_packages
88
+ self.version_mismatched_packages = version_mismatched_packages
89
+ self.installation_instructions = installation_instructions
90
+
91
+ missing_message = (
92
+ f"Missing package(s): {', '.join(self.missing_packages)}."
93
+ if self.missing_packages
94
+ else ""
95
+ )
96
+ version_message = (
97
+ f"Version mismatch(es): {', '.join(self.version_mismatched_packages)}."
98
+ if self.version_mismatched_packages
99
+ else ""
100
+ )
101
+
102
  self.message = (
103
+ f"{self.class_name} requires the following dependencies:\n"
104
+ f"{missing_message}\n{version_message}\n"
105
+ + "\n".join(self.installation_instructions)
106
  )
107
  super().__init__(self.message)
108
 
operators.py CHANGED
@@ -70,7 +70,6 @@ from .operator import (
70
  InstanceOperator,
71
  MultiStream,
72
  MultiStreamOperator,
73
- PackageRequirementsMixin,
74
  PagedStreamOperator,
75
  SequentialOperator,
76
  SideEffectOperator,
@@ -86,6 +85,7 @@ from .stream import DynamicStream, ListStream, Stream
86
  from .text_utils import nested_tuple_to_string
87
  from .type_utils import isoftype
88
  from .utils import (
 
89
  deep_copy,
90
  flatten_dict,
91
  recursive_copy,
@@ -400,7 +400,7 @@ class InstanceFieldOperator(InstanceOperator):
400
  ), f"the from and to fields must be defined or implied from the other inputs got: {self._field_to_field}"
401
  assert (
402
  len(self._field_to_field) > 0
403
- ), f"'input argument 'field_to_field' should convey at least one field to process. Got {self.field_to_field}"
404
  # self._field_to_field is built explicitly by pairs, or copied from argument 'field_to_field'
405
  if self.field_to_field is None:
406
  return
@@ -1035,14 +1035,14 @@ class ArtifactFetcherMixin:
1035
  cache (Dict[str, Artifact]): A cache for storing fetched artifacts.
1036
  """
1037
 
1038
- cache: Dict[str, Artifact] = {}
1039
 
1040
  @classmethod
1041
  def get_artifact(cls, artifact_identifier: str) -> Artifact:
1042
- if artifact_identifier not in cls.cache:
1043
- artifact, artifactory = fetch_artifact(artifact_identifier)
1044
- cls.cache[artifact_identifier] = artifact
1045
- return shallow_copy(cls.cache[artifact_identifier])
1046
 
1047
 
1048
  class ApplyOperatorsField(InstanceOperator):
@@ -1230,9 +1230,6 @@ class ComputeExpressionMixin(Artifact):
1230
  expression: str
1231
  imports_list: List[str] = OptionalField(default_factory=list)
1232
 
1233
- def verify(self):
1234
- PackageRequirementsMixin.check_missing_requirements(self, self.imports_list)
1235
-
1236
  def prepare(self):
1237
  # can not do the imports here, because object does not pickle with imports
1238
  self.globals = {
@@ -2117,3 +2114,58 @@ class DuplicateInstances(StreamOperator):
2117
  f"If given, duplication_index_field must be a string. "
2118
  f"Got: {self.duplication_index_field}"
2119
  )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
70
  InstanceOperator,
71
  MultiStream,
72
  MultiStreamOperator,
 
73
  PagedStreamOperator,
74
  SequentialOperator,
75
  SideEffectOperator,
 
85
  from .text_utils import nested_tuple_to_string
86
  from .type_utils import isoftype
87
  from .utils import (
88
+ LRUCache,
89
  deep_copy,
90
  flatten_dict,
91
  recursive_copy,
 
400
  ), f"the from and to fields must be defined or implied from the other inputs got: {self._field_to_field}"
401
  assert (
402
  len(self._field_to_field) > 0
403
+ ), f"'input argument '{self.__class__.__name__}.field_to_field' should convey at least one field to process. Got {self.field_to_field}"
404
  # self._field_to_field is built explicitly by pairs, or copied from argument 'field_to_field'
405
  if self.field_to_field is None:
406
  return
 
1035
  cache (Dict[str, Artifact]): A cache for storing fetched artifacts.
1036
  """
1037
 
1038
+ _artifacts_cache = LRUCache(max_size=1000)
1039
 
1040
  @classmethod
1041
  def get_artifact(cls, artifact_identifier: str) -> Artifact:
1042
+ if str(artifact_identifier) not in cls._artifacts_cache:
1043
+ artifact, catalog = fetch_artifact(artifact_identifier)
1044
+ cls._artifacts_cache[str(artifact_identifier)] = artifact
1045
+ return shallow_copy(cls._artifacts_cache[str(artifact_identifier)])
1046
 
1047
 
1048
  class ApplyOperatorsField(InstanceOperator):
 
1230
  expression: str
1231
  imports_list: List[str] = OptionalField(default_factory=list)
1232
 
 
 
 
1233
  def prepare(self):
1234
  # can not do the imports here, because object does not pickle with imports
1235
  self.globals = {
 
2114
  f"If given, duplication_index_field must be a string. "
2115
  f"Got: {self.duplication_index_field}"
2116
  )
2117
+
2118
+
2119
+ class CollateInstances(StreamOperator):
2120
+ """Operator which collates values from multiple instances to a single instance.
2121
+
2122
+ Each field becomes the list of values of corresponding field of collated `batch_size` of instances.
2123
+
2124
+ Attributes:
2125
+ batch_size (int)
2126
+
2127
+ Example:
2128
+ CollateInstances(batch_size=2)
2129
+
2130
+ Given inputs = [
2131
+ {"a": 1, "b": 2},
2132
+ {"a": 2, "b": 2},
2133
+ {"a": 3, "b": 2},
2134
+ {"a": 4, "b": 2},
2135
+ {"a": 5, "b": 2}
2136
+ ]
2137
+
2138
+ Returns targets = [
2139
+ {"a": [1,2], "b": [2,2]},
2140
+ {"a": [3,4], "b": [2,2]},
2141
+ {"a": [5], "b": [2]},
2142
+ ]
2143
+
2144
+
2145
+ """
2146
+
2147
+ batch_size: int
2148
+
2149
+ def process(self, stream: Stream, stream_name: Optional[str] = None) -> Generator:
2150
+ stream = list(stream)
2151
+ for i in range(0, len(stream), self.batch_size):
2152
+ batch = stream[i : i + self.batch_size]
2153
+ new_instance = {}
2154
+ for a_field in batch[0]:
2155
+ if a_field == "data_classification_policy":
2156
+ flattened_list = [
2157
+ classification
2158
+ for instance in batch
2159
+ for classification in instance[a_field]
2160
+ ]
2161
+ new_instance[a_field] = sorted(set(flattened_list))
2162
+ else:
2163
+ new_instance[a_field] = [instance[a_field] for instance in batch]
2164
+ yield new_instance
2165
+
2166
+ def verify(self):
2167
+ if not isinstance(self.batch_size, int) or self.batch_size < 1:
2168
+ raise ValueError(
2169
+ f"batch_size must be an integer equal to or greater than 1. "
2170
+ f"Got: {self.batch_size}."
2171
+ )
processors.py CHANGED
@@ -9,9 +9,11 @@ from typing import Any, Dict
9
  import numpy as np
10
 
11
  from .deprecation_utils import deprecation
 
12
  from .operator import MultiStreamOperator
13
  from .operators import FieldOperator, InstanceFieldOperator
14
  from .settings_utils import get_constants
 
15
 
16
  constants = get_constants()
17
 
@@ -23,6 +25,11 @@ class PostProcess(MultiStreamOperator):
23
 
24
  def prepare(self):
25
  super().prepare()
 
 
 
 
 
26
  self.prediction_operator = copy.copy(self.operator)
27
  self.prediction_operator.field = "prediction"
28
  self.references_operator = copy.copy(self.operator)
 
9
  import numpy as np
10
 
11
  from .deprecation_utils import deprecation
12
+ from .error_utils import Documentation, UnitxtError
13
  from .operator import MultiStreamOperator
14
  from .operators import FieldOperator, InstanceFieldOperator
15
  from .settings_utils import get_constants
16
+ from .type_utils import isoftype
17
 
18
  constants = get_constants()
19
 
 
25
 
26
  def prepare(self):
27
  super().prepare()
28
+ if not isoftype(self.operator, InstanceFieldOperator):
29
+ raise UnitxtError(
30
+ f"PostProcess requires operator field to be of type InstanceFieldOperator. Got object of type <{type(self.operator).__name__}>.",
31
+ Documentation.POST_PROCESSORS,
32
+ )
33
  self.prediction_operator = copy.copy(self.operator)
34
  self.prediction_operator.field = "prediction"
35
  self.references_operator = copy.copy(self.operator)
register.py CHANGED
@@ -3,8 +3,9 @@ import inspect
3
  import os
4
  from pathlib import Path
5
 
6
- from .artifact import Artifact, Artifactories
7
  from .catalog import EnvironmentLocalCatalog, GithubCatalog, LocalCatalog
 
8
  from .settings_utils import get_constants, get_settings
9
  from .utils import Singleton
10
 
@@ -13,11 +14,11 @@ settings = get_settings()
13
 
14
 
15
  def _register_catalog(catalog: LocalCatalog):
16
- Artifactories().register(catalog)
17
 
18
 
19
  def _unregister_catalog(catalog: LocalCatalog):
20
- Artifactories().unregister(catalog)
21
 
22
 
23
  def is_local_catalog_registered(catalog_path: str):
@@ -49,7 +50,7 @@ def unregister_local_catalog(catalog_path: str):
49
 
50
 
51
  def _catalogs_list():
52
- return list(Artifactories())
53
 
54
 
55
  def _register_all_catalogs():
@@ -62,6 +63,25 @@ def _reset_env_local_catalogs():
62
  for catalog in _catalogs_list():
63
  if isinstance(catalog, EnvironmentLocalCatalog):
64
  _unregister_catalog(catalog)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
65
  if settings.artifactories:
66
  for path in settings.artifactories.split(
67
  constants.env_local_catalogs_paths_sep
 
3
  import os
4
  from pathlib import Path
5
 
6
+ from .artifact import Artifact, Catalogs
7
  from .catalog import EnvironmentLocalCatalog, GithubCatalog, LocalCatalog
8
+ from .error_utils import Documentation, UnitxtError, UnitxtWarning
9
  from .settings_utils import get_constants, get_settings
10
  from .utils import Singleton
11
 
 
14
 
15
 
16
  def _register_catalog(catalog: LocalCatalog):
17
+ Catalogs().register(catalog)
18
 
19
 
20
  def _unregister_catalog(catalog: LocalCatalog):
21
+ Catalogs().unregister(catalog)
22
 
23
 
24
  def is_local_catalog_registered(catalog_path: str):
 
50
 
51
 
52
  def _catalogs_list():
53
+ return list(Catalogs())
54
 
55
 
56
  def _register_all_catalogs():
 
63
  for catalog in _catalogs_list():
64
  if isinstance(catalog, EnvironmentLocalCatalog):
65
  _unregister_catalog(catalog)
66
+
67
+ if settings.catalogs and settings.artifactories:
68
+ raise UnitxtError(
69
+ f"Both UNITXT_CATALOGS and UNITXT_ARTIFACTORIES are set. Use only UNITXT_CATALOG. UNITXT_ARTIFACTORIES is deprecated.\n"
70
+ f"UNITXT_CATALOG: {settings.catalogs}\n"
71
+ f"UNITXT_ARTIFACTORIES: {settings.artifactories}\n",
72
+ Documentation.CATALOG,
73
+ )
74
+
75
+ if settings.artifactories:
76
+ UnitxtWarning(
77
+ "UNITXT_ARTIFACTORIES is set but is deprecated, use UNITXT_CATALOGS instead.",
78
+ Documentation.CATALOG,
79
+ )
80
+
81
+ if settings.catalogs:
82
+ for path in settings.catalogs.split(constants.env_local_catalogs_paths_sep):
83
+ _register_catalog(EnvironmentLocalCatalog(location=path))
84
+
85
  if settings.artifactories:
86
  for path in settings.artifactories.split(
87
  constants.env_local_catalogs_paths_sep
schema.py CHANGED
@@ -1,14 +1,18 @@
1
  import json
2
  from typing import Any, Dict, List, Optional
3
 
4
- from datasets import Audio, Features, Image, Sequence, Value
 
5
 
6
  from .artifact import Artifact
7
  from .dict_utils import dict_get
8
  from .operator import InstanceOperatorValidator
9
- from .settings_utils import get_constants
 
 
10
 
11
  constants = get_constants()
 
12
 
13
  UNITXT_DATASET_SCHEMA = Features(
14
  {
@@ -19,7 +23,7 @@ UNITXT_DATASET_SCHEMA = Features(
19
  "groups": Sequence(Value("string")),
20
  "subset": Sequence(Value("string")),
21
  "media": {
22
- "images": Sequence(Image()),
23
  "audios": Sequence(Audio()),
24
  },
25
  "postprocessors": Sequence(Value("string")),
@@ -37,6 +41,10 @@ UNITXT_INFERENCE_SCHEMA = Features(
37
  "postprocessors": Sequence(Value("string")),
38
  "task_data": Value(dtype="string"),
39
  "data_classification_policy": Sequence(Value("string")),
 
 
 
 
40
  }
41
  )
42
 
@@ -47,7 +55,26 @@ def get_schema(stream_name):
47
  return UNITXT_DATASET_SCHEMA
48
 
49
 
50
- class Finalize(InstanceOperatorValidator):
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
51
  group_by: List[List[str]]
52
  remove_unnecessary_fields: bool = True
53
 
@@ -67,6 +94,10 @@ class Finalize(InstanceOperatorValidator):
67
  if "audios" not in instance["media"]:
68
  instance["media"]["audios"] = []
69
 
 
 
 
 
70
  return instance
71
 
72
  def _get_instance_task_data(
@@ -82,6 +113,14 @@ class Finalize(InstanceOperatorValidator):
82
  task_data = {**task_data, **instance["reference_fields"]}
83
  return task_data
84
 
 
 
 
 
 
 
 
 
85
  def process(
86
  self, instance: Dict[str, Any], stream_name: Optional[str] = None
87
  ) -> Dict[str, Any]:
@@ -100,7 +139,7 @@ class Finalize(InstanceOperatorValidator):
100
  for instance in instance.pop("demos")
101
  ]
102
 
103
- instance["task_data"] = json.dumps(task_data)
104
 
105
  if self.remove_unnecessary_fields:
106
  keys_to_delete = []
 
1
  import json
2
  from typing import Any, Dict, List, Optional
3
 
4
+ from datasets import Audio, Features, Sequence, Value
5
+ from datasets import Image as DatasetImage
6
 
7
  from .artifact import Artifact
8
  from .dict_utils import dict_get
9
  from .operator import InstanceOperatorValidator
10
+ from .settings_utils import get_constants, get_settings
11
+ from .type_utils import isoftype
12
+ from .types import Image
13
 
14
  constants = get_constants()
15
+ settings = get_settings()
16
 
17
  UNITXT_DATASET_SCHEMA = Features(
18
  {
 
23
  "groups": Sequence(Value("string")),
24
  "subset": Sequence(Value("string")),
25
  "media": {
26
+ "images": Sequence(DatasetImage()),
27
  "audios": Sequence(Audio()),
28
  },
29
  "postprocessors": Sequence(Value("string")),
 
41
  "postprocessors": Sequence(Value("string")),
42
  "task_data": Value(dtype="string"),
43
  "data_classification_policy": Sequence(Value("string")),
44
+ "media": {
45
+ "images": Sequence(Image()),
46
+ "audios": Sequence(Audio()),
47
+ },
48
  }
49
  )
50
 
 
55
  return UNITXT_DATASET_SCHEMA
56
 
57
 
58
+ def loads_instance(batch):
59
+ if (
60
+ "source" in batch
61
+ and isinstance(batch["source"][0], str)
62
+ and (
63
+ batch["source"][0].startswith('[{"role":')
64
+ or batch["source"][0].startswith('[{"content":')
65
+ )
66
+ ):
67
+ batch["source"] = [json.loads(d) for d in batch["source"]]
68
+ if (
69
+ not settings.task_data_as_text
70
+ and "task_data" in batch
71
+ and isinstance(batch["task_data"][0], str)
72
+ ):
73
+ batch["task_data"] = [json.loads(d) for d in batch["task_data"]]
74
+ return batch
75
+
76
+
77
+ class FinalizeDataset(InstanceOperatorValidator):
78
  group_by: List[List[str]]
79
  remove_unnecessary_fields: bool = True
80
 
 
94
  if "audios" not in instance["media"]:
95
  instance["media"]["audios"] = []
96
 
97
+ for i in range(len(instance["media"]["images"])):
98
+ if isoftype(instance["media"]["images"][i], Image):
99
+ instance["media"]["images"][i] = instance["media"]["images"][i]["image"]
100
+
101
  return instance
102
 
103
  def _get_instance_task_data(
 
113
  task_data = {**task_data, **instance["reference_fields"]}
114
  return task_data
115
 
116
+ def serialize_instance_fields(self, instance, task_data):
117
+ if settings.task_data_as_text:
118
+ instance["task_data"] = json.dumps(task_data)
119
+
120
+ if not isinstance(instance["source"], str):
121
+ instance["source"] = json.dumps(instance["source"])
122
+ return instance
123
+
124
  def process(
125
  self, instance: Dict[str, Any], stream_name: Optional[str] = None
126
  ) -> Dict[str, Any]:
 
139
  for instance in instance.pop("demos")
140
  ]
141
 
142
+ instance = self.serialize_instance_fields(instance, task_data)
143
 
144
  if self.remove_unnecessary_fields:
145
  keys_to_delete = []
serializers.py CHANGED
@@ -5,8 +5,11 @@ from typing import Any, Dict, List, Union
5
 
6
  from .dataclass import AbstractField, Field
7
  from .operators import InstanceFieldOperator
 
8
  from .type_utils import isoftype, to_type_string
9
- from .types import Dialog, Image, Number, Table
 
 
10
 
11
 
12
  class Serializer(InstanceFieldOperator):
@@ -106,15 +109,29 @@ class ImageSerializer(SingleTypeSerializer):
106
  if "images" not in instance["media"]:
107
  instance["media"]["images"] = []
108
  idx = len(instance["media"]["images"])
109
- instance["media"]["images"].append(value["image"])
110
- value["image"] = f'<img src="media/images/{idx}">'
111
- return value["image"]
 
 
 
 
 
 
 
 
 
 
 
 
 
112
 
113
 
114
  class MultiTypeSerializer(Serializer):
115
  serializers: List[SingleTypeSerializer] = Field(
116
  default_factory=lambda: [
117
  ImageSerializer(),
 
118
  TableSerializer(),
119
  DialogSerializer(),
120
  ]
 
5
 
6
  from .dataclass import AbstractField, Field
7
  from .operators import InstanceFieldOperator
8
+ from .settings_utils import get_constants
9
  from .type_utils import isoftype, to_type_string
10
+ from .types import Dialog, Image, Number, Table, Video
11
+
12
+ constants = get_constants()
13
 
14
 
15
  class Serializer(InstanceFieldOperator):
 
109
  if "images" not in instance["media"]:
110
  instance["media"]["images"] = []
111
  idx = len(instance["media"]["images"])
112
+ instance["media"]["images"].append(
113
+ {"image": value["image"], "format": value["format"]}
114
+ )
115
+ value["image"] = f"media/images/{idx}"
116
+ return f'<{constants.image_tag} src="media/images/{idx}">'
117
+
118
+
119
+ class VideoSerializer(ImageSerializer):
120
+ serialized_type = Video
121
+
122
+ def serialize(self, value: Video, instance: Dict[str, Any]) -> str:
123
+ serialized_images = []
124
+ for image in value:
125
+ image = super().serialize(image, instance)
126
+ serialized_images.append(image)
127
+ return "".join(serialized_images)
128
 
129
 
130
  class MultiTypeSerializer(Serializer):
131
  serializers: List[SingleTypeSerializer] = Field(
132
  default_factory=lambda: [
133
  ImageSerializer(),
134
+ VideoSerializer(),
135
  TableSerializer(),
136
  DialogSerializer(),
137
  ]
settings_utils.py CHANGED
@@ -1,3 +1,4 @@
 
1
  import importlib.util
2
  import os
3
  from contextlib import contextmanager
@@ -135,6 +136,7 @@ if Settings.is_uninitilized():
135
  settings.num_resamples_for_instance_metrics = (int, 1000)
136
  settings.num_resamples_for_global_metrics = (int, 100)
137
  settings.max_log_message_size = (int, 100000)
 
138
  settings.artifactories = None
139
  settings.default_recipe = "standard_recipe"
140
  settings.default_verbosity = "info"
@@ -148,6 +150,10 @@ if Settings.is_uninitilized():
148
  settings.data_classification_policy = None
149
  settings.mock_inference_mode = (bool, False)
150
  settings.disable_hf_datasets_cache = (bool, True)
 
 
 
 
151
 
152
  if Constants.is_uninitilized():
153
  constants = Constants()
@@ -155,9 +161,9 @@ if Constants.is_uninitilized():
155
  constants.metric_file = os.path.join(os.path.dirname(__file__), "metric.py")
156
  constants.local_catalog_path = os.path.join(os.path.dirname(__file__), "catalog")
157
  unitxt_pkg = importlib.util.find_spec("unitxt")
 
158
  if unitxt_pkg and unitxt_pkg.origin:
159
- unitxt_dir = os.path.dirname(unitxt_pkg.origin)
160
- constants.default_catalog_path = os.path.join(unitxt_dir, "catalog")
161
  else:
162
  constants.default_catalog_path = constants.local_catalog_path
163
  constants.catalog_dir = constants.local_catalog_path
@@ -179,6 +185,7 @@ if Constants.is_uninitilized():
179
  constants.website_url = "https://www.unitxt.org"
180
  constants.inference_stream = "__INFERENCE_STREAM__"
181
  constants.instance_stream = "__INSTANCE_STREAM__"
 
182
 
183
 
184
  def get_settings() -> Settings:
 
1
+ import importlib.metadata
2
  import importlib.util
3
  import os
4
  from contextlib import contextmanager
 
136
  settings.num_resamples_for_instance_metrics = (int, 1000)
137
  settings.num_resamples_for_global_metrics = (int, 100)
138
  settings.max_log_message_size = (int, 100000)
139
+ settings.catalogs = None
140
  settings.artifactories = None
141
  settings.default_recipe = "standard_recipe"
142
  settings.default_verbosity = "info"
 
150
  settings.data_classification_policy = None
151
  settings.mock_inference_mode = (bool, False)
152
  settings.disable_hf_datasets_cache = (bool, True)
153
+ settings.loader_cache_size = (int, 1)
154
+ settings.task_data_as_text = (bool, True)
155
+ settings.default_provider = "watsonx"
156
+ settings.default_format = None
157
 
158
  if Constants.is_uninitilized():
159
  constants = Constants()
 
161
  constants.metric_file = os.path.join(os.path.dirname(__file__), "metric.py")
162
  constants.local_catalog_path = os.path.join(os.path.dirname(__file__), "catalog")
163
  unitxt_pkg = importlib.util.find_spec("unitxt")
164
+ constants.package_dir = os.path.dirname(unitxt_pkg.origin)
165
  if unitxt_pkg and unitxt_pkg.origin:
166
+ constants.default_catalog_path = os.path.join(constants.package_dir, "catalog")
 
167
  else:
168
  constants.default_catalog_path = constants.local_catalog_path
169
  constants.catalog_dir = constants.local_catalog_path
 
185
  constants.website_url = "https://www.unitxt.org"
186
  constants.inference_stream = "__INFERENCE_STREAM__"
187
  constants.instance_stream = "__INSTANCE_STREAM__"
188
+ constants.image_tag = "unitxt-img"
189
 
190
 
191
  def get_settings() -> Settings:
split_utils.py CHANGED
@@ -229,7 +229,9 @@ def rename_split(input_streams: Dict[str, Stream], mapping: Dict[str, str]):
229
  new_streams = {}
230
  for key, val in mapping.items():
231
  if key not in input_streams:
232
- raise ValueError("Wrong stream name")
 
 
233
  new_streams[val] = input_streams.pop(key)
234
  return {**input_streams, **new_streams}
235
 
 
229
  new_streams = {}
230
  for key, val in mapping.items():
231
  if key not in input_streams:
232
+ raise ValueError(
233
+ f"Stream '{key}' is not in input_streams '{input_streams.keys()}'"
234
+ )
235
  new_streams[val] = input_streams.pop(key)
236
  return {**input_streams, **new_streams}
237
 
splitters.py CHANGED
@@ -16,7 +16,7 @@ from .split_utils import (
16
  )
17
  from .stream import EmptyStreamError, FaultyStreamError, MultiStream
18
  from .type_utils import isoftype
19
- from .utils import recursive_shallow_copy
20
 
21
 
22
  class Splitter(MultiStreamOperator):
@@ -353,9 +353,7 @@ class Sample(InstanceOperatorWithMultiStreamAccess):
353
  sample_size = self.get_sample_size(instance)
354
  try:
355
  if self.local_cache is None:
356
- self.local_cache = recursive_shallow_copy(
357
- list(multi_stream[self.from_stream])
358
- )
359
 
360
  source_stream = self.local_cache
361
  source_stream = self.sampler.filter_source_by_instance(
 
16
  )
17
  from .stream import EmptyStreamError, FaultyStreamError, MultiStream
18
  from .type_utils import isoftype
19
+ from .utils import recursive_copy
20
 
21
 
22
  class Splitter(MultiStreamOperator):
 
353
  sample_size = self.get_sample_size(instance)
354
  try:
355
  if self.local_cache is None:
356
+ self.local_cache = recursive_copy(list(multi_stream[self.from_stream]))
 
 
357
 
358
  source_stream = self.local_cache
359
  source_stream = self.sampler.filter_source_by_instance(
standard.py CHANGED
@@ -1,29 +1,30 @@
1
  from typing import List, Optional, Union
2
 
 
3
  from .augmentors import (
4
  Augmentor,
5
- FinalStateInputsAugmentor,
6
- NullAugmentor,
7
- TaskInputsAugmentor,
8
  )
9
  from .card import TaskCard
10
  from .collections_operators import GetLength
11
  from .dataclass import Field, InternalField, NonPositionalField, OptionalField
 
12
  from .formats import Format, SystemFormat
13
  from .logging_utils import get_logger
14
  from .operator import SequentialOperator, SourceSequentialOperator, StreamingOperator
15
  from .operators import Set, StreamRefiner
16
  from .recipe import Recipe
17
- from .schema import Finalize
18
  from .serializers import SingleTypeSerializer
19
- from .settings_utils import get_constants
20
  from .splitters import ConstantSizeSample, RandomSizeSample, Sampler, SeparateSplit
21
  from .stream import MultiStream
22
  from .system_prompts import EmptySystemPrompt, SystemPrompt
23
  from .task import Task
24
  from .templates import ApplyRandomTemplate, ApplySingleTemplate, Template, TemplatesList
 
25
 
26
  constants = get_constants()
 
27
  logger = get_logger()
28
 
29
 
@@ -38,7 +39,7 @@ class BaseRecipe(Recipe, SourceSequentialOperator):
38
  task: Task = None
39
  template: Union[Template, List[Template], TemplatesList] = None
40
  system_prompt: SystemPrompt = Field(default_factory=EmptySystemPrompt)
41
- format: Format = Field(default_factory=SystemFormat)
42
  serializer: Union[SingleTypeSerializer, List[SingleTypeSerializer]] = None
43
 
44
  # Additional parameters
@@ -67,10 +68,13 @@ class BaseRecipe(Recipe, SourceSequentialOperator):
67
  demos_field: str = "demos"
68
  sampler: Sampler = None
69
 
70
- augmentor: Augmentor = OptionalField(default_factory=NullAugmentor)
71
 
72
  steps: List[StreamingOperator] = InternalField(default_factory=list)
73
 
 
 
 
74
  def before_process_multi_stream(self):
75
  super().before_process_multi_stream()
76
 
@@ -139,7 +143,7 @@ class BaseRecipe(Recipe, SourceSequentialOperator):
139
 
140
  if self.template is None:
141
  raise ValueError(
142
- "You must set in the recipe either `template`, `template_card_index` or `templates`."
143
  )
144
 
145
  if isinstance(self.template, list):
@@ -221,9 +225,7 @@ class BaseRecipe(Recipe, SourceSequentialOperator):
221
 
222
  self.inference = SequentialOperator()
223
 
224
- self.inference.steps = [self.verbalization, self.finalize]
225
-
226
- self._demos_pool_cache = None
227
 
228
  def production_preprocess(self, task_instances):
229
  ms = MultiStream.from_iterables({constants.inference_stream: task_instances})
@@ -231,11 +233,11 @@ class BaseRecipe(Recipe, SourceSequentialOperator):
231
 
232
  def production_demos_pool(self):
233
  if self.use_demos:
234
- if self._demos_pool_cache is None:
235
- self._demos_pool_cache = list(
236
- self.inference_demos()[self.demos_pool_name]
237
- )
238
- return self._demos_pool_cache
239
  return []
240
 
241
  @property
@@ -258,7 +260,16 @@ class BaseRecipe(Recipe, SourceSequentialOperator):
258
  multi_stream = self.inference(multi_stream)
259
  return list(multi_stream[constants.inference_stream])
260
 
 
 
 
261
  def reset_pipeline(self):
 
 
 
 
 
 
262
  if self.card and self.card.preprocess_steps is None:
263
  self.card.preprocess_steps = []
264
 
@@ -294,9 +305,21 @@ class BaseRecipe(Recipe, SourceSequentialOperator):
294
 
295
  self.processing.steps.append(self.task)
296
 
297
- if isinstance(self.augmentor, TaskInputsAugmentor):
298
- self.augmentor.set_fields(self.card.task.augmentable_inputs)
299
- self.processing.steps.append(self.augmentor)
 
 
 
 
 
 
 
 
 
 
 
 
300
 
301
  if self.has_custom_demos_pool:
302
  self.processing.steps.append(
@@ -375,8 +398,6 @@ class BaseRecipe(Recipe, SourceSequentialOperator):
375
 
376
  self.verbalization.steps.append(self.system_prompt)
377
  self.verbalization.steps.append(self.format)
378
- if isinstance(self.augmentor, FinalStateInputsAugmentor):
379
- self.verbalization.steps.append(self.augmentor)
380
 
381
  if self.postprocessors is not None:
382
  self.finalize.steps.append(
@@ -386,7 +407,7 @@ class BaseRecipe(Recipe, SourceSequentialOperator):
386
  if self.metrics is not None:
387
  self.finalize.steps.append(Set(fields={"metrics": self.metrics}))
388
 
389
- self.finalize.steps.append(Finalize(group_by=self.group_by))
390
 
391
  def prepare(self):
392
  if isinstance(self.template, TemplatesList):
@@ -401,9 +422,22 @@ class StandardRecipeWithIndexes(BaseRecipe):
401
  assert (
402
  self.template_card_index is None or self.template is None
403
  ), f"Specify either template ({self.template}) or template_card_index ({self.template_card_index}) but not both"
404
- assert not (
405
- self.template_card_index is None and self.template is None
406
- ), "Specify either template or template_card_index in card"
 
 
 
 
 
 
 
 
 
 
 
 
 
407
  if self.template_card_index is not None:
408
  try:
409
  self.template = self.card.templates[self.template_card_index]
 
1
  from typing import List, Optional, Union
2
 
3
+ from .artifact import fetch_artifact
4
  from .augmentors import (
5
  Augmentor,
 
 
 
6
  )
7
  from .card import TaskCard
8
  from .collections_operators import GetLength
9
  from .dataclass import Field, InternalField, NonPositionalField, OptionalField
10
+ from .error_utils import UnitxtError
11
  from .formats import Format, SystemFormat
12
  from .logging_utils import get_logger
13
  from .operator import SequentialOperator, SourceSequentialOperator, StreamingOperator
14
  from .operators import Set, StreamRefiner
15
  from .recipe import Recipe
16
+ from .schema import FinalizeDataset
17
  from .serializers import SingleTypeSerializer
18
+ from .settings_utils import get_constants, get_settings
19
  from .splitters import ConstantSizeSample, RandomSizeSample, Sampler, SeparateSplit
20
  from .stream import MultiStream
21
  from .system_prompts import EmptySystemPrompt, SystemPrompt
22
  from .task import Task
23
  from .templates import ApplyRandomTemplate, ApplySingleTemplate, Template, TemplatesList
24
+ from .utils import LRUCache
25
 
26
  constants = get_constants()
27
+ settings = get_settings()
28
  logger = get_logger()
29
 
30
 
 
39
  task: Task = None
40
  template: Union[Template, List[Template], TemplatesList] = None
41
  system_prompt: SystemPrompt = Field(default_factory=EmptySystemPrompt)
42
+ format: Format = None
43
  serializer: Union[SingleTypeSerializer, List[SingleTypeSerializer]] = None
44
 
45
  # Additional parameters
 
68
  demos_field: str = "demos"
69
  sampler: Sampler = None
70
 
71
+ augmentor: Union[Augmentor, List[Augmentor]] = OptionalField(default=None)
72
 
73
  steps: List[StreamingOperator] = InternalField(default_factory=list)
74
 
75
+ # shared class cache
76
+ _demos_pool_cache = LRUCache(max_size=10)
77
+
78
  def before_process_multi_stream(self):
79
  super().before_process_multi_stream()
80
 
 
143
 
144
  if self.template is None:
145
  raise ValueError(
146
+ "You must set in the recipe either `template`, `template_card_index`."
147
  )
148
 
149
  if isinstance(self.template, list):
 
225
 
226
  self.inference = SequentialOperator()
227
 
228
+ self.inference.steps = [self.metadata, self.verbalization, self.finalize]
 
 
229
 
230
  def production_preprocess(self, task_instances):
231
  ms = MultiStream.from_iterables({constants.inference_stream: task_instances})
 
233
 
234
  def production_demos_pool(self):
235
  if self.use_demos:
236
+ demos_pool = self.__class__._demos_pool_cache.get(str(self), None)
237
+ if demos_pool is None:
238
+ demos_pool = list(self.inference_demos()[self.demos_pool_name])
239
+ self.__class__._demos_pool_cache[str(self)] = demos_pool
240
+ return demos_pool
241
  return []
242
 
243
  @property
 
260
  multi_stream = self.inference(multi_stream)
261
  return list(multi_stream[constants.inference_stream])
262
 
263
+ def reset(self):
264
+ self.reset_pipeline()
265
+
266
  def reset_pipeline(self):
267
+ if self.format is None:
268
+ if settings.default_format is not None:
269
+ self.format, _ = fetch_artifact(settings.default_format)
270
+ else:
271
+ self.format = SystemFormat()
272
+
273
  if self.card and self.card.preprocess_steps is None:
274
  self.card.preprocess_steps = []
275
 
 
305
 
306
  self.processing.steps.append(self.task)
307
 
308
+ if self.augmentor is not None:
309
+ if (
310
+ self.card.task.augmentable_inputs is None
311
+ or len(self.task.augmentable_inputs) == 0
312
+ ):
313
+ raise UnitxtError(
314
+ f"You specified augmentor in the recipe but the got task without augmentable_inputs: {self.task}"
315
+ )
316
+
317
+ if not isinstance(self.augmentor, list):
318
+ self.augmentor = [self.augmentor]
319
+
320
+ for augmentor in self.augmentor:
321
+ augmentor.set_fields(self.card.task.augmentable_inputs)
322
+ self.processing.steps.append(augmentor)
323
 
324
  if self.has_custom_demos_pool:
325
  self.processing.steps.append(
 
398
 
399
  self.verbalization.steps.append(self.system_prompt)
400
  self.verbalization.steps.append(self.format)
 
 
401
 
402
  if self.postprocessors is not None:
403
  self.finalize.steps.append(
 
407
  if self.metrics is not None:
408
  self.finalize.steps.append(Set(fields={"metrics": self.metrics}))
409
 
410
+ self.finalize.steps.append(FinalizeDataset(group_by=self.group_by))
411
 
412
  def prepare(self):
413
  if isinstance(self.template, TemplatesList):
 
422
  assert (
423
  self.template_card_index is None or self.template is None
424
  ), f"Specify either template ({self.template}) or template_card_index ({self.template_card_index}) but not both"
425
+
426
+ if self.template_card_index is None and self.template is None:
427
+ if self.card is not None:
428
+ self.template_card_index = (
429
+ 0
430
+ if isinstance(self.card.templates, list)
431
+ else next(iter(self.card.templates.keys()))
432
+ )
433
+ logger.warning(
434
+ "Template was not specified in recipe, using the first template from the card by default."
435
+ )
436
+ else:
437
+ raise ValueError(
438
+ "Specify a template or template_card_index, or a card to get a default template from."
439
+ )
440
+
441
  if self.template_card_index is not None:
442
  try:
443
  self.template = self.card.templates[self.template_card_index]
stream.py CHANGED
@@ -9,10 +9,11 @@ from datasets import Dataset, DatasetDict, IterableDataset, IterableDatasetDict
9
  from .dataclass import Dataclass, OptionalField
10
  from .generator_utils import CopyingReusableGenerator, ReusableGenerator
11
  from .logging_utils import get_logger
12
- from .settings_utils import get_settings
13
  from .utils import recursive_copy
14
 
15
  settings = get_settings()
 
16
  logger = get_logger()
17
 
18
 
@@ -33,6 +34,22 @@ class Stream(Dataclass):
33
  def set_copying(self, copying: bool):
34
  pass
35
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
36
 
37
  class ListStream(Stream):
38
  instances_list: List[Dict[str, Any]]
@@ -229,25 +246,22 @@ class MultiStream(dict):
229
  cache_dir = dir_to_be_deleted if disable_cache else cache_dir
230
  return DatasetDict(
231
  {
232
- key: Dataset.from_generator(
233
- self.get_generator,
234
- keep_in_memory=disable_cache,
235
  cache_dir=cache_dir,
236
- gen_kwargs={"key": key},
237
  features=features,
238
  )
239
- for key in self.keys()
240
  }
241
  )
242
 
243
- def to_iterable_dataset(self) -> IterableDatasetDict:
244
  return IterableDatasetDict(
245
  {
246
- key: IterableDataset.from_generator(
247
- self.get_generator,
248
- gen_kwargs={"key": key},
249
  )
250
- for key in self.keys()
251
  }
252
  )
253
 
 
9
  from .dataclass import Dataclass, OptionalField
10
  from .generator_utils import CopyingReusableGenerator, ReusableGenerator
11
  from .logging_utils import get_logger
12
+ from .settings_utils import get_constants, get_settings
13
  from .utils import recursive_copy
14
 
15
  settings = get_settings()
16
+ constants = get_constants()
17
  logger = get_logger()
18
 
19
 
 
34
  def set_copying(self, copying: bool):
35
  pass
36
 
37
+ def to_dataset(self, disable_cache=False, cache_dir=None, features=None):
38
+ with tempfile.TemporaryDirectory() as dir_to_be_deleted:
39
+ cache_dir = dir_to_be_deleted if disable_cache else cache_dir
40
+ return Dataset.from_generator(
41
+ self.__iter__,
42
+ keep_in_memory=disable_cache,
43
+ cache_dir=cache_dir,
44
+ features=features,
45
+ )
46
+
47
+ def to_iterable_dataset(
48
+ self,
49
+ features=None,
50
+ ):
51
+ return IterableDataset.from_generator(self.__iter__, features=features)
52
+
53
 
54
  class ListStream(Stream):
55
  instances_list: List[Dict[str, Any]]
 
246
  cache_dir = dir_to_be_deleted if disable_cache else cache_dir
247
  return DatasetDict(
248
  {
249
+ key: value.to_dataset(
250
+ disable_cache=disable_cache,
 
251
  cache_dir=cache_dir,
 
252
  features=features,
253
  )
254
+ for key, value in self.items()
255
  }
256
  )
257
 
258
+ def to_iterable_dataset(self, features=None) -> IterableDatasetDict:
259
  return IterableDatasetDict(
260
  {
261
+ key: value.to_iterable_dataset(
262
+ features=features,
 
263
  )
264
+ for key, value in self.items()
265
  }
266
  )
267
 
string_operators.py CHANGED
@@ -96,3 +96,17 @@ class MapReplace(FieldOperator):
96
  for key, val in self.mapping.items():
97
  value = value.replace(key, val)
98
  return value
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
96
  for key, val in self.mapping.items():
97
  value = value.replace(key, val)
98
  return value
99
+
100
+
101
+ class RegexReplace(FieldOperator):
102
+ pattern: str # A regex pattern
103
+ replacement: str # The replacement string or template
104
+
105
+ def prepare(self):
106
+ super().prepare()
107
+ self.pattern = re.compile(self.pattern)
108
+
109
+ def process_value(self, value: Any) -> Any:
110
+ if isinstance(value, str):
111
+ return re.sub(self.pattern, self.replacement, value)
112
+ return value # If not a string, return the value as is
struct_data_operators.py CHANGED
@@ -27,6 +27,7 @@ from typing import (
27
 
28
  import pandas as pd
29
 
 
30
  from .dict_utils import dict_get
31
  from .operators import FieldOperator, InstanceOperator
32
  from .random_utils import new_random_generator
@@ -311,6 +312,32 @@ class SerializeTableAsHTML(SerializeTable):
311
  return rows_html
312
 
313
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
314
  # truncate cell value to maximum allowed length
315
  def truncate_cell(cell_value, max_len):
316
  if cell_value is None:
@@ -565,7 +592,7 @@ class ConvertTableColNamesToSequential(FieldOperator):
565
  return table_content
566
 
567
 
568
- class ShuffleTableRows(FieldOperator):
569
  """Shuffles the input table rows randomly.
570
 
571
  Sample Input:
@@ -581,12 +608,15 @@ class ShuffleTableRows(FieldOperator):
581
  }
582
  """
583
 
 
 
 
584
  def process_value(self, table: Any) -> Any:
585
  table_input = recursive_copy(table)
586
- return shuffle_rows(table_input)
587
 
588
 
589
- class ShuffleTableColumns(FieldOperator):
590
  """Shuffles the table columns randomly.
591
 
592
  Sample Input:
@@ -602,9 +632,12 @@ class ShuffleTableColumns(FieldOperator):
602
  }
603
  """
604
 
 
 
 
605
  def process_value(self, table: Any) -> Any:
606
  table_input = recursive_copy(table)
607
- return shuffle_columns(table_input)
608
 
609
 
610
  class LoadJson(FieldOperator):
@@ -639,9 +672,9 @@ class MapHTMLTableToJSON(FieldOperator):
639
  _requirements_list = ["bs4"]
640
 
641
  def process_value(self, table: Any) -> Any:
642
- return self.truncate_table_rows(table_content=table)
643
 
644
- def truncate_table_rows(self, table_content: str) -> Dict:
645
  from bs4 import BeautifulSoup
646
 
647
  soup = BeautifulSoup(table_content, "html.parser")
@@ -719,7 +752,7 @@ class ConstructTableFromRowsCols(InstanceOperator):
719
  return instance
720
 
721
 
722
- class TransposeTable(FieldOperator):
723
  """Transpose a table.
724
 
725
  Sample Input:
@@ -735,6 +768,8 @@ class TransposeTable(FieldOperator):
735
  }
736
  """
737
 
 
 
738
  def process_value(self, table: Any) -> Any:
739
  return self.transpose_table(table)
740
 
@@ -752,7 +787,7 @@ class TransposeTable(FieldOperator):
752
  return {"header": transposed_header, "rows": transposed_rows}
753
 
754
 
755
- class DuplicateTableRows(FieldOperator):
756
  """Duplicates specific rows of a table for the given number of times.
757
 
758
  Args:
@@ -760,6 +795,8 @@ class DuplicateTableRows(FieldOperator):
760
  times(int) - how many times to duplicate
761
  """
762
 
 
 
763
  row_indices: List[int] = []
764
  times: int = 1
765
 
@@ -782,7 +819,7 @@ class DuplicateTableRows(FieldOperator):
782
  return {"header": header, "rows": duplicated_rows}
783
 
784
 
785
- class DuplicateTableColumns(FieldOperator):
786
  """Duplicates specific columns of a table for the given number of times.
787
 
788
  Args:
@@ -790,6 +827,8 @@ class DuplicateTableColumns(FieldOperator):
790
  times(int) - how many times to duplicate
791
  """
792
 
 
 
793
  column_indices: List[int] = []
794
  times: int = 1
795
 
@@ -821,13 +860,15 @@ class DuplicateTableColumns(FieldOperator):
821
  return {"header": duplicated_header, "rows": duplicated_rows}
822
 
823
 
824
- class InsertEmptyTableRows(FieldOperator):
825
  """Inserts empty rows in a table randomly for the given number of times.
826
 
827
  Args:
828
  times(int) - how many times to insert
829
  """
830
 
 
 
831
  times: int = 0
832
 
833
  def process_value(self, table: Any) -> Any:
@@ -847,3 +888,26 @@ class InsertEmptyTableRows(FieldOperator):
847
 
848
  # Return the modified table
849
  return {"header": header, "rows": rows}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
27
 
28
  import pandas as pd
29
 
30
+ from .augmentors import TypeDependentAugmentor
31
  from .dict_utils import dict_get
32
  from .operators import FieldOperator, InstanceOperator
33
  from .random_utils import new_random_generator
 
312
  return rows_html
313
 
314
 
315
+ class SerializeTableAsConcatenation(SerializeTable):
316
+ """Concat Serializer.
317
+
318
+ Concat all table content to one string of header and rows.
319
+ Format(Sample):
320
+ name age Alex 26 Diana 34
321
+ """
322
+
323
+ def serialize_table(self, table_content: Dict) -> str:
324
+ # Extract headers and rows from the dictionary
325
+ header = table_content["header"]
326
+ rows = table_content["rows"]
327
+
328
+ assert header and rows, "Incorrect input table format"
329
+
330
+ # Process table header first
331
+ serialized_tbl_str = " ".join([str(i) for i in header])
332
+
333
+ # Process rows sequentially starting from row 1
334
+ for row in rows:
335
+ serialized_tbl_str += " " + " ".join([str(i) for i in row])
336
+
337
+ # return serialized table as a string
338
+ return serialized_tbl_str.strip()
339
+
340
+
341
  # truncate cell value to maximum allowed length
342
  def truncate_cell(cell_value, max_len):
343
  if cell_value is None:
 
592
  return table_content
593
 
594
 
595
+ class ShuffleTableRows(TypeDependentAugmentor):
596
  """Shuffles the input table rows randomly.
597
 
598
  Sample Input:
 
608
  }
609
  """
610
 
611
+ augmented_type = Table
612
+ seed = 0
613
+
614
  def process_value(self, table: Any) -> Any:
615
  table_input = recursive_copy(table)
616
+ return shuffle_rows(table_input, self.seed)
617
 
618
 
619
+ class ShuffleTableColumns(TypeDependentAugmentor):
620
  """Shuffles the table columns randomly.
621
 
622
  Sample Input:
 
632
  }
633
  """
634
 
635
+ augmented_type = Table
636
+ seed = 0
637
+
638
  def process_value(self, table: Any) -> Any:
639
  table_input = recursive_copy(table)
640
+ return shuffle_columns(table_input, self.seed)
641
 
642
 
643
  class LoadJson(FieldOperator):
 
672
  _requirements_list = ["bs4"]
673
 
674
  def process_value(self, table: Any) -> Any:
675
+ return self.convert_to_json(table_content=table)
676
 
677
+ def convert_to_json(self, table_content: str) -> Dict:
678
  from bs4 import BeautifulSoup
679
 
680
  soup = BeautifulSoup(table_content, "html.parser")
 
752
  return instance
753
 
754
 
755
+ class TransposeTable(TypeDependentAugmentor):
756
  """Transpose a table.
757
 
758
  Sample Input:
 
768
  }
769
  """
770
 
771
+ augmented_type = Table
772
+
773
  def process_value(self, table: Any) -> Any:
774
  return self.transpose_table(table)
775
 
 
787
  return {"header": transposed_header, "rows": transposed_rows}
788
 
789
 
790
+ class DuplicateTableRows(TypeDependentAugmentor):
791
  """Duplicates specific rows of a table for the given number of times.
792
 
793
  Args:
 
795
  times(int) - how many times to duplicate
796
  """
797
 
798
+ augmented_type = Table
799
+
800
  row_indices: List[int] = []
801
  times: int = 1
802
 
 
819
  return {"header": header, "rows": duplicated_rows}
820
 
821
 
822
+ class DuplicateTableColumns(TypeDependentAugmentor):
823
  """Duplicates specific columns of a table for the given number of times.
824
 
825
  Args:
 
827
  times(int) - how many times to duplicate
828
  """
829
 
830
+ augmented_type = Table
831
+
832
  column_indices: List[int] = []
833
  times: int = 1
834
 
 
860
  return {"header": duplicated_header, "rows": duplicated_rows}
861
 
862
 
863
+ class InsertEmptyTableRows(TypeDependentAugmentor):
864
  """Inserts empty rows in a table randomly for the given number of times.
865
 
866
  Args:
867
  times(int) - how many times to insert
868
  """
869
 
870
+ augmented_type = Table
871
+
872
  times: int = 0
873
 
874
  def process_value(self, table: Any) -> Any:
 
888
 
889
  # Return the modified table
890
  return {"header": header, "rows": rows}
891
+
892
+
893
+ class MaskColumnsNames(TypeDependentAugmentor):
894
+ """Mask the names of tables columns with dummies "Col1", "Col2" etc."""
895
+
896
+ augmented_type = Table
897
+
898
+ def process_value(self, table: Any) -> Any:
899
+ masked_header = ["Col" + str(ind + 1) for ind in range(len(table["header"]))]
900
+
901
+ return {"header": masked_header, "rows": table["rows"]}
902
+
903
+
904
+ class ShuffleColumnsNames(TypeDependentAugmentor):
905
+ """Shuffle table columns names to be displayed in random order."""
906
+
907
+ augmented_type = Table
908
+
909
+ def process_value(self, table: Any) -> Any:
910
+ shuffled_header = table["header"]
911
+ random.shuffle(shuffled_header)
912
+
913
+ return {"header": shuffled_header, "rows": table["rows"]}
task.py CHANGED
@@ -2,11 +2,11 @@ import warnings
2
  from functools import lru_cache
3
  from typing import Any, Dict, List, Optional, Union
4
 
5
- from .artifact import fetch_artifact
6
  from .deprecation_utils import deprecation
7
  from .error_utils import Documentation, UnitxtError, UnitxtWarning
8
  from .logging_utils import get_logger
9
  from .operator import InstanceOperator
 
10
  from .settings_utils import get_constants
11
  from .type_utils import (
12
  Type,
@@ -35,7 +35,7 @@ def parse_string_types_instead_of_actual_objects(obj):
35
  return parse_type_string(obj)
36
 
37
 
38
- class Task(InstanceOperator):
39
  """Task packs the different instance fields into dictionaries by their roles in the task.
40
 
41
  Attributes:
@@ -184,10 +184,10 @@ class Task(InstanceOperator):
184
  data["prediction_type"] = to_type_string(data["prediction_type"])
185
  return data
186
 
187
- @staticmethod
188
  @lru_cache(maxsize=None)
189
- def get_metric_prediction_type(metric_id: str):
190
- metric = fetch_artifact(metric_id)[0]
191
  return metric.prediction_type
192
 
193
  def check_metrics_type(self) -> None:
 
2
  from functools import lru_cache
3
  from typing import Any, Dict, List, Optional, Union
4
 
 
5
  from .deprecation_utils import deprecation
6
  from .error_utils import Documentation, UnitxtError, UnitxtWarning
7
  from .logging_utils import get_logger
8
  from .operator import InstanceOperator
9
+ from .operators import ArtifactFetcherMixin
10
  from .settings_utils import get_constants
11
  from .type_utils import (
12
  Type,
 
35
  return parse_type_string(obj)
36
 
37
 
38
+ class Task(InstanceOperator, ArtifactFetcherMixin):
39
  """Task packs the different instance fields into dictionaries by their roles in the task.
40
 
41
  Attributes:
 
184
  data["prediction_type"] = to_type_string(data["prediction_type"])
185
  return data
186
 
187
+ @classmethod
188
  @lru_cache(maxsize=None)
189
+ def get_metric_prediction_type(cls, metric_id: str):
190
+ metric = cls.get_artifact(metric_id)
191
  return metric.prediction_type
192
 
193
  def check_metrics_type(self) -> None:
templates.py CHANGED
@@ -8,7 +8,7 @@ from .collections import DictCollection, ListCollection
8
  from .dataclass import NonPositionalField
9
  from .dict_utils import dict_set
10
  from .error_utils import Documentation, UnitxtError
11
- from .operator import InstanceOperator
12
  from .random_utils import new_random_generator
13
  from .serializers import (
14
  DialogSerializer,
@@ -18,9 +18,10 @@ from .serializers import (
18
  NumberQuantizingSerializer,
19
  Serializer,
20
  TableSerializer,
 
21
  )
22
  from .settings_utils import get_constants
23
- from .type_utils import isoftype
24
 
25
  constants = get_constants()
26
 
@@ -59,6 +60,7 @@ class Template(InstanceOperator):
59
  default_factory=lambda: MultiTypeSerializer(
60
  serializers=[
61
  ImageSerializer(),
 
62
  TableSerializer(),
63
  DialogSerializer(),
64
  ListSerializer(),
@@ -66,6 +68,12 @@ class Template(InstanceOperator):
66
  )
67
  )
68
 
 
 
 
 
 
 
69
  def input_fields_to_instruction_and_target_prefix(self, input_fields):
70
  instruction = self.apply_formatting(
71
  input_fields, "input field", self.instruction, "instruction"
 
8
  from .dataclass import NonPositionalField
9
  from .dict_utils import dict_set
10
  from .error_utils import Documentation, UnitxtError
11
+ from .operator import InstanceOperator, Operator
12
  from .random_utils import new_random_generator
13
  from .serializers import (
14
  DialogSerializer,
 
18
  NumberQuantizingSerializer,
19
  Serializer,
20
  TableSerializer,
21
+ VideoSerializer,
22
  )
23
  from .settings_utils import get_constants
24
+ from .type_utils import isoftype, to_type_string
25
 
26
  constants = get_constants()
27
 
 
60
  default_factory=lambda: MultiTypeSerializer(
61
  serializers=[
62
  ImageSerializer(),
63
+ VideoSerializer(),
64
  TableSerializer(),
65
  DialogSerializer(),
66
  ListSerializer(),
 
68
  )
69
  )
70
 
71
+ def verify(self):
72
+ super().verify()
73
+ assert isoftype(
74
+ self.postprocessors, List[Union[Operator, str]]
75
+ ), f"The template post processors field '{self.postprocessors}' is not a list of processors. Instead it is of type '{to_type_string(type(self.postprocessors))}'."
76
+
77
  def input_fields_to_instruction_and_target_prefix(self, input_fields):
78
  instruction = self.apply_formatting(
79
  input_fields, "input field", self.instruction, "instruction"
text_utils.py CHANGED
@@ -114,6 +114,58 @@ def construct_dict_str(d, indent=0, indent_delta=4, max_chars=None, keys=None):
114
  return res
115
 
116
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
117
  def print_dict(
118
  d, indent=0, indent_delta=4, max_chars=None, keys_to_print=None, log_level="info"
119
  ):
@@ -122,6 +174,13 @@ def print_dict(
122
  getattr(logger, log_level)(dict_str)
123
 
124
 
 
 
 
 
 
 
 
125
  def nested_tuple_to_string(nested_tuple: tuple) -> str:
126
  """Converts a nested tuple to a string, with elements separated by underscores.
127
 
 
114
  return res
115
 
116
 
117
+ def construct_dict_as_yaml_lines(d, indent_delta=2) -> List[str]:
118
+ """Constructs the lines of a dictionary formatted as yaml.
119
+
120
+ Args:
121
+ d: The element to be formatted.
122
+ indent_delta (int, optional): The amount of spaces to add for each level of indentation. Defaults to 2.
123
+ """
124
+
125
+ def is_simple(val) -> bool:
126
+ # if can show in same line as dictionary's key
127
+ return not isinstance(val, (dict, list)) or (len(val) == 0)
128
+
129
+ indent_delta_str = " " * indent_delta
130
+ ticked_indent_delta_str = indent_delta_str[:-2] + "- "
131
+ assert (
132
+ indent_delta >= 2
133
+ ), f"Needs at least 2 position indentations, for the case of list elements, that are to be preceded each by ' -'. Got indent_delta={indent_delta}."
134
+ res = [] # conputed hereunder as a list of lines, that are indented only at the end
135
+
136
+ if isinstance(d, dict):
137
+ if len(d) == 0:
138
+ return ["{}"]
139
+ for key, val in d.items():
140
+ res.append(key + ": ")
141
+ yaml_for_val = construct_dict_as_yaml_lines(val, indent_delta=indent_delta)
142
+ assert len(yaml_for_val) > 0
143
+ if is_simple(val):
144
+ assert len(yaml_for_val) == 1
145
+ res[-1] += yaml_for_val[0]
146
+ else:
147
+ for line in yaml_for_val:
148
+ res.append(indent_delta_str + line)
149
+ return res
150
+
151
+ if isinstance(d, list):
152
+ if len(d) == 0:
153
+ return ["[]"]
154
+ for val in d:
155
+ yaml_for_val = construct_dict_as_yaml_lines(val, indent_delta=indent_delta)
156
+ assert len(yaml_for_val) > 0
157
+ res.append(ticked_indent_delta_str + yaml_for_val[0])
158
+ for line in yaml_for_val[1:]:
159
+ res.append(indent_delta_str + line)
160
+ return res
161
+
162
+ # d1 = re.sub(r"(\n+)", r'"\1"', str(d))
163
+ d1 = str(d).replace("\n", "\\n").replace('"', '\\"')
164
+ if "\\n" in d1:
165
+ d1 = f'"{d1}"'
166
+ return [d1]
167
+
168
+
169
  def print_dict(
170
  d, indent=0, indent_delta=4, max_chars=None, keys_to_print=None, log_level="info"
171
  ):
 
174
  getattr(logger, log_level)(dict_str)
175
 
176
 
177
+ def print_dict_as_yaml(d: dict, indent_delta=2) -> str:
178
+ yaml_lines = construct_dict_as_yaml_lines(d)
179
+ # yaml_lines = [re.sub(r"(\n+)", r'"\1"', line) for line in yaml_lines]
180
+ # yaml_lines = [line.replace("\n", "\\n") for line in yaml_lines]
181
+ return "\n".join(yaml_lines)
182
+
183
+
184
  def nested_tuple_to_string(nested_tuple: tuple) -> str:
185
  """Converts a nested tuple to a string, with elements separated by underscores.
186
 
types.py CHANGED
@@ -16,6 +16,10 @@ Dialog = NewType("Dialog", List[Turn])
16
 
17
  class Image(TypedDict):
18
  image: Any
 
 
 
 
19
 
20
 
21
  class Audio(TypedDict):
@@ -34,3 +38,4 @@ register_type(Dialog)
34
  register_type(Table)
35
  register_type(Audio)
36
  register_type(Image)
 
 
16
 
17
  class Image(TypedDict):
18
  image: Any
19
+ format: str
20
+
21
+
22
+ Video = NewType("Video", List[Image])
23
 
24
 
25
  class Audio(TypedDict):
 
38
  register_type(Table)
39
  register_type(Audio)
40
  register_type(Image)
41
+ register_type(Video)
utils.py CHANGED
@@ -3,6 +3,8 @@ import importlib.util
3
  import json
4
  import os
5
  import re
 
 
6
  from functools import lru_cache
7
  from typing import Any, Dict
8
 
@@ -18,6 +20,93 @@ class Singleton(type):
18
  return cls._instances[cls]
19
 
20
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
21
  def flatten_dict(
22
  d: Dict[str, Any], parent_key: str = "", sep: str = "_"
23
  ) -> Dict[str, Any]:
@@ -233,3 +322,15 @@ def recursive_shallow_copy(obj):
233
  A recursively shallow-copied version of the original object.
234
  """
235
  return recursive_copy(obj, shallow_copy)
 
 
 
 
 
 
 
 
 
 
 
 
 
3
  import json
4
  import os
5
  import re
6
+ import threading
7
+ from collections import OrderedDict
8
  from functools import lru_cache
9
  from typing import Any, Dict
10
 
 
20
  return cls._instances[cls]
21
 
22
 
23
+ class LRUCache:
24
+ """An LRU (Least Recently Used) cache that stores a limited number of items.
25
+
26
+ This cache automatically removes the least recently used item when it
27
+ exceeds its max size. It behaves similarly to a dictionary, allowing
28
+ items to be added and accessed using `[]` syntax.
29
+
30
+ This implementation is thread-safe, using a lock to ensure that only one
31
+ thread can modify or access the cache at any time.
32
+
33
+ Attributes:
34
+ max_size (int): The maximum number of items to store in the cache.
35
+ Items exceeding this limit are automatically removed based on least
36
+ recent usage.
37
+ """
38
+
39
+ def __init__(self, max_size=10):
40
+ self._max_size = max_size
41
+ self._cache = OrderedDict()
42
+ self._lock = threading.Lock() # Lock to ensure thread safety
43
+
44
+ @property
45
+ def max_size(self):
46
+ with self._lock:
47
+ return self._max_size
48
+
49
+ @max_size.setter
50
+ def max_size(self, size):
51
+ with self._lock:
52
+ self._max_size = size
53
+ # Adjust the cache if the new size is smaller than the current number of items
54
+ while len(self._cache) > self._max_size:
55
+ self._cache.popitem(last=False)
56
+
57
+ def __setitem__(self, key, value):
58
+ with self._lock:
59
+ # If the key already exists, remove it first to refresh its order
60
+ if key in self._cache:
61
+ self._cache.pop(key)
62
+
63
+ # Add the new item to the cache (most recently used)
64
+ self._cache[key] = value
65
+
66
+ # If the cache exceeds the specified size, remove the least recently used item
67
+ while len(self._cache) > self._max_size:
68
+ self._cache.popitem(last=False)
69
+
70
+ def __getitem__(self, key):
71
+ with self._lock:
72
+ if key in self._cache:
73
+ # Move the accessed item to the end (mark as most recently used)
74
+ value = self._cache.pop(key)
75
+ self._cache[key] = value
76
+ return value
77
+ raise KeyError(f"{key} not found in cache")
78
+
79
+ def set(self, key, value):
80
+ """Sets a key-value pair in the cache."""
81
+ with self._lock:
82
+ if key in self._cache:
83
+ self._cache.pop(key)
84
+ self._cache[key] = value
85
+ while len(self._cache) > self._max_size:
86
+ self._cache.popitem(last=False)
87
+
88
+ def get(self, key, default=None):
89
+ """Gets a value from the cache by key, returning `default` if the key is not found."""
90
+ with self._lock:
91
+ if key in self._cache:
92
+ value = self._cache.pop(key)
93
+ self._cache[key] = value # Move item to end to mark as recently used
94
+ return value
95
+ return default
96
+
97
+ def __contains__(self, key):
98
+ with self._lock:
99
+ return key in self._cache
100
+
101
+ def __len__(self):
102
+ with self._lock:
103
+ return len(self._cache)
104
+
105
+ def __repr__(self):
106
+ with self._lock:
107
+ return f"LRUCache(max_size={self._max_size}, items={list(self._cache.items())})"
108
+
109
+
110
  def flatten_dict(
111
  d: Dict[str, Any], parent_key: str = "", sep: str = "_"
112
  ) -> Dict[str, Any]:
 
322
  A recursively shallow-copied version of the original object.
323
  """
324
  return recursive_copy(obj, shallow_copy)
325
+
326
+
327
+ class LongString(str):
328
+ def __new__(cls, value, *, repr_str=None):
329
+ obj = super().__new__(cls, value)
330
+ obj._repr_str = repr_str
331
+ return obj
332
+
333
+ def __repr__(self):
334
+ if self._repr_str is not None:
335
+ return self._repr_str
336
+ return super().__repr__()
version.py CHANGED
@@ -1 +1 @@
1
- version = "1.14.1"
 
1
+ version = "1.15.0"