| text (string, 96–319k chars) | id (string, 14–178 chars) | metadata (dict) |
|---|---|---|
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
def get_dataloaders(model_name: str, batch_size: int = 16):
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained(model_name)
datasets = load_dataset("glue", "mrpc")
def tokenize_function(examples):
# max_length=None => use the model max length (it's actually the default)
outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
tokenized_datasets = datasets.map(
tokenize_function,
batched=True,
remove_columns=["idx", "sentence1", "sentence2"],
)
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
tokenized_datasets = tokenized_datasets.rename_column("label", "labels")
def collate_fn(examples):
return tokenizer.pad(
examples,
padding="longest",
pad_to_multiple_of=16, # Specific for FP8
return_tensors="pt",
)
# Instantiate dataloaders.
train_dataloader = DataLoader(
tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size, drop_last=True
)
eval_dataloader = DataLoader(
tokenized_datasets["validation"],
shuffle=False,
collate_fn=collate_fn,
batch_size=16,
drop_last=True,
)
return train_dataloader, eval_dataloader
def get_training_utilities(model_name: str, batch_size: int = 16, accelerator=None):
"""
Returns a tuple of:
- Model
- Optimizer
- Train dataloader (prepared)
- Eval dataloader (prepared)
- LR Scheduler
Suitable for training on the MRPC dataset
"""
from torch.optim import AdamW
from transformers import AutoModelForSequenceClassification, get_linear_schedule_with_warmup
from accelerate import Accelerator
if accelerator is None:
accelerator = Accelerator()
model = AutoModelForSequenceClassification.from_pretrained(model_name)
train_dataloader, eval_dataloader = get_dataloaders(model_name, batch_size)
optimizer = AdamW(model.parameters(), lr=0.0001)
lr_scheduler = get_linear_schedule_with_warmup(
optimizer=optimizer,
num_warmup_steps=100,
num_training_steps=len(train_dataloader) * 2,
)
train_dataloader, eval_dataloader = accelerator.prepare(train_dataloader, eval_dataloader)
return model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
def get_named_parameters(model):
"""
Same thing as `Accelerator.get_named_parameters`. Returns a dict of the named parameters of the model (extracted
from any parallel wrapper).
"""
from accelerate.utils import extract_model_from_parallel
model = extract_model_from_parallel(model)
return {n: p for n, p in model.named_parameters()}
def evaluate_model(model, dataloader, metric, accelerator=None):
"Turns model to .eval(), runs dataloader, calculates metric, then turns eval back on"
model.eval()
for step, batch in enumerate(dataloader):
with torch.no_grad():
outputs = model(**batch)
predictions = outputs.logits.argmax(dim=-1)
references = batch["labels"]
if accelerator is not None and accelerator.num_processes > 1:
predictions, references = accelerator.gather_for_metrics((predictions, references))
metric.add_batch(predictions=predictions, references=references)
return metric.compute()
| accelerate/benchmarks/fp8/transformer_engine/fp8_utils.py/0 | {
"file_path": "accelerate/benchmarks/fp8/transformer_engine/fp8_utils.py",
"repo_id": "accelerate",
"token_count": 1601
} |
<!--Copyright 2021 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# The Command Line
Below is a list of all the available commands in 🤗 Accelerate with their parameters.
## accelerate config
**Command**:
`accelerate config` or `accelerate-config`
Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should
always be run first on your machine.
**Usage**:
```bash
accelerate config [arguments]
```
**Optional Arguments**:
* `--config_file CONFIG_FILE` (`str`) -- The path to use to store the config file. Will default to a file named default_config.yaml in the cache location, which is the content
of the environment variable `HF_HOME` suffixed with 'accelerate', or if you don't have such an environment variable, your cache directory
(`~/.cache` or the content of `XDG_CACHE_HOME`) suffixed with `huggingface`.
* `-h`, `--help` (`bool`) -- Show a help message and exit
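For example, to answer the interactive prompts and store the resulting file somewhere other than the default cache location (the path below is purely illustrative):
```bash
accelerate config --config_file ~/configs/my_accelerate_config.yaml
```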
## accelerate config default
**Command**:
`accelerate config default` or `accelerate-config default`
Create a default config file for Accelerate with only a few flags set.
**Usage**:
```bash
accelerate config default [arguments]
```
**Optional Arguments**:
* `--config_file CONFIG_FILE` (`str`) -- The path to use to store the config file. Will default to a file named default_config.yaml in the cache location, which is the content
of the environment variable `HF_HOME` suffixed with 'accelerate', or if you don't have such an environment variable, your cache directory
(`~/.cache` or the content of `XDG_CACHE_HOME`) suffixed with `huggingface`.
* `-h`, `--help` (`bool`) -- Show a help message and exit
* `--mixed_precision {no,fp16,bf16}` (`str`) -- Whether or not to use mixed precision training. Choose between FP16 and BF16 (bfloat16) training. BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.
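For example, a quick, non-interactive way to generate a default single-machine configuration that trains in bf16 (assuming your hardware supports it) is:
```bash
accelerate config default --mixed_precision bf16
```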
## accelerate config update
**Command**:
`accelerate config update` or `accelerate-config update`
Update an existing config file with the latest defaults while maintaining the old configuration.
**Usage**:
```bash
accelerate config update [arguments]
```
**Optional Arguments**:
* `--config_file CONFIG_FILE` (`str`) -- The path to the config file to update. Will default to a file named default_config.yaml in the cache location, which is the content
of the environment variable `HF_HOME` suffixed with 'accelerate', or if you don't have such an environment variable, your cache directory
(`~/.cache` or the content of `XDG_CACHE_HOME`) suffixed with `huggingface`.
* `-h`, `--help` (`bool`) -- Show a help message and exit
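For example, to refresh an existing configuration file in place (the path below is illustrative; omit `--config_file` to update the default one):
```bash
accelerate config update --config_file ~/.cache/huggingface/accelerate/default_config.yaml
```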
## accelerate env
**Command**:
`accelerate env` or `accelerate-env` or `python -m accelerate.commands.env`
Lists the contents of the passed 🤗 Accelerate configuration file. Should always be used when opening an issue on the [GitHub repository](https://github.com/huggingface/accelerate).
**Usage**:
```bash
accelerate env [arguments]
```
**Optional Arguments**:
* `--config_file CONFIG_FILE` (`str`) -- The path to use to store the config file. Will default to a file named default_config.yaml in the cache location, which is the content
of the environment variable `HF_HOME` suffixed with 'accelerate', or if you don't have such an environment variable, your cache directory
(`~/.cache` or the content of `XDG_CACHE_HOME`) suffixed with `huggingface`.
* `-h`, `--help` (`bool`) -- Show a help message and exit
## accelerate launch
**Command**:
`accelerate launch` or `accelerate-launch` or `python -m accelerate.commands.launch`
Launches a specified script on a distributed system with the right parameters.
**Usage**:
```bash
accelerate launch [arguments] {training_script} --{training_script-argument-1} --{training_script-argument-2} ...
```
**Positional Arguments**:
- `{training_script}` -- The full path to the script to be launched in parallel
- `--{training_script-argument-1}` -- Arguments of the training script
**Optional Arguments**:
* `-h`, `--help` (`bool`) -- Show a help message and exit
* `--config_file CONFIG_FILE` (`str`) -- The config file to use for the default values in the launching script.
* `-m`, `--module` (`bool`) -- Change each process to interpret the launch script as a Python module, executing with the same behavior as 'python -m'.
* `--no_python` (`bool`) -- Skip prepending the training script with 'python' - just execute it directly. Useful when the script is not a Python script.
* `--debug` (`bool`) -- Whether to print out the torch.distributed stack trace when something fails.
* `-q`, `--quiet` (`bool`) -- Silence subprocess errors from the launch stack trace to only show the relevant tracebacks. (Only applicable to DeepSpeed and single-process configurations).
The rest of these arguments are configured through `accelerate config` and are read in from the specified `--config_file` (or default configuration) for their
values. They can also be passed in manually.
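For example, a hypothetical two-GPU fp16 run of a script called `train.py` (the script name and its arguments are placeholders for your own) might look like:
```bash
accelerate launch --multi_gpu --num_processes 2 --mixed_precision fp16 train.py --batch_size 32
```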
**Hardware Selection Arguments**:
* `--cpu` (`bool`) -- Whether or not to force the training on the CPU.
* `--multi_gpu` (`bool`) -- Whether or not this should launch a distributed GPU training.
* `--tpu` (`bool`) -- Whether or not this should launch a TPU training.
* `--ipex` (`bool`) -- Whether or not this should launch an Intel PyTorch Extension (IPEX) training.
**Resource Selection Arguments**:
The following arguments are useful for fine-tuning how available hardware should be used:
* `--mixed_precision {no,fp16,bf16,fp8}` (`str`) -- Whether or not to use mixed precision training. Choose between FP16, BF16 (bfloat16), and FP8 training. BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.
* `--num_processes NUM_PROCESSES` (`int`) -- The total number of processes to be launched in parallel.
* `--num_machines NUM_MACHINES` (`int`) -- The total number of machines used in this training.
* `--num_cpu_threads_per_process NUM_CPU_THREADS_PER_PROCESS` (`int`) -- The number of CPU threads per process. Can be tuned for optimal performance.
* `--enable_cpu_affinity` (`bool`) -- Whether or not CPU affinity and balancing should be enabled. Currently only supported on NVIDIA hardware.
**Training Paradigm Arguments**:
The following arguments are useful for selecting which training paradigm to use.
* `--use_deepspeed` (`bool`) -- Whether or not to use DeepSpeed for training.
* `--use_fsdp` (`bool`) -- Whether or not to use FullyShardedDataParallel for training.
* `--use_megatron_lm` (`bool`) -- Whether or not to use Megatron-LM for training.
* `--use_xpu` (`bool`) -- Whether to use IPEX plugin to speed up training on XPU specifically.
**Distributed GPU Arguments**:
The following arguments are only useful when `multi_gpu` is passed or multi-gpu training is configured through `accelerate config`:
* `--gpu_ids` (`str`) -- What GPUs (by id) should be used for training on this machine as a comma-separated list.
* `--same_network` (`bool`) -- Whether all machines used for multinode training exist on the same local network.
* `--machine_rank` (`int`) -- The rank of the machine on which this script is launched.
* `--main_process_ip` (`str`) -- The IP address of the machine of rank 0.
* `--main_process_port` (`int`) -- The port to use to communicate with the machine of rank 0.
* `-t`, `--tee` (`str`) -- Tee std streams into a log file and also to console.
* `--log_dir` (`str`) -- Base directory to use for log files when using torchrun/torch.distributed.run as launcher. Use with --tee to redirect std streams info log files.
* `--role` (`str`) -- User-defined role for the workers.
* `--rdzv_backend` (`str`) -- The rendezvous method to use, such as 'static' (the default) or 'c10d'
* `--rdzv_conf` (`str`) -- Additional rendezvous configuration (<key1>=<value1>,<key2>=<value2>,...).
* `--max_restarts` (`int`) -- Maximum number of worker group restarts before failing.
* `--monitor_interval` (`int`) -- Interval, in seconds, to monitor the state of workers.
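As an illustration, a two-machine launch (all values below are placeholders for your own cluster) could be started on the rank-0 machine with:
```bash
accelerate launch --multi_gpu --num_machines 2 --num_processes 16 \
  --machine_rank 0 --main_process_ip 192.168.1.2 --main_process_port 29500 \
  train.py
```
and repeated on the second machine with `--machine_rank 1` and the same IP and port.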
**TPU Arguments**:
The following arguments are only useful when `tpu` is passed or TPU training is configured through `accelerate config`:
* `--tpu_cluster` (`bool`) -- Whether to use a GCP TPU pod for training.
* `--tpu_use_sudo` (`bool`) -- Whether to use `sudo` when running the TPU training script in each pod.
* `--vm` (`str`) -- List of single Compute VM instance names. If not provided we assume usage of instance groups. For TPU pods.
* `--env` (`str`) -- List of environment variables to set on the Compute VM instances. For TPU pods.
* `--main_training_function` (`str`) -- The name of the main function to be executed in your script (only for TPU training).
* `--downcast_bf16` (`bool`) -- Whether, when using bf16 precision on TPUs, both float and double tensors are cast to bfloat16, or double tensors remain as float32.
**DeepSpeed Arguments**:
The following arguments are only useful when `use_deepspeed` is passed or `deepspeed` is configured through `accelerate config`:
* `--deepspeed_config_file` (`str`) -- DeepSpeed config file.
* `--zero_stage` (`int`) -- DeepSpeed's ZeRO optimization stage.
* `--offload_optimizer_device` (`str`) -- Decides where (none|cpu|nvme) to offload optimizer states.
* `--offload_param_device` (`str`) -- Decides where (none|cpu|nvme) to offload parameters.
* `--offload_optimizer_nvme_path` (`str`) -- Decides the NVMe path to offload optimizer states to.
* `--gradient_accumulation_steps` (`int`) -- Number of gradient accumulation steps used in your training script.
* `--gradient_clipping` (`float`) -- Gradient clipping value used in your training script.
* `--zero3_init_flag` (`str`) -- Decides whether (true|false) to enable `deepspeed.zero.Init` for constructing massive models. Only applicable with DeepSpeed ZeRO Stage-3.
* `--zero3_save_16bit_model` (`str`) -- Decides whether (true|false) to save 16-bit model weights when using ZeRO Stage-3. Only applicable with DeepSpeed ZeRO Stage-3.
* `--deepspeed_hostfile` (`str`) -- DeepSpeed hostfile for configuring multi-node compute resources.
* `--deepspeed_exclusion_filter` (`str`) -- DeepSpeed exclusion filter string when using multi-node setup.
* `--deepspeed_inclusion_filter` (`str`) -- DeepSpeed inclusion filter string when using multi-node setup.
* `--deepspeed_multinode_launcher` (`str`) -- DeepSpeed multi-node launcher to use.
* `--deepspeed_moe_layer_cls_names` (`str`) -- comma-separated list of transformer MoE layer class names (case-sensitive) to wrap, e.g. `MixtralSparseMoeBlock`, `Qwen2MoeSparseMoeBlock`, `JetMoEAttention,JetMoEBlock`
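For example, a sketch of a ZeRO Stage-2 launch with CPU optimizer offload (the script name is a placeholder, and a `--deepspeed_config_file` can be used instead of the individual flags):
```bash
accelerate launch --use_deepspeed --zero_stage 2 --offload_optimizer_device cpu \
  --gradient_accumulation_steps 4 --gradient_clipping 1.0 train.py
```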
**Fully Sharded Data Parallelism Arguments**:
The following arguments are only useful when `use_fsdp` is passed or Fully Sharded Data Parallelism is configured through `accelerate config`:
* `--fsdp_offload_params` (`str`) -- Decides whether (true|false) to offload parameters and gradients to CPU.
* `--fsdp_min_num_params` (`int`) -- FSDP's minimum number of parameters for Default Auto Wrapping.
* `--fsdp_sharding_strategy` (`int`) -- FSDP's Sharding Strategy.
* `--fsdp_auto_wrap_policy` (`str`) -- FSDP's auto wrap policy.
* `--fsdp_transformer_layer_cls_to_wrap` (`str`) -- Transformer layer class name (case-sensitive) to wrap, e.g. `BertLayer`, `GPTJBlock`, `T5Block` ...
* `--fsdp_backward_prefetch_policy` (`str`) -- FSDP's backward prefetch policy.
* `--fsdp_state_dict_type` (`str`) -- FSDP's state dict type.
* `--fsdp_forward_prefetch` (`str`) -- FSDP's forward prefetch.
* `--fsdp_use_orig_params` (`str`) -- If True, allows non-uniform `requires_grad` mixed in an FSDP unit.
* `--fsdp_cpu_ram_efficient_loading` (`str`) -- If true, only the first process loads the pretrained model checkpoint while all other processes have empty weights. When using this, `--fsdp_sync_module_states` needs to be True.
* `--fsdp_sync_module_states` (`str`) -- If true, each individually wrapped FSDP unit will broadcast module parameters from rank 0.
* `--fsdp_activation_checkpointing` (`bool`) -- Decides whether intermediate activations are freed during the forward pass, and a checkpoint is left as a placeholder.
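For example, a sketch of an FSDP launch that wraps a BERT-style model by its transformer layer class (the class name, script name, and values are illustrative):
```bash
accelerate launch --use_fsdp --fsdp_sharding_strategy 1 \
  --fsdp_auto_wrap_policy TRANSFORMER_BASED_WRAP \
  --fsdp_transformer_layer_cls_to_wrap BertLayer train.py
```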
**Megatron-LM Arguments**:
The following arguments are only useful when `use_megatron_lm` is passed or Megatron-LM is configured through `accelerate config`:
* `--megatron_lm_tp_degree` (``) -- Megatron-LM's Tensor Parallelism (TP) degree.
* `--megatron_lm_pp_degree` (``) -- Megatron-LM's Pipeline Parallelism (PP) degree.
* `--megatron_lm_num_micro_batches` (``) -- Megatron-LM's number of micro batches when PP degree > 1.
* `--megatron_lm_sequence_parallelism` (``) -- Decides whether (true|false) to enable Sequence Parallelism when TP degree > 1.
* `--megatron_lm_recompute_activations` (``) -- Decides whether (true|false) to enable Selective Activation Recomputation.
* `--megatron_lm_use_distributed_optimizer` (``) -- Decides whether (true|false) to use a distributed optimizer which shards optimizer state and gradients across Data Parallel (DP) ranks.
* `--megatron_lm_gradient_clipping` (``) -- Megatron-LM's gradient clipping value based on global L2 Norm (0 to disable).
**FP8 Arguments**:
* `--fp8_backend` (`str`) -- Choose a backend to train with FP8 (`te` or `msamp`)
* `--fp8_use_autocast_during_eval` (`bool`) -- Whether to use FP8 autocast during eval mode (useful only when `--fp8_backend=te` is passed). Generally better metrics are found when this is not passed.
* `--fp8_margin` (`int`) -- The margin to use for the gradient scaling (useful only when `--fp8_backend=te` is passed).
* `--fp8_interval` (`int`) -- The interval to use for how often the scaling factor is recomputed (useful only when `--fp8_backend=te` is passed).
* `--fp8_format` (`str`) -- The format to use for the FP8 recipe (useful only when `--fp8_backend=te` is passed).
* `--fp8_amax_history_len` (`int`) -- The length of the history to use for the scaling factor computation (useful only when `--fp8_backend=te` is passed).
* `--fp8_amax_compute_algo` (`str`) -- The algorithm to use for the scaling factor computation. (useful only when `--fp8_backend=te` is passed).
* `--fp8_override_linear_precision` (`Tuple[bool, bool, bool]`) -- Whether or not to execute `fprop`, `dgrad`, and `wgrad` GEMMS in higher precision.
* `--fp8_opt_level` (`str`) -- What level of 8-bit collective communication should be used with MS-AMP (useful only when `--fp8_backend=msamp` is passed)
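For example, a minimal sketch of launching with the Transformer Engine backend (assuming it is installed, and with `train.py` standing in for your own script); the additional `--fp8_*` flags above can be appended to tune the recipe:
```bash
accelerate launch --mixed_precision fp8 --fp8_backend te train.py
```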
**AWS SageMaker Arguments**:
The following arguments are only useful when training in SageMaker
* `--aws_access_key_id AWS_ACCESS_KEY_ID` (`str`) -- The AWS_ACCESS_KEY_ID used to launch the Amazon SageMaker training job
* `--aws_secret_access_key AWS_SECRET_ACCESS_KEY` (`str`) -- The AWS_SECRET_ACCESS_KEY used to launch the Amazon SageMaker training job
## accelerate estimate-memory
**Command**:
`accelerate estimate-memory` or `accelerate-estimate-memory` or `python -m accelerate.commands.estimate`
Estimates the total vRAM needed to load a particular model hosted on the Hub, along with an estimate for training. Requires that `huggingface_hub` be installed.
<Tip>
When performing inference, typically add ≤20% to the result as overall allocation [as referenced here](https://blog.eleuther.ai/transformer-math/). We will have more extensive estimations in the future that will automatically be included in the calculation.
</Tip>
**Usage**:
```bash
accelerate estimate-memory {MODEL_NAME} --library_name {LIBRARY_NAME} --dtypes {dtype_1} {dtype_2} ...
```
**Required Arguments**:
* `MODEL_NAME` (`str`) -- The model name on the Hugging Face Hub.
**Optional Arguments**:
* `--library_name {timm,transformers}` (`str`) -- The library the model has an integration with, such as `transformers`, needed only if this information is not stored on the Hub
* `--dtypes {float32,float16,int8,int4}` (`[{float32,float16,int8,int4} ...]`) -- The dtypes to use for the model, must be one (or many) of `float32`, `float16`, `int8`, and `int4`
* `--trust_remote_code` (`bool`) -- Whether or not to allow for custom models defined on the Hub in their own modeling files. This option should only be passed for repositories you trust and in which you have read the code, as it will execute code present on the Hub on your local machine.
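For example, to estimate the memory footprint of `bert-base-cased` from the `transformers` library in both full and half precision:
```bash
accelerate estimate-memory bert-base-cased --library_name transformers --dtypes float32 float16
```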
## accelerate tpu-config
**Command**:
`accelerate tpu-config`
**Usage**:
```bash
accelerate tpu-config [arguments]
```
**Optional Arguments**:
* `-h`, `--help` (`bool`) -- Show a help message and exit
**Config Arguments**:
Arguments that can be configured through `accelerate config`.
* `--config_file` (`str`) -- Path to the config file to use for accelerate.
* `--tpu_name` (`str`) -- The name of the TPU to use. If not specified, will use the TPU specified in the config file.
* `--tpu_zone` (`str`) -- The zone of the TPU to use. If not specified, will use the zone specified in the config file.
**TPU Arguments**:
Arguments for options run inside the TPU.
* `--command_file` (`str`) -- The path to the file containing the commands to run on the pod on startup.
* `--command` (`str`) -- A command to run on the pod. Can be passed multiple times.
* `--install_accelerate` (`bool`) -- Whether to install accelerate on the pod. Defaults to False.
* `--accelerate_version` (`str`) -- The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub.
* `--debug` (`bool`) -- If set, will print the command that would be run instead of running it.
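For example, a hypothetical invocation that installs the latest Accelerate on the pod and runs two commands (the TPU name, zone, and commands are placeholders):
```bash
accelerate tpu-config --tpu_name my-tpu --tpu_zone us-central1-a \
  --install_accelerate --command "pip install -r requirements.txt" --command "python train.py"
```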
## accelerate test
**Command**:
`accelerate test` or `accelerate-test`
Runs `accelerate/test_utils/test_script.py` to verify that 🤗 Accelerate has been properly configured on your system and runs.
**Usage**:
```bash
accelerate test [arguments]
```
**Optional Arguments**:
* `--config_file CONFIG_FILE` (`str`) -- The path to use to store the config file. Will default to a file named default_config.yaml in the cache location, which is the content
of the environment variable `HF_HOME` suffixed with 'accelerate', or if you don't have such an environment variable, your cache directory
(`~/.cache` or the content of `XDG_CACHE_HOME`) suffixed with `huggingface`.
* `-h`, `--help` (`bool`) -- Show a help message and exit
| accelerate/docs/source/package_reference/cli.md/0 | {
"file_path": "accelerate/docs/source/package_reference/cli.md",
"repo_id": "accelerate",
"token_count": 5970
} |
<!--
Copyright 2022 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# DDP Communication Hooks
Distributed Data Parallel (DDP) communication hooks provide a generic interface to control how gradients are communicated across workers by overriding the vanilla allreduce in `DistributedDataParallel`. A few built-in communication hooks are provided, and users can easily apply any of these hooks to optimize communication.
- **FP16 Compression Hook**: Compresses gradients by casting them to half-precision floating-point format (`torch.float16`), reducing communication overhead.
- **BF16 Compression Hook**: Similar to FP16, but uses the Brain Floating Point format (`torch.bfloat16`), which can be more efficient on certain hardware.
- **PowerSGD Hook**: An advanced gradient compression algorithm that provides high compression rates and can accelerate bandwidth-bound distributed training.
In this tutorial, you will see how to quickly set up DDP communication hooks to optimize gradient communication in distributed training with the utilities provided in Accelerate, which can be as simple as adding just one new line of code.
## FP16 Compression Hook
<hfoptions id="fp16">
<hfoption id="PyTorch">
```python
import torch
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.distributed.algorithms.ddp_comm_hooks import default_hooks
class MyModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.layer = torch.nn.Linear(10, 10)
def forward(self, x):
return self.layer(x)
model = MyModel()
model = DDP(model, device_ids=[torch.cuda.current_device()])
model.register_comm_hook(state=None, hook=default_hooks.fp16_compress_hook)
# Training loop
for data, targets in data_loader:
outputs = model(data)
loss = criterion(outputs, targets)
loss.backward()
optimizer.step()
optimizer.zero_grad()
```
</hfoption>
<hfoption id="Accelerate">
```python
from accelerate import Accelerator, DDPCommunicationHookType, DistributedDataParallelKwargs
import torch
from torch.utils.data import DataLoader
class MyModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.layer = torch.nn.Linear(10, 10)
def forward(self, x):
return self.layer(x)
# DDP Communication Hook setup
ddp_kwargs = DistributedDataParallelKwargs(comm_hook=DDPCommunicationHookType.FP16)
accelerator = Accelerator(kwargs_handlers=[ddp_kwargs])
model = MyModel()
optimizer = torch.optim.Adam(model.parameters())
data_loader = DataLoader(dataset, batch_size=16)
model, optimizer, data_loader = accelerator.prepare(model, optimizer, data_loader)
# Training loop
for data, targets in data_loader:
outputs = model(data)
loss = criterion(outputs, targets)
accelerator.backward(loss)
optimizer.step()
optimizer.zero_grad()
```
</hfoption>
</hfoptions>
### BF16 Compression Hook
<Tip warning={true}>
BF16 Compression Hook API is experimental, and it requires NCCL version later than 2.9.6.
</Tip>
<hfoptions id="bf16">
<hfoption id="PyTorch">
```python
import torch
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.distributed.algorithms.ddp_comm_hooks import default_hooks
class MyModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.layer = torch.nn.Linear(10, 10)
def forward(self, x):
return self.layer(x)
model = MyModel()
model = DDP(model, device_ids=[torch.cuda.current_device()])
model.register_comm_hook(state=None, hook=default_hooks.bf16_compress_hook)
# Training loop
for data, targets in data_loader:
outputs = model(data)
loss = criterion(outputs, targets)
loss.backward()
optimizer.step()
optimizer.zero_grad()
```
</hfoption>
<hfoption id="Accelerate">
```python
from accelerate import Accelerator, DDPCommunicationHookType, DistributedDataParallelKwargs
import torch
from torch.utils.data import DataLoader
class MyModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.layer = torch.nn.Linear(10, 10)
def forward(self, x):
return self.layer(x)
# DDP Communication Hook setup
ddp_kwargs = DistributedDataParallelKwargs(comm_hook=DDPCommunicationHookType.BF16)
accelerator = Accelerator(kwargs_handlers=[ddp_kwargs])
model = MyModel()
optimizer = torch.optim.Adam(model.parameters())
data_loader = DataLoader(dataset, batch_size=16)
model, optimizer, data_loader = accelerator.prepare(model, optimizer, data_loader)
# Training loop
for data, targets in data_loader:
outputs = model(data)
loss = criterion(outputs, targets)
accelerator.backward(loss)
optimizer.step()
optimizer.zero_grad()
```
</hfoption>
</hfoptions>
### PowerSGD Hook
<Tip warning={true}>
PowerSGD typically requires extra memory of the same size as the model’s gradients to enable error feedback, which can compensate for biased compressed communication and improve accuracy.
</Tip>
<hfoptions id="powerSGD">
<hfoption id="PyTorch">
```python
import torch
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.distributed.algorithms.ddp_comm_hooks import powerSGD_hook
class MyModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.layer = torch.nn.Linear(10, 10)
def forward(self, x):
return self.layer(x)
model = MyModel()
model = DDP(model, device_ids=[torch.cuda.current_device()])
state = powerSGD_hook.PowerSGDState(process_group=None)
model.register_comm_hook(state=state, hook=powerSGD_hook.powerSGD_hook)
# Training loop
for data, targets in data_loader:
outputs = model(data)
loss = criterion(outputs, targets)
loss.backward()
optimizer.step()
optimizer.zero_grad()
```
</hfoption>
<hfoption id="Accelerate">
```python
from accelerate import Accelerator, DDPCommunicationHookType, DistributedDataParallelKwargs
import torch
from torch.utils.data import DataLoader
class MyModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.layer = torch.nn.Linear(10, 10)
def forward(self, x):
return self.layer(x)
# DDP Communication Hook setup
ddp_kwargs = DistributedDataParallelKwargs(comm_hook=DDPCommunicationHookType.POWER_SGD)
accelerator = Accelerator(kwargs_handlers=[ddp_kwargs])
model = MyModel()
optimizer = torch.optim.Adam(model.parameters())
data_loader = DataLoader(dataset, batch_size=16)
model, optimizer, data_loader = accelerator.prepare(model, optimizer, data_loader)
# Training loop
for data, targets in data_loader:
outputs = model(data)
loss = criterion(outputs, targets)
accelerator.backward(loss)
optimizer.step()
optimizer.zero_grad()
```
</hfoption>
</hfoptions>
## DDP Communication Hooks utilities
There are two additional utilities for supporting optional functionalities with the communication hooks.
### comm_wrapper
`comm_wrapper` is an option to wrap a communication hook with additional functionality. For example, it can be used to combine FP16 compression with other communication strategies. Currently supported wrappers are `no`, `fp16`, and `bf16`.
```python
from accelerate import Accelerator, DDPCommunicationHookType, DistributedDataParallelKwargs
import torch
from torch.utils.data import DataLoader
class MyModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.layer = torch.nn.Linear(10, 10)
def forward(self, x):
return self.layer(x)
# DDP Communication Hook setup
ddp_kwargs = DistributedDataParallelKwargs(
comm_hook=DDPCommunicationHookType.POWER_SGD,
comm_wrapper=DDPCommunicationHookType.FP16
)
accelerator = Accelerator(kwargs_handlers=[ddp_kwargs])
model = MyModel()
optimizer = torch.optim.Adam(model.parameters())
data_loader = DataLoader(dataset, batch_size=16)
model, optimizer, data_loader = accelerator.prepare(model, optimizer, data_loader)
# Training loop
for data, targets in data_loader:
outputs = model(data)
loss = criterion(outputs, targets)
accelerator.backward(loss)
optimizer.step()
optimizer.zero_grad()
```
### comm_state_option
`comm_state_option` allows you to pass additional state information required by certain communication hooks. This is particularly useful for stateful hooks like `PowerSGD`, which require maintaining hyperparameters and internal states across training steps. Below is an example showcasing the use of `comm_state_option` with the `PowerSGD` hook.
```python
from accelerate import Accelerator, DDPCommunicationHookType, DistributedDataParallelKwargs
import torch
from torch.utils.data import DataLoader
class MyModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.layer = torch.nn.Linear(10, 10)
def forward(self, x):
return self.layer(x)
# DDP Communication Hook setup
ddp_kwargs = DistributedDataParallelKwargs(
comm_hook=DDPCommunicationHookType.POWER_SGD,
comm_state_option={"matrix_approximation_rank": 2}
)
accelerator = Accelerator(kwargs_handlers=[ddp_kwargs])
model = MyModel()
optimizer = torch.optim.Adam(model.parameters())
data_loader = DataLoader(dataset, batch_size=16)
model, optimizer, data_loader = accelerator.prepare(model, optimizer, data_loader)
# Training loop
for data, targets in data_loader:
outputs = model(data)
loss = criterion(outputs, targets)
accelerator.backward(loss)
optimizer.step()
optimizer.zero_grad()
```
For more advanced usage and additional hooks, refer to the [PyTorch DDP Communication Hooks documentation](https://pytorch.org/docs/stable/ddp_comm_hooks.html).
| accelerate/docs/source/usage_guides/ddp_comm_hook.md/0 | {
"file_path": "accelerate/docs/source/usage_guides/ddp_comm_hook.md",
"repo_id": "accelerate",
"token_count": 3366
} |
<!--Copyright 2022 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Experiment trackers
There are a large number of experiment tracking APIs available; however, getting them all to work in a multi-processing environment can often be complex.
Accelerate provides a general tracking API that can be used to log useful items during your script through [`Accelerator.log`].
## Integrated Trackers
Currently `Accelerate` supports seven trackers out-of-the-box:
- TensorBoard
- WandB
- CometML
- Aim
- MLFlow
- ClearML
- DVCLive
To use any of them, pass in the selected type(s) to the `log_with` parameter in [`Accelerator`]:
```python
from accelerate import Accelerator
from accelerate.utils import LoggerType
accelerator = Accelerator(log_with="all") # For all available trackers in the environment
accelerator = Accelerator(log_with="wandb")
accelerator = Accelerator(log_with=["wandb", LoggerType.TENSORBOARD])
```
At the start of your experiment [`Accelerator.init_trackers`] should be used to setup your project, and potentially add any experiment hyperparameters to be logged:
```python
hps = {"num_iterations": 5, "learning_rate": 1e-2}
accelerator.init_trackers("my_project", config=hps)
```
When you are ready to log any data, [`Accelerator.log`] should be used.
A `step` can also be passed in to correlate the data with a particular step in the training loop.
```python
accelerator.log({"train_loss": 1.12, "valid_loss": 0.8}, step=1)
```
Once you've finished training, make sure to run [`Accelerator.end_training`] so that all the trackers can run their finish functionalities if they have any.
```python
accelerator.end_training()
```
A full example is below:
```python
from accelerate import Accelerator
accelerator = Accelerator(log_with="all")
config = {
"num_iterations": 5,
"learning_rate": 1e-2,
"loss_function": str(my_loss_function),
}
accelerator.init_trackers("example_project", config=config)
my_model, my_optimizer, my_training_dataloader = accelerator.prepare(my_model, my_optimizer, my_training_dataloader)
device = accelerator.device
my_model.to(device)
for iteration in range(config["num_iterations"]):
for step, batch in enumerate(my_training_dataloader):
my_optimizer.zero_grad()
inputs, targets = batch
inputs = inputs.to(device)
targets = targets.to(device)
outputs = my_model(inputs)
loss = my_loss_function(outputs, targets)
accelerator.backward(loss)
my_optimizer.step()
accelerator.log({"training_loss": loss}, step=step)
accelerator.end_training()
```
If a tracker requires a directory to save data to, such as `TensorBoard`, then pass the directory path to `project_dir`. The `project_dir` parameter is useful
when there are other configurations to be combined with in the [`~utils.ProjectConfiguration`] data class. For example, you can save the TensorBoard data to `project_dir` and everything else can be logged in the `logging_dir` parameter of [`~utils.ProjectConfiguration`]:
```python
accelerator = Accelerator(log_with="tensorboard", project_dir=".")
# use with ProjectConfiguration
config = ProjectConfiguration(project_dir=".", logging_dir="another/directory")
accelerator = Accelerator(log_with="tensorboard", project_config=config)
```
## Implementing Custom Trackers
To implement a new tracker to be used in `Accelerator`, a new one can be made through implementing the [`GeneralTracker`] class.
Every tracker must implement three functions and have three properties:
- `__init__`:
- Should store a `run_name` and initialize the tracker API of the integrated library.
- If a tracker stores their data locally (such as TensorBoard), a `logging_dir` parameter can be added.
- `store_init_configuration`:
- Should take in a `values` dictionary and store them as a one-time experiment configuration
- `log`:
- Should take in a `values` dictionary and a `step`, and should log them to the run
- `name` (`str`):
- A unique string name for the tracker, such as `"wandb"` for the wandb tracker.
- This will be used for interacting with this tracker specifically
- `requires_logging_directory` (`bool`):
- Whether a `logging_dir` is needed for this particular tracker and if it uses one.
- `tracker`:
- This should be implemented as a `@property` function
- Should return the internal tracking mechanism the library uses, such as the `run` object for `wandb`.
Each method should also utilize the [`state.PartialState`] class if the logger should only be executed on the main process, for instance.
A brief example can be seen below with an integration with Weights and Biases, containing only the relevant information and logging just on
the main process:
```python
from accelerate.tracking import GeneralTracker, on_main_process
from typing import Optional
import wandb
class MyCustomTracker(GeneralTracker):
name = "wandb"
requires_logging_directory = False
@on_main_process
def __init__(self, run_name: str):
self.run_name = run_name
self.run = wandb.init(self.run_name)
@property
def tracker(self):
return self.run
@on_main_process
def store_init_configuration(self, values: dict):
wandb.config.update(values)
@on_main_process
def log(self, values: dict, step: Optional[int] = None):
wandb.log(values, step=step)
```
When you are ready to build your `Accelerator` object, pass in an **instance** of your tracker to [`Accelerator.log_with`] to have it automatically
be used with the API:
```python
tracker = MyCustomTracker("some_run_name")
accelerator = Accelerator(log_with=tracker)
```
These also can be mixed with existing trackers, including with `"all"`:
```python
tracker = MyCustomTracker("some_run_name")
accelerator = Accelerator(log_with=[tracker, "all"])
```
## Accessing the internal tracker
If some custom interactions with a tracker might be wanted directly, you can quickly access one using the
[`Accelerator.get_tracker`] method. Just pass in the string corresponding to a tracker's `.name` attribute
and it will return that tracker on the main process.
This example shows doing so with wandb:
```python
wandb_tracker = accelerator.get_tracker("wandb")
```
From there you can interact with `wandb`'s `run` object like normal:
```python
wandb_tracker.log_artifact(some_artifact_to_log)
```
<Tip>
Trackers built in Accelerate will automatically execute on the correct process,
so if a tracker is only meant to be run on the main process it will do so
automatically.
</Tip>
If you want to truly remove Accelerate's wrapping entirely, you can
achieve the same outcome with:
```python
wandb_tracker = accelerator.get_tracker("wandb", unwrap=True)
if accelerator.is_main_process:
wandb_tracker.log_artifact(some_artifact_to_log)
```
## When a wrapper cannot work
If a library has an API that does not follow a strict `.log` with an overall dictionary such as Neptune.AI, logging can be done manually under an `if accelerator.is_main_process` statement:
```diff
from accelerate import Accelerator
+ import neptune
accelerator = Accelerator()
+ run = neptune.init_run(...)
my_model, my_optimizer, my_training_dataloader = accelerator.prepare(my_model, my_optimizer, my_training_dataloader)
device = accelerator.device
my_model.to(device)
for iteration in range(config["num_iterations"]):
for batch in my_training_dataloader:
my_optimizer.zero_grad()
inputs, targets = batch
inputs = inputs.to(device)
targets = targets.to(device)
outputs = my_model(inputs)
loss = my_loss_function(outputs, targets)
total_loss += loss
accelerator.backward(loss)
my_optimizer.step()
+ if accelerator.is_main_process:
+ run["logs/training/batch/loss"].log(loss)
```
| accelerate/docs/source/usage_guides/tracking.md/0 | {
"file_path": "accelerate/docs/source/usage_guides/tracking.md",
"repo_id": "accelerate",
"token_count": 2703
} |
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
"""
Creates a set of `DataLoader`s for the `glue` dataset,
using "bert-base-cased" as the tokenizer.
Args:
accelerator (`Accelerator`):
An `Accelerator` object
batch_size (`int`, *optional*):
The batch size for the train and validation DataLoaders.
"""
tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
datasets = load_dataset("glue", "mrpc")
def tokenize_function(examples):
# max_length=None => use the model max length (it's actually the default)
outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
tokenized_datasets = datasets.map(
tokenize_function,
batched=True,
remove_columns=["idx", "sentence1", "sentence2"],
)
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
tokenized_datasets = tokenized_datasets.rename_column("label", "labels")
def collate_fn(examples):
# On TPU it's best to pad everything to the same length or training will be very slow.
max_length = 128 if accelerator.distributed_type == DistributedType.XLA else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
pad_to_multiple_of = 16
elif accelerator.mixed_precision != "no":
pad_to_multiple_of = 8
else:
pad_to_multiple_of = None
return tokenizer.pad(
examples,
padding="longest",
max_length=max_length,
pad_to_multiple_of=pad_to_multiple_of,
return_tensors="pt",
)
# Instantiate dataloaders.
train_dataloader = DataLoader(
tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
)
eval_dataloader = DataLoader(
tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
)
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
get_dataloaders = mocked_dataloaders # noqa: F811
def training_function(config, args):
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
config["num_epochs"] = 2
# Initialize accelerator
accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lr = config["lr"]
num_epochs = int(config["num_epochs"])
seed = int(config["seed"])
batch_size = int(config["batch_size"])
metric = evaluate.load("glue", "mrpc")
# If the batch size is too big we use gradient accumulation
gradient_accumulation_steps = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.XLA:
gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
batch_size = MAX_GPU_BATCH_SIZE
set_seed(seed)
train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
model = model.to(accelerator.device)
# Instantiate optimizer
optimizer = AdamW(params=model.parameters(), lr=lr)
# Instantiate scheduler
lr_scheduler = get_linear_schedule_with_warmup(
optimizer=optimizer,
num_warmup_steps=100,
num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
)
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
)
# Now we train the model
for epoch in range(num_epochs):
model.train()
for step, batch in enumerate(train_dataloader):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device)
outputs = model(**batch)
loss = outputs.loss
loss = loss / gradient_accumulation_steps
accelerator.backward(loss)
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
samples_seen = 0
for step, batch in enumerate(eval_dataloader):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device)
with torch.no_grad():
outputs = model(**batch)
predictions = outputs.logits.argmax(dim=-1)
predictions, references = accelerator.gather((predictions, batch["labels"]))
# New Code #
# First we check if it's a distributed system
if accelerator.use_distributed:
# Then see if we're on the last batch of our eval dataloader
if step == len(eval_dataloader) - 1:
# Last batch needs to be truncated on distributed systems as it contains additional samples
predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
references = references[: len(eval_dataloader.dataset) - samples_seen]
else:
# Otherwise we add the number of samples seen
samples_seen += references.shape[0]
# All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
# accelerator.gather_for_metrics((predictions, batch["labels"]))
metric.add_batch(
predictions=predictions,
references=references,
)
eval_metric = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"epoch {epoch}:", eval_metric)
accelerator.end_training()
def main():
parser = argparse.ArgumentParser(description="Simple example of training script.")
parser.add_argument(
"--mixed_precision",
type=str,
default=None,
choices=["no", "fp16", "bf16", "fp8"],
help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU.",
)
parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
args = parser.parse_args()
config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
training_function(config, args)
if __name__ == "__main__":
main()
| accelerate/examples/by_feature/multi_process_metrics.py/0 | {
"file_path": "accelerate/examples/by_feature/multi_process_metrics.py",
"repo_id": "accelerate",
"token_count": 3665
} |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import runhouse as rh
import torch
from nlp_example import training_function
from accelerate.utils import PrepareForLaunch, patch_environment
def launch_train(*args):
num_processes = torch.cuda.device_count()
print(f"Device count: {num_processes}")
with patch_environment(
world_size=num_processes, master_addr="127.0.0.1", master_port="29500", mixed_precision=args[1].mixed_precision
):
launcher = PrepareForLaunch(training_function, distributed_type="MULTI_GPU")
torch.multiprocessing.start_processes(launcher, args=args, nprocs=num_processes, start_method="spawn")
if __name__ == "__main__":
# Refer to https://runhouse-docs.readthedocs-hosted.com/en/main/rh_primitives/cluster.html#hardware-setup
# for cloud access setup instructions (if using on-demand hardware), and for API specifications.
# on-demand GPU
# gpu = rh.cluster(name='rh-cluster', instance_type='V100:1', provider='cheapest', use_spot=False) # single GPU
gpu = rh.cluster(name="rh-cluster", instance_type="V100:4", provider="cheapest", use_spot=False) # multi GPU
gpu.up_if_not()
# on-prem GPU
# gpu = rh.cluster(
# ips=["ip_addr"], ssh_creds={ssh_user:"<username>", ssh_private_key:"<key_path>"}, name="rh-cluster"
# )
# Set up remote function
reqs = [
"pip:./",
"transformers",
"datasets",
"evaluate",
"tqdm",
"scipy",
"scikit-learn",
"tensorboard",
"torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117",
]
launch_train_gpu = rh.function(fn=launch_train, system=gpu, reqs=reqs, name="train_bert_glue")
# Define train args/config, run train function
train_args = argparse.Namespace(cpu=False, mixed_precision="fp16")
config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
launch_train_gpu(config, train_args, stream_logs=True)
# Alternatively, we can just run as instructed in the README (but only because there's already a wrapper CLI):
# gpu.install_packages(reqs)
# gpu.run(['accelerate launch --multi_gpu accelerate/examples/nlp_example.py'])
| accelerate/examples/multigpu_remote_launcher.py/0 | {
"file_path": "accelerate/examples/multigpu_remote_launcher.py",
"repo_id": "accelerate",
"token_count": 1026
} |
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from manim import *
class Stage3(Scene):
def construct(self):
step_1 = MarkupText(
f"To combat this, Accelerate employs one of two different\nSampler wrapper methods depending on the scenario:",
font_size=24
)
step_1.move_to([0, 1.5, 0])
self.add(step_1)
step_2 = MarkupText(
f"1. Sharding the dataset before drawing:\n\t● <span fgcolor='{RED}'>IterableDatasetShard</span>\n\t● <span fgcolor='{RED}'>BatchSamplerShard</span>",
font_size=24,
).next_to(step_1, direction=DOWN, aligned_edge=LEFT)
self.add(step_2)
step_3 = MarkupText(
f"\n\n2. Splitting the batch after drawing:\n\t● <span fgcolor='{BLUE}'>DataLoaderDispatcher</span>",
font_size=24,
).next_to(step_2, direction=DOWN, aligned_edge=LEFT)
self.add(step_3) | accelerate/manim_animations/dataloaders/stage_3.py/0 | {
"file_path": "accelerate/manim_animations/dataloaders/stage_3.py",
"repo_id": "accelerate",
"token_count": 577
} |
#!/usr/bin/env python
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import yaml
from ...utils import ComputeEnvironment, DistributedType, SageMakerDistributedType
from ...utils.constants import SAGEMAKER_PYTHON_VERSION, SAGEMAKER_PYTORCH_VERSION, SAGEMAKER_TRANSFORMERS_VERSION
hf_cache_home = os.path.expanduser(
os.environ.get("HF_HOME", os.path.join(os.environ.get("XDG_CACHE_HOME", "~/.cache"), "huggingface"))
)
cache_dir = os.path.join(hf_cache_home, "accelerate")
default_json_config_file = os.path.join(cache_dir, "default_config.yaml")
default_yaml_config_file = os.path.join(cache_dir, "default_config.yaml")
# For backward compatibility: the default config is the json one if it's the only existing file.
if os.path.isfile(default_yaml_config_file) or not os.path.isfile(default_json_config_file):
default_config_file = default_yaml_config_file
else:
default_config_file = default_json_config_file
def load_config_from_file(config_file):
if config_file is not None:
if not os.path.isfile(config_file):
raise FileNotFoundError(
f"The passed configuration file `{config_file}` does not exist. "
"Please pass an existing file to `accelerate launch`, or use the default one "
"created through `accelerate config` and run `accelerate launch` "
"without the `--config_file` argument."
)
else:
config_file = default_config_file
with open(config_file, encoding="utf-8") as f:
if config_file.endswith(".json"):
if (
json.load(f).get("compute_environment", ComputeEnvironment.LOCAL_MACHINE)
== ComputeEnvironment.LOCAL_MACHINE
):
config_class = ClusterConfig
else:
config_class = SageMakerConfig
return config_class.from_json_file(json_file=config_file)
else:
if (
yaml.safe_load(f).get("compute_environment", ComputeEnvironment.LOCAL_MACHINE)
== ComputeEnvironment.LOCAL_MACHINE
):
config_class = ClusterConfig
else:
config_class = SageMakerConfig
return config_class.from_yaml_file(yaml_file=config_file)
@dataclass
class BaseConfig:
compute_environment: ComputeEnvironment
distributed_type: Union[DistributedType, SageMakerDistributedType]
mixed_precision: str
use_cpu: bool
debug: bool
def to_dict(self):
result = self.__dict__
# For serialization, it's best to convert Enums to strings (or their underlying value type).
def _convert_enums(value):
if isinstance(value, Enum):
return value.value
if isinstance(value, dict):
if not bool(value):
return None
for key1, value1 in value.items():
value[key1] = _convert_enums(value1)
return value
for key, value in result.items():
result[key] = _convert_enums(value)
result = {k: v for k, v in result.items() if v is not None}
return result
@staticmethod
def process_config(config_dict):
"""
Processes `config_dict` and sets default values for any missing keys
"""
if "compute_environment" not in config_dict:
config_dict["compute_environment"] = ComputeEnvironment.LOCAL_MACHINE
if "distributed_type" not in config_dict:
raise ValueError("A `distributed_type` must be specified in the config file.")
if "num_processes" not in config_dict and config_dict["distributed_type"] == DistributedType.NO:
config_dict["num_processes"] = 1
if "mixed_precision" not in config_dict:
config_dict["mixed_precision"] = "fp16" if ("fp16" in config_dict and config_dict["fp16"]) else None
if "fp16" in config_dict: # Convert the config to the new format.
del config_dict["fp16"]
if "dynamo_backend" in config_dict: # Convert the config to the new format.
dynamo_backend = config_dict.pop("dynamo_backend")
config_dict["dynamo_config"] = {} if dynamo_backend == "NO" else {"dynamo_backend": dynamo_backend}
if "use_cpu" not in config_dict:
config_dict["use_cpu"] = False
if "debug" not in config_dict:
config_dict["debug"] = False
if "enable_cpu_affinity" not in config_dict:
config_dict["enable_cpu_affinity"] = False
return config_dict
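    # Hedged illustration (not part of the original class): what `process_config` fills in for a
    # minimal dict. `DistributedType` is a string-backed enum, so the plain "NO" below compares
    # equal to `DistributedType.NO`.
    #
    #   cfg = BaseConfig.process_config({"distributed_type": "NO"})
    #   # -> adds compute_environment=LOCAL_MACHINE, num_processes=1, mixed_precision=None,
    #   #    use_cpu=False, debug=False, enable_cpu_affinity=False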
@classmethod
def from_json_file(cls, json_file=None):
json_file = default_json_config_file if json_file is None else json_file
with open(json_file, encoding="utf-8") as f:
config_dict = json.load(f)
config_dict = cls.process_config(config_dict)
extra_keys = sorted(set(config_dict.keys()) - set(cls.__dataclass_fields__.keys()))
if len(extra_keys) > 0:
raise ValueError(
f"The config file at {json_file} had unknown keys ({extra_keys}), please try upgrading your `accelerate`"
" version or fix (and potentially remove) these keys from your config file."
)
return cls(**config_dict)
def to_json_file(self, json_file):
with open(json_file, "w", encoding="utf-8") as f:
content = json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
f.write(content)
@classmethod
def from_yaml_file(cls, yaml_file=None):
yaml_file = default_yaml_config_file if yaml_file is None else yaml_file
with open(yaml_file, encoding="utf-8") as f:
config_dict = yaml.safe_load(f)
config_dict = cls.process_config(config_dict)
extra_keys = sorted(set(config_dict.keys()) - set(cls.__dataclass_fields__.keys()))
if len(extra_keys) > 0:
raise ValueError(
f"The config file at {yaml_file} had unknown keys ({extra_keys}), please try upgrading your `accelerate`"
" version or fix (and potentially remove) these keys from your config file."
)
return cls(**config_dict)
def to_yaml_file(self, yaml_file):
with open(yaml_file, "w", encoding="utf-8") as f:
yaml.safe_dump(self.to_dict(), f)
def __post_init__(self):
if isinstance(self.compute_environment, str):
self.compute_environment = ComputeEnvironment(self.compute_environment)
if isinstance(self.distributed_type, str):
if self.compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
self.distributed_type = SageMakerDistributedType(self.distributed_type)
else:
self.distributed_type = DistributedType(self.distributed_type)
if getattr(self, "dynamo_config", None) is None:
self.dynamo_config = {}
@dataclass
class ClusterConfig(BaseConfig):
num_processes: int = -1 # For instance if we use SLURM and the user manually passes it in
machine_rank: int = 0
num_machines: int = 1
gpu_ids: Optional[str] = None
main_process_ip: Optional[str] = None
main_process_port: Optional[int] = None
rdzv_backend: Optional[str] = "static"
same_network: Optional[bool] = False
main_training_function: str = "main"
enable_cpu_affinity: bool = False
# args for FP8 training
fp8_config: dict = None
# args for deepspeed_plugin
deepspeed_config: dict = None
# args for fsdp
fsdp_config: dict = None
# args for tp
tp_config: dict = None
# args for megatron_lm
megatron_lm_config: dict = None
# args for ipex
ipex_config: dict = None
# args for mpirun
mpirun_config: dict = None
# args for TPU
downcast_bf16: bool = False
# args for TPU pods
tpu_name: str = None
tpu_zone: str = None
tpu_use_cluster: bool = False
tpu_use_sudo: bool = False
command_file: str = None
commands: List[str] = None
tpu_vm: List[str] = None
tpu_env: List[str] = None
# args for dynamo
dynamo_config: dict = None
def __post_init__(self):
if self.deepspeed_config is None:
self.deepspeed_config = {}
if self.fsdp_config is None:
self.fsdp_config = {}
if self.tp_config is None:
self.tp_config = {}
if self.megatron_lm_config is None:
self.megatron_lm_config = {}
if self.ipex_config is None:
self.ipex_config = {}
if self.mpirun_config is None:
self.mpirun_config = {}
if self.fp8_config is None:
self.fp8_config = {}
return super().__post_init__()
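# Hedged usage sketch (not part of the original module): building a minimal single-process config
# programmatically and round-tripping it through YAML. The temporary path is illustrative.
#
#   cfg = ClusterConfig(
#       compute_environment="LOCAL_MACHINE",
#       distributed_type="NO",
#       mixed_precision="no",
#       use_cpu=False,
#       debug=False,
#       num_processes=1,
#   )
#   cfg.to_yaml_file("/tmp/accelerate_config.yaml")
#   assert ClusterConfig.from_yaml_file("/tmp/accelerate_config.yaml").num_processes == 1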
@dataclass
class SageMakerConfig(BaseConfig):
ec2_instance_type: str
iam_role_name: str
image_uri: Optional[str] = None
profile: Optional[str] = None
region: str = "us-east-1"
num_machines: int = 1
gpu_ids: str = "all"
base_job_name: str = f"accelerate-sagemaker-{num_machines}"
pytorch_version: str = SAGEMAKER_PYTORCH_VERSION
transformers_version: str = SAGEMAKER_TRANSFORMERS_VERSION
py_version: str = SAGEMAKER_PYTHON_VERSION
sagemaker_inputs_file: str = None
sagemaker_metrics_file: str = None
additional_args: dict = None
dynamo_config: dict = None
enable_cpu_affinity: bool = False
| accelerate/src/accelerate/commands/config/config_args.py/0 | {
"file_path": "accelerate/src/accelerate/commands/config/config_args.py",
"repo_id": "accelerate",
"token_count": 4284
} |
#!/usr/bin/env python
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
_description = "Run commands across TPU VMs for initial setup before running `accelerate launch`."
def tpu_command_parser(subparsers=None):
if subparsers is not None:
parser = subparsers.add_parser("tpu-config", description=_description)
else:
parser = argparse.ArgumentParser("Accelerate tpu-config command", description=_description)
# Core arguments
config_args = parser.add_argument_group(
"Config Arguments", "Arguments that can be configured through `accelerate config`."
)
config_args.add_argument(
"--config_file",
type=str,
default=None,
help="Path to the config file to use for accelerate.",
)
config_args.add_argument(
"--tpu_name",
default=None,
help="The name of the TPU to use. If not specified, will use the TPU specified in the config file.",
)
config_args.add_argument(
"--tpu_zone",
default=None,
help="The zone of the TPU to use. If not specified, will use the zone specified in the config file.",
)
    pod_args = parser.add_argument_group("TPU Arguments", "Arguments for options run inside the TPU.")
pod_args.add_argument(
"--use_alpha",
action="store_true",
help="Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.",
)
pod_args.add_argument(
"--command_file",
default=None,
help="The path to the file containing the commands to run on the pod on startup.",
)
pod_args.add_argument(
"--command",
action="append",
nargs="+",
help="A command to run on the pod. Can be passed multiple times.",
)
pod_args.add_argument(
"--install_accelerate",
action="store_true",
help="Whether to install accelerate on the pod. Defaults to False.",
)
pod_args.add_argument(
"--accelerate_version",
default="latest",
help="The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub.",
)
pod_args.add_argument(
"--debug", action="store_true", help="If set, will print the command that would be run instead of running it."
)
if subparsers is not None:
parser.set_defaults(func=tpu_command_launcher)
return parser
def tpu_command_launcher(args):
defaults = None
# Get the default from the config file if it exists.
if args.config_file is not None or os.path.isfile(default_config_file):
defaults = load_config_from_file(args.config_file)
if not args.command_file and defaults.command_file is not None and not args.command:
args.command_file = defaults.command_file
if not args.command and defaults.commands is not None:
args.command = defaults.commands
if not args.tpu_name:
args.tpu_name = defaults.tpu_name
if not args.tpu_zone:
args.tpu_zone = defaults.tpu_zone
if args.accelerate_version == "dev":
args.accelerate_version = "git+https://github.com/huggingface/accelerate.git"
elif args.accelerate_version == "latest":
args.accelerate_version = "accelerate -U"
elif isinstance(parse(args.accelerate_version), Version):
args.accelerate_version = f"accelerate=={args.accelerate_version}"
if not args.command_file and not args.command:
raise ValueError("You must specify either a command file or a command to run on the pod.")
if args.command_file:
with open(args.command_file) as f:
args.command = [f.read().splitlines()]
# To turn list of lists into list of strings
if isinstance(args.command[0], list):
args.command = [line for cmd in args.command for line in cmd]
# Default to the shared folder and install accelerate
new_cmd = ["cd /usr/share"]
if args.install_accelerate:
new_cmd += [f"pip install {args.accelerate_version}"]
new_cmd += args.command
args.command = "; ".join(new_cmd)
# Then send it to gcloud
# Eventually try to use google-api-core to do this instead of subprocess
cmd = ["gcloud"]
if args.use_alpha:
cmd += ["alpha"]
cmd += [
"compute",
"tpus",
"tpu-vm",
"ssh",
args.tpu_name,
"--zone",
args.tpu_zone,
"--command",
args.command,
"--worker",
"all",
]
if args.debug:
print(f"Running {' '.join(cmd)}")
return
subprocess.run(cmd)
print("Successfully setup pod.")
def main():
parser = tpu_command_parser()
args = parser.parse_args()
tpu_command_launcher(args)
| accelerate/src/accelerate/commands/tpu.py/0 | {
"file_path": "accelerate/src/accelerate/commands/tpu.py",
"repo_id": "accelerate",
"token_count": 2114
} |
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased"):
"""
Creates a set of `DataLoader`s for the `glue` dataset.
Args:
accelerator (`Accelerator`):
An `Accelerator` object
batch_size (`int`, *optional*):
            The batch size for the train DataLoader (the validation DataLoader uses `EVAL_BATCH_SIZE`).
        model_name (`str`, *optional*):
            The name of (or path to) the pretrained model whose tokenizer should be used.
    """
tokenizer = AutoTokenizer.from_pretrained(model_name)
datasets = load_dataset("glue", "mrpc")
def tokenize_function(examples):
# max_length=None => use the model max length (it's actually the default)
outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
tokenized_datasets = datasets.map(
tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
)
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
tokenized_datasets = tokenized_datasets.rename_column("label", "labels")
def collate_fn(examples):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.XLA:
return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
return tokenizer.pad(examples, padding="longest", return_tensors="pt")
# Instantiate dataloaders.
train_dataloader = DataLoader(
tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
)
eval_dataloader = DataLoader(
tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
)
return train_dataloader, eval_dataloader
def evaluation_loop(accelerator, model, eval_dataloader, metric):
model.eval()
samples_seen = 0
for step, batch in enumerate(eval_dataloader):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device)
with torch.no_grad():
outputs = model(**batch)
predictions = outputs.logits.argmax(dim=-1)
        # It is slightly faster to call this once than multiple times
predictions, references = accelerator.gather(
(predictions, batch["labels"])
) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(eval_dataloader) - 1:
predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
references = references[: len(eval_dataloader.dataset) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=predictions,
references=references,
)
eval_metric = metric.compute()
return eval_metric["accuracy"]
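# Hedged numeric illustration (not part of the original script): with, say, 100 validation samples
# on 8 processes, the sharded eval dataloader pads the final batch with repeated samples so every
# process gets a full batch. The slicing above drops those repeats, so exactly
# `len(eval_dataloader.dataset)` predictions reach `metric.add_batch` in total.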
def training_function(config, args):
# Initialize accelerator
accelerator = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lr = config["lr"]
num_epochs = int(config["num_epochs"])
seed = int(config["seed"])
batch_size = int(config["batch_size"])
model_name = args.model_name_or_path
set_seed(seed)
train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)
    # Instantiate the model (we build the model here so that the seed also controls new weight initialization)
model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)
# Instantiate optimizer
optimizer_cls = (
AdamW
if accelerator.state.deepspeed_plugin is None
or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
optimizer = optimizer_cls(params=model.parameters(), lr=lr)
if accelerator.state.deepspeed_plugin is not None:
gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
"gradient_accumulation_steps"
]
else:
gradient_accumulation_steps = 1
max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
lr_scheduler = get_linear_schedule_with_warmup(
optimizer=optimizer,
num_warmup_steps=0,
num_training_steps=max_training_steps,
)
else:
lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
)
# We need to keep track of how many total steps we have iterated over
overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
starting_epoch = 0
metric = evaluate.load("glue", "mrpc")
ending_epoch = num_epochs
if args.partial_train_epoch is not None:
ending_epoch = args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint)
epoch_string = args.resume_from_checkpoint.split("epoch_")[1]
state_epoch_num = ""
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
starting_epoch = int(state_epoch_num) + 1
accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
accelerator.print("resumed checkpoint performance:", accuracy)
accelerator.print("resumed checkpoint's scheduler's lr:", lr_scheduler.get_lr()[0])
accelerator.print("resumed optimizers's lr:", optimizer.param_groups[0]["lr"])
with open(os.path.join(args.output_dir, f"state_{starting_epoch - 1}.json")) as f:
resumed_state = json.load(f)
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
state = {}
for epoch in range(starting_epoch, ending_epoch):
model.train()
for step, batch in enumerate(train_dataloader):
outputs = model(**batch)
loss = outputs.loss
loss = loss / gradient_accumulation_steps
accelerator.backward(loss)
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
output_dir = f"epoch_{epoch}"
output_dir = os.path.join(args.output_dir, output_dir)
accelerator.save_state(output_dir)
accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
state["accuracy"] = accuracy
state["lr"] = lr_scheduler.get_lr()[0]
state["optimizer_lr"] = optimizer.param_groups[0]["lr"]
state["epoch"] = epoch
state["step"] = overall_step
accelerator.print(f"epoch {epoch}:", state)
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir, f"state_{epoch}.json"), "w") as f:
json.dump(state, f)
accelerator.end_training()
def main():
    parser = argparse.ArgumentParser(description="Simple example of a training script that tests checkpointing and resuming.")
parser.add_argument(
"--model_name_or_path",
type=str,
default="bert-base-cased",
help="Path to pretrained model or model identifier from huggingface.co/models.",
required=False,
)
parser.add_argument(
"--output_dir",
type=str,
default=".",
help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
)
parser.add_argument(
"--resume_from_checkpoint",
type=str,
default=None,
help="If the training should continue from a checkpoint folder.",
)
parser.add_argument(
"--partial_train_epoch",
type=int,
default=None,
help="If passed, the training will stop after this number of epochs.",
)
parser.add_argument(
"--num_epochs",
type=int,
default=2,
help="Number of train epochs.",
)
args = parser.parse_args()
config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
training_function(config, args)
if __name__ == "__main__":
main()
| accelerate/src/accelerate/test_utils/scripts/external_deps/test_checkpointing.py/0 | {
"file_path": "accelerate/src/accelerate/test_utils/scripts/external_deps/test_checkpointing.py",
"repo_id": "accelerate",
"token_count": 4199
} |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class RegressionDataset:
def __init__(self, a=2, b=3, length=64, seed=None):
rng = np.random.default_rng(seed)
self.length = length
self.x = rng.normal(size=(length,)).astype(np.float32)
self.y = a * self.x + b + rng.normal(scale=0.1, size=(length,)).astype(np.float32)
def __len__(self):
return self.length
def __getitem__(self, i):
return {"x": self.x[i], "y": self.y[i]}
class RegressionModel4XPU(torch.nn.Module):
def __init__(self, a=0, b=0, double_output=False):
super().__init__()
self.a = torch.nn.Parameter(torch.tensor([2, 3]).float())
self.b = torch.nn.Parameter(torch.tensor([2, 3]).float())
self.first_batch = True
def forward(self, x=None):
if self.first_batch:
print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
self.first_batch = False
return x * self.a[0] + self.b[0]
class RegressionModel(torch.nn.Module):
def __init__(self, a=0, b=0, double_output=False):
super().__init__()
self.a = torch.nn.Parameter(torch.tensor(a).float())
self.b = torch.nn.Parameter(torch.tensor(b).float())
self.first_batch = True
def forward(self, x=None):
if self.first_batch:
print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
self.first_batch = False
return x * self.a + self.b
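# Hedged usage sketch (not part of the original test utilities): fitting `RegressionModel` on
# `RegressionDataset` with plain SGD; the hyper-parameters below are illustrative.
#
#   dataset = RegressionDataset(a=2, b=3, length=64, seed=0)
#   loader = DataLoader(dataset, batch_size=16, shuffle=True)
#   model = RegressionModel()
#   optim = torch.optim.SGD(model.parameters(), lr=0.1)
#   for _ in range(20):
#       for batch in loader:
#           loss = ((model(batch["x"]) - batch["y"]) ** 2).mean()
#           optim.zero_grad()
#           loss.backward()
#           optim.step()
#   # model.a and model.b should end up close to 2 and 3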
def mocked_dataloaders(accelerator, batch_size: int = 16):
from datasets import load_dataset
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
data_files = {"train": "tests/test_samples/MRPC/train.csv", "validation": "tests/test_samples/MRPC/dev.csv"}
datasets = load_dataset("csv", data_files=data_files)
label_list = datasets["train"].unique("label")
label_to_id = {v: i for i, v in enumerate(label_list)}
def tokenize_function(examples):
# max_length=None => use the model max length (it's actually the default)
outputs = tokenizer(
examples["sentence1"], examples["sentence2"], truncation=True, max_length=None, padding="max_length"
)
if "label" in examples:
outputs["labels"] = [label_to_id[l] for l in examples["label"]]
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
tokenized_datasets = datasets.map(
tokenize_function,
batched=True,
remove_columns=["sentence1", "sentence2", "label"],
)
def collate_fn(examples):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.XLA:
return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
return tokenizer.pad(examples, padding="longest", return_tensors="pt")
# Instantiate dataloaders.
train_dataloader = DataLoader(tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=2)
eval_dataloader = DataLoader(tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=1)
return train_dataloader, eval_dataloader
def mocked_dataloaders_for_autoregressive_models(accelerator, batch_size: int = 16):
from datasets import load_dataset
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained("HuggingFaceTB/SmolLM-360M")
tokenizer.pad_token = tokenizer.eos_token
data_files = {"train": "tests/test_samples/MRPC/train.csv", "validation": "tests/test_samples/MRPC/dev.csv"}
datasets = load_dataset("csv", data_files=data_files)
def tokenize_function(examples):
# max_length=None => use the model max length (it's actually the default)
outputs = tokenizer(examples["sentence1"], truncation=True, max_length=None, return_attention_mask=False)
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
tokenized_datasets = datasets.map(
tokenize_function,
batched=True,
remove_columns=["sentence1", "sentence2", "label"],
)
def collate_fn(examples):
# On TPU it's best to pad everything to the same length or training will be very slow.
max_length = (
128
if accelerator.distributed_type == DistributedType.XLA
else max([len(e["input_ids"]) for e in examples])
)
        # When using mixed precision we want to pad to multiples of 8/16
if accelerator.mixed_precision == "fp8":
pad_to_multiple_of = 16
elif accelerator.mixed_precision != "no":
pad_to_multiple_of = 8
else:
pad_to_multiple_of = None
batch = tokenizer.pad(
examples,
padding="max_length",
max_length=max_length + 1,
pad_to_multiple_of=pad_to_multiple_of,
return_tensors="pt",
)
batch["labels"] = batch["input_ids"][:, 1:]
batch["input_ids"] = batch["input_ids"][:, :-1]
batch["labels"] = torch.where(batch["labels"] == tokenizer.pad_token_id, -100, batch["labels"])
return batch
# Instantiate dataloaders.
train_dataloader = DataLoader(tokenized_datasets["train"], shuffle=False, collate_fn=collate_fn, batch_size=2)
eval_dataloader = DataLoader(tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=1)
return train_dataloader, eval_dataloader
| accelerate/src/accelerate/test_utils/training.py/0 | {
"file_path": "accelerate/src/accelerate/test_utils/training.py",
"repo_id": "accelerate",
"token_count": 2582
} |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import platform
import re
import socket
from codecs import encode
from functools import partial, reduce
from types import MethodType
from typing import OrderedDict
import numpy as np
import torch
from packaging.version import Version
from safetensors.torch import save_file as safe_save_file
from ..commands.config.default import write_basic_config # noqa: F401
from ..logging import get_logger
from ..state import PartialState
from .constants import FSDP_PYTORCH_VERSION
from .dataclasses import DistributedType
from .imports import (
is_deepspeed_available,
is_numpy_available,
is_torch_distributed_available,
is_torch_xla_available,
is_weights_only_available,
)
from .modeling import id_tensor_storage
from .transformer_engine import convert_model
from .versions import is_torch_version
logger = get_logger(__name__)
if is_torch_xla_available():
import torch_xla.core.xla_model as xm
def is_compiled_module(module):
"""
Check whether the module was compiled with torch.compile()
"""
if not hasattr(torch, "_dynamo"):
return False
return isinstance(module, torch._dynamo.eval_frame.OptimizedModule)
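# Hedged usage sketch (not part of the original utilities; assumes a torch build with `torch.compile`):
#
#   net = torch.nn.Linear(4, 4)
#   compiled = torch.compile(net)
#   assert not is_compiled_module(net)
#   assert is_compiled_module(compiled)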
def extract_model_from_parallel(
model, keep_fp32_wrapper: bool = True, keep_torch_compile: bool = True, recursive: bool = False
):
"""
Extract a model from its distributed containers.
Args:
model (`torch.nn.Module`):
The model to extract.
        keep_fp32_wrapper (`bool`, *optional*):
            Whether to keep the mixed precision (fp32 output conversion) forward wrapper on the model.
        keep_torch_compile (`bool`, *optional*):
            Whether to keep the `torch.compile` wrapper instead of returning the unwrapped original module.
recursive (`bool`, *optional*, defaults to `False`):
Whether to recursively extract all cases of `module.module` from `model` as well as unwrap child sublayers
recursively, not just the top-level distributed containers.
Returns:
`torch.nn.Module`: The extracted model.
"""
options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
is_compiled = is_compiled_module(model)
if is_compiled:
compiled_model = model
model = model._orig_mod
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
options += (DeepSpeedEngine,)
if is_torch_version(">=", FSDP_PYTORCH_VERSION) and is_torch_distributed_available():
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
options += (FSDP,)
while isinstance(model, options):
model = model.module
if recursive:
# This is needed in cases such as using FSDPv2 on XLA
def _recursive_unwrap(module):
# Wrapped modules are standardly wrapped as `module`, similar to the cases earlier
# with DDP, DataParallel, DeepSpeed, and FSDP
if hasattr(module, "module"):
unwrapped_module = _recursive_unwrap(module.module)
else:
unwrapped_module = module
# Next unwrap child sublayers recursively
for name, child in unwrapped_module.named_children():
setattr(unwrapped_module, name, _recursive_unwrap(child))
return unwrapped_module
# Start with top-level
model = _recursive_unwrap(model)
if not keep_fp32_wrapper:
forward = model.forward
original_forward = model.__dict__.pop("_original_forward", None)
if original_forward is not None:
while hasattr(forward, "__wrapped__"):
forward = forward.__wrapped__
if forward == original_forward:
break
model.forward = MethodType(forward, model)
if getattr(model, "_converted_to_transformer_engine", False):
convert_model(model, to_transformer_engine=False)
if keep_torch_compile and is_compiled:
compiled_model._orig_mod = model
model = compiled_model
return model
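# Hedged usage sketch (not part of the original utilities): unwrapping a `DataParallel` container.
#
#   net = torch.nn.Linear(8, 2)
#   wrapped = torch.nn.DataParallel(net)
#   assert extract_model_from_parallel(wrapped) is net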
def wait_for_everyone():
"""
Introduces a blocking point in the script, making sure all processes have reached this point before continuing.
<Tip warning={true}>
Make sure all processes will reach this instruction otherwise one of your processes will hang forever.
</Tip>
"""
PartialState().wait_for_everyone()
def clean_state_dict_for_safetensors(state_dict: dict):
"""
Cleans the state dictionary from a model and removes tensor aliasing if present.
Args:
state_dict (`dict`):
The state dictionary from a model
"""
ptrs = collections.defaultdict(list)
# When bnb serialization is used, weights in state dict can be strings
for name, tensor in state_dict.items():
if not isinstance(tensor, str):
ptrs[id_tensor_storage(tensor)].append(name)
# These are all pointers of tensors with shared memory
shared_ptrs = {ptr: names for ptr, names in ptrs.items() if len(names) > 1}
warn_names = set()
for names in shared_ptrs.values():
# When not all duplicates have been cleaned, we still remove those keys but put a clear warning.
# If the link between tensors was done at runtime then `from_pretrained` will not get
# the key back leading to random tensor. A proper warning will be shown
# during reload (if applicable), but since the file is not necessarily compatible with
# the config, better show a proper warning.
found_names = [name for name in names if name in state_dict]
warn_names.update(found_names[1:])
for name in found_names[1:]:
del state_dict[name]
if len(warn_names) > 0:
logger.warning(
f"Removed shared tensor {warn_names} while saving. This should be OK, but check by verifying that you don't receive any warning while reloading",
)
state_dict = {k: v.contiguous() if isinstance(v, torch.Tensor) else v for k, v in state_dict.items()}
return state_dict
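# Hedged illustration (not part of the original utilities; assumes `PartialState()` has already been
# initialized so the accelerate logger can emit the warning): tied weights keep only their first key.
#
#   shared = torch.randn(2, 2)
#   sd = {"encoder.weight": shared, "decoder.weight": shared}
#   cleaned = clean_state_dict_for_safetensors(sd)
#   # -> only "encoder.weight" remains, made contiguous for safetensors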
def save(obj, f, save_on_each_node: bool = False, safe_serialization: bool = False):
"""
Save the data to disk. Use in place of `torch.save()`.
Args:
obj:
The data to save
f:
The file (or file-like object) to use to save the data
save_on_each_node (`bool`, *optional*, defaults to `False`):
            Whether to save on every node's local main process instead of only on the global main process
safe_serialization (`bool`, *optional*, defaults to `False`):
Whether to save `obj` using `safetensors` or the traditional PyTorch way (that uses `pickle`).
"""
# When TorchXLA is enabled, it's necessary to transfer all data to the CPU before saving.
# Another issue arises with `id_tensor_storage`, which treats all XLA tensors as identical.
# If tensors remain on XLA, calling `clean_state_dict_for_safetensors` will result in only
# one XLA tensor remaining.
if PartialState().distributed_type == DistributedType.XLA:
obj = xm._maybe_convert_to_cpu(obj)
# Check if it's a model and remove duplicates
if safe_serialization:
save_func = partial(safe_save_file, metadata={"format": "pt"})
if isinstance(obj, OrderedDict):
obj = clean_state_dict_for_safetensors(obj)
else:
save_func = torch.save
if PartialState().is_main_process and not save_on_each_node:
save_func(obj, f)
elif PartialState().is_local_main_process and save_on_each_node:
save_func(obj, f)
# The following are considered "safe" globals to reconstruct various types of objects when using `weights_only=True`
# These should be added and then removed after loading in the file
np_core = np._core if is_numpy_available("2.0.0") else np.core
TORCH_SAFE_GLOBALS = [
# numpy arrays are just numbers, not objects, so we can reconstruct them safely
np_core.multiarray._reconstruct,
np.ndarray,
# The following are needed for the RNG states
encode,
np.dtype,
]
if is_numpy_available("1.25.0"):
TORCH_SAFE_GLOBALS.append(np.dtypes.UInt32DType)
def load(f, map_location=None, **kwargs):
"""
    Compatible drop-in replacement for `torch.load()` which allows for `weights_only` to be used if `torch` version is
2.4.0 or higher. Otherwise will ignore the kwarg.
Will also add (and then remove) an exception for numpy arrays
Args:
f:
The file (or file-like object) to use to load the data
map_location:
a function, `torch.device`, string or a dict specifying how to remap storage locations
**kwargs:
Additional keyword arguments to pass to `torch.load()`.
"""
try:
if is_weights_only_available():
old_safe_globals = torch.serialization.get_safe_globals()
if "weights_only" not in kwargs:
kwargs["weights_only"] = True
torch.serialization.add_safe_globals(TORCH_SAFE_GLOBALS)
else:
kwargs.pop("weights_only", None)
loaded_obj = torch.load(f, map_location=map_location, **kwargs)
finally:
if is_weights_only_available():
torch.serialization.clear_safe_globals()
if old_safe_globals:
torch.serialization.add_safe_globals(old_safe_globals)
return loaded_obj
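# Hedged usage sketch (not part of the original utilities; assumes `PartialState()` has been
# initialized and the path is illustrative): a `save`/`load` round trip.
#
#   state = {"step": 3, "rng": torch.get_rng_state()}
#   save(state, "/tmp/state.pt")
#   restored = load("/tmp/state.pt", map_location="cpu")  # uses weights_only=True when available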
def get_pretty_name(obj):
"""
Gets a pretty name from `obj`.
"""
if not hasattr(obj, "__qualname__") and not hasattr(obj, "__name__"):
obj = getattr(obj, "__class__", obj)
if hasattr(obj, "__qualname__"):
return obj.__qualname__
if hasattr(obj, "__name__"):
return obj.__name__
return str(obj)
def merge_dicts(source, destination):
"""
Recursively merges two dictionaries.
Args:
source (`dict`): The dictionary to merge into `destination`.
destination (`dict`): The dictionary to merge `source` into.
"""
for key, value in source.items():
if isinstance(value, dict):
node = destination.setdefault(key, {})
merge_dicts(value, node)
else:
destination[key] = value
return destination
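# Hedged illustration (not part of the original utilities): nested keys from `source` are merged
# into `destination` without discarding unrelated entries.
#
#   defaults = {"fsdp_config": {"sharding_strategy": "FULL_SHARD"}, "num_machines": 1}
#   overrides = {"fsdp_config": {"cpu_offload": True}}
#   merge_dicts(overrides, defaults)
#   # -> {"fsdp_config": {"sharding_strategy": "FULL_SHARD", "cpu_offload": True}, "num_machines": 1}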
def is_port_in_use(port: int = None) -> bool:
"""
Checks if a port is in use on `localhost`. Useful for checking if multiple `accelerate launch` commands have been
run and need to see if the port is already in use.
"""
if port is None:
port = 29500
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
return s.connect_ex(("localhost", port)) == 0
def convert_bytes(size):
"Converts `size` from bytes to the largest possible unit"
for x in ["bytes", "KB", "MB", "GB", "TB"]:
if size < 1024.0:
return f"{round(size, 2)} {x}"
size /= 1024.0
return f"{round(size, 2)} PB"
def check_os_kernel():
"""Warns if the kernel version is below the recommended minimum on Linux."""
# see issue #1929
info = platform.uname()
system = info.system
if system != "Linux":
return
_, version, *_ = re.split(r"(\d+\.\d+\.\d+)", info.release)
min_version = "5.5.0"
if Version(version) < Version(min_version):
msg = (
f"Detected kernel version {version}, which is below the recommended minimum of {min_version}; this can "
"cause the process to hang. It is recommended to upgrade the kernel to the minimum version or higher."
)
logger.warning(msg, main_process_only=True)
def recursive_getattr(obj, attr: str):
"""
Recursive `getattr`.
Args:
obj:
A class instance holding the attribute.
attr (`str`):
The attribute that is to be retrieved, e.g. 'attribute1.attribute2'.
"""
def _getattr(obj, attr):
return getattr(obj, attr)
return reduce(_getattr, [obj] + attr.split("."))
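# Hedged usage sketch (not part of the original utilities):
#
#   class Block(torch.nn.Module):
#       def __init__(self):
#           super().__init__()
#           self.linear = torch.nn.Linear(4, 4)
#
#   block = Block()
#   assert recursive_getattr(block, "linear.weight") is block.linear.weight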
| accelerate/src/accelerate/utils/other.py/0 | {
"file_path": "accelerate/src/accelerate/utils/other.py",
"repo_id": "accelerate",
"token_count": 4783
} |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from pathlib import Path
from unittest.mock import patch
import torch
from huggingface_hub.utils import GatedRepoError, RepositoryNotFoundError
import accelerate.commands.test as accelerate_test_cmd
from accelerate.commands.config.config_args import BaseConfig, ClusterConfig, SageMakerConfig, load_config_from_file
from accelerate.commands.estimate import estimate_command, estimate_command_parser, gather_data
from accelerate.commands.launch import _validate_launch_command, launch_command, launch_command_parser
from accelerate.commands.tpu import tpu_command_launcher, tpu_command_parser
from accelerate.test_utils.testing import (
capture_call_output,
path_in_accelerate_package,
require_multi_device,
require_timm,
require_transformers,
run_command,
)
from accelerate.utils import patch_environment
from accelerate.utils.launch import prepare_simple_launcher_cmd_env
class AccelerateLauncherTester(unittest.TestCase):
"""
Test case for verifying the `accelerate launch` CLI operates correctly.
    If a `default_config.yaml` file is located in the cache, it will be temporarily moved
    for the duration of the tests.
"""
test_file_path = path_in_accelerate_package("test_utils", "scripts", "test_cli.py")
notebook_launcher_path = path_in_accelerate_package("test_utils", "scripts", "test_notebook.py")
config_folder = Path.home() / ".cache/huggingface/accelerate"
config_file = "default_config.yaml"
config_path = config_folder / config_file
changed_path = config_folder / "_default_config.yaml"
test_config_path = Path("tests/test_configs")
parser = launch_command_parser()
@classmethod
def setUpClass(cls):
if cls.config_path.is_file():
cls.config_path.rename(cls.changed_path)
@classmethod
def tearDownClass(cls):
if cls.changed_path.is_file():
cls.changed_path.rename(cls.config_path)
def test_no_config(self):
args = ["--monitor_interval", "0.1", str(self.test_file_path)]
if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
args = ["--multi_gpu"] + args
        args = self.parser.parse_args(args)
launch_command(args)
def test_config_compatibility(self):
invalid_configs = ["fp8", "invalid", "mpi", "sagemaker"]
for config in sorted(self.test_config_path.glob("**/*.yaml")):
if any(invalid_config in str(config) for invalid_config in invalid_configs):
continue
with self.subTest(config_file=config):
args = self.parser.parse_args(["--config_file", str(config), str(self.test_file_path)])
launch_command(args)
def test_invalid_keys(self):
config_path = self.test_config_path / "invalid_keys.yaml"
with self.assertRaises(
ValueError,
msg="The config file at 'invalid_keys.yaml' had unknown keys ('another_invalid_key', 'invalid_key')",
):
args = self.parser.parse_args(["--config_file", str(config_path), str(self.test_file_path)])
launch_command(args)
def test_accelerate_test(self):
args = accelerate_test_cmd.test_command_parser().parse_args([])
accelerate_test_cmd.test_command(args)
@require_multi_device
def test_notebook_launcher(self):
"""
This test checks a variety of situations and scenarios
with the `notebook_launcher`
"""
cmd = ["python", self.notebook_launcher_path]
with patch_environment(omp_num_threads=1, accelerate_num_processes=2):
run_command(cmd)
def test_mpi_multicpu_config_cmd(self):
"""
Parses a launch command with a test file and the 0_28_0_mpi.yaml config. Tests getting the command and
environment vars and verifies the mpirun command arg values.
"""
mpi_config_path = str(self.test_config_path / "0_28_0_mpi.yaml")
test_file_arg = "--cpu"
with patch("sys.argv", ["accelerate", str(self.test_file_path), test_file_arg]):
parser = launch_command_parser()
args = parser.parse_args()
args.config_file = mpi_config_path
args, _, _ = _validate_launch_command(args)
# Mock out the check for mpirun version to simulate Intel MPI
with patch("accelerate.utils.launch.which", return_value=True):
with patch("accelerate.utils.launch.subprocess.check_output", return_value=b"Intel MPI"):
cmd, _ = prepare_simple_launcher_cmd_env(args)
# Verify the mpirun command args
expected_mpirun_cmd = ["mpirun", "-f", "/home/user/hostfile", "-ppn", "4", "-n", "16"]
self.assertGreater(len(cmd), len(expected_mpirun_cmd))
generated_mpirun_cmd = cmd[0 : len(expected_mpirun_cmd)]
self.assertEqual(expected_mpirun_cmd, generated_mpirun_cmd)
# Verify the python script and args in the mpirun command
python_script_cmd = cmd[len(expected_mpirun_cmd) :]
self.assertEqual(len(python_script_cmd), 3)
self.assertEqual(python_script_cmd[1], str(self.test_file_path))
self.assertEqual(python_script_cmd[2], test_file_arg)
class LaunchArgTester(unittest.TestCase):
"""
Test cases revolving around the CLI wrappers
"""
parser = launch_command_parser()
def test_hyphen(self):
# Try a little from each cluster
args = ["--config-file", "test.yaml", "test.py"]
result = self.parser.parse_args(args)
assert result.config_file == "test.yaml"
assert result.multi_gpu is False
args = ["--multi-gpu", "--num-processes", "4", "test.py"]
result = self.parser.parse_args(args)
assert result.multi_gpu is True
assert result.num_processes == 4
# And use a mix
args = ["--multi-gpu", "--use-deepspeed", "--use-fsdp", "--num_processes", "4", "test.py"]
result = self.parser.parse_args(args)
assert result.multi_gpu is True
assert result.use_deepspeed is True
assert result.use_fsdp is True
assert result.num_processes == 4
def test_underscore(self):
# Try a little from each cluster
args = ["--config_file", "test.yaml", "test.py"]
result = self.parser.parse_args(args)
assert result.config_file == "test.yaml"
args = ["--multi_gpu", "--num_processes", "4", "test.py"]
result = self.parser.parse_args(args)
assert result.multi_gpu is True
assert result.num_processes == 4
# And use a mix
args = ["--multi_gpu", "--use_deepspeed", "--use_fsdp", "--num-processes", "4", "test.py"]
result = self.parser.parse_args(args)
assert result.multi_gpu is True
assert result.use_deepspeed is True
assert result.use_fsdp is True
assert result.num_processes == 4
def test_duplicate_entities(self):
help_return = self.parser.format_help()
args = self.parser.parse_args(["test.py"])
for arg in args.__dict__:
if "_" in arg:
bad_arg = f'--{arg.replace("_", "-")}'
# Need an exception for `num-processes` since it's in the docstring
if bad_arg == "--num-processes":
assert help_return.count(bad_arg) == 1, f"Found {bad_arg} in `accelerate launch -h`"
else:
assert bad_arg not in help_return, f"Found {bad_arg} in `accelerate launch -h`"
class ClusterConfigTester(unittest.TestCase):
"""
Test case for verifying the config dataclasses work
"""
test_config_path = Path("tests/test_configs")
def test_base_config(self):
# Tests that all the dataclasses can be initialized
config = BaseConfig(
compute_environment="LOCAL_MACHINE",
distributed_type="NO",
mixed_precision="fp16",
debug=False,
use_cpu=False,
)
assert config.compute_environment == "LOCAL_MACHINE"
assert config.distributed_type == "NO"
assert config.mixed_precision == "fp16"
assert config.debug is False
def test_cluster_config(self):
# First normally
config = ClusterConfig(
compute_environment="LOCAL_MACHINE",
distributed_type="NO",
mixed_precision="fp16",
num_processes=2,
debug=False,
use_cpu=False,
)
assert config.compute_environment == "LOCAL_MACHINE"
assert config.distributed_type == "NO"
assert config.mixed_precision == "fp16"
assert config.debug is False
# Then check with other compute environments
config = ClusterConfig(
compute_environment="LOCAL_MACHINE",
distributed_type="MULTI_GPU",
mixed_precision="fp16",
debug=False,
num_processes=2,
enable_cpu_affinity=True,
use_cpu=False,
)
assert config.distributed_type == "MULTI_GPU"
assert config.num_processes == 2
assert config.enable_cpu_affinity is True
def test_sagemaker_config(self):
config = SageMakerConfig(
compute_environment="AMAZON_SAGEMAKER",
distributed_type="NO",
mixed_precision="fp16",
debug=False,
use_cpu=False,
ec2_instance_type="MY_TYPE",
iam_role_name="MY_ROLE",
)
assert config.compute_environment == "AMAZON_SAGEMAKER"
assert config.ec2_instance_type == "MY_TYPE"
assert config.iam_role_name == "MY_ROLE"
config = load_config_from_file(str(self.test_config_path / "0_30_0_sagemaker.yaml"))
class TpuConfigTester(unittest.TestCase):
"""
Test case for verifying the `accelerate tpu-config` CLI passes the right `gcloud` command.
"""
tpu_name = "test-tpu"
tpu_zone = "us-central1-a"
command = "ls"
cmd = ["accelerate", "tpu-config"]
base_output = "cd /usr/share"
command_file = "tests/test_samples/test_command_file.sh"
gcloud = "Running gcloud compute tpus tpu-vm ssh"
def setUp(self):
self.parser = tpu_command_parser()
def test_base(self):
args = self.parser.parse_args(
["--command", self.command, "--tpu_zone", self.tpu_zone, "--tpu_name", self.tpu_name, "--debug"]
)
output = capture_call_output(tpu_command_launcher, args)
assert f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all" in output
def test_base_backward_compatibility(self):
args = self.parser.parse_args(
[
"--config_file",
"tests/test_configs/0_12_0.yaml",
"--command",
self.command,
"--tpu_zone",
self.tpu_zone,
"--tpu_name",
self.tpu_name,
"--debug",
]
)
output = capture_call_output(tpu_command_launcher, args)
assert f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all" in output
def test_with_config_file(self):
args = self.parser.parse_args(["--config_file", "tests/test_configs/latest.yaml", "--debug"])
output = capture_call_output(tpu_command_launcher, args)
assert (
f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all'
in output
)
def test_with_config_file_and_command(self):
args = self.parser.parse_args(
["--config_file", "tests/test_configs/latest.yaml", "--command", self.command, "--debug"]
)
output = capture_call_output(tpu_command_launcher, args)
assert f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all" in output
def test_with_config_file_and_multiple_command(self):
args = self.parser.parse_args(
[
"--config_file",
"tests/test_configs/latest.yaml",
"--command",
self.command,
"--command",
'echo "Hello World"',
"--debug",
]
)
output = capture_call_output(tpu_command_launcher, args)
assert (
f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo "Hello World" --worker all'
in output
)
def test_with_config_file_and_command_file(self):
args = self.parser.parse_args(
["--config_file", "tests/test_configs/latest.yaml", "--command_file", self.command_file, "--debug"]
)
output = capture_call_output(tpu_command_launcher, args)
assert (
f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all'
in output
)
def test_with_config_file_and_command_file_backward_compatibility(self):
args = self.parser.parse_args(
[
"--config_file",
"tests/test_configs/0_12_0.yaml",
"--command_file",
self.command_file,
"--tpu_zone",
self.tpu_zone,
"--tpu_name",
self.tpu_name,
"--debug",
]
)
output = capture_call_output(tpu_command_launcher, args)
assert (
f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all'
in output
)
def test_accelerate_install(self):
args = self.parser.parse_args(
["--config_file", "tests/test_configs/latest.yaml", "--install_accelerate", "--debug"]
)
output = capture_call_output(tpu_command_launcher, args)
assert (
f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo "hello world"; echo "this is a second command" --worker all'
in output
)
def test_accelerate_install_version(self):
args = self.parser.parse_args(
[
"--config_file",
"tests/test_configs/latest.yaml",
"--install_accelerate",
"--accelerate_version",
"12.0.0",
"--debug",
]
)
output = capture_call_output(tpu_command_launcher, args)
assert (
f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo "hello world"; echo "this is a second command" --worker all'
in output
)
class ModelEstimatorTester(unittest.TestCase):
"""
Test case for checking the output of `accelerate estimate-memory` is correct.
- Uses `estimate_command` when trying to catch raised errors
- Uses `gather_data` when just verifying the calculations are correct
"""
parser = estimate_command_parser()
def test_invalid_model_name(self):
with self.assertRaises(
RepositoryNotFoundError, msg="Repo for model `somebrokenname` does not exist on the Hub"
):
args = self.parser.parse_args(["somebrokenname"])
estimate_command(args)
@require_timm
def test_invalid_model_name_timm(self):
with self.assertRaises(RuntimeError, msg="Tried to load `muellerzr/dummy` with `timm` but"):
args = self.parser.parse_args(["muellerzr/dummy", "--library_name", "timm"])
estimate_command(args)
@require_transformers
def test_invalid_model_name_transformers(self):
with self.assertRaises(RuntimeError, msg="Tried to load `muellerzr/dummy` with `transformers` but"):
args = self.parser.parse_args(["muellerzr/dummy", "--library_name", "transformers"])
estimate_command(args)
def test_no_metadata(self):
with self.assertRaises(
ValueError, msg="Model `muellerzr/dummy` does not have any library metadata on the Hub"
):
args = self.parser.parse_args(["muellerzr/dummy"])
estimate_command(args)
def test_gated(self):
with self.assertRaises(
(GatedRepoError, EnvironmentError),
msg="Repo for model `meta-llama/Llama-2-7b-hf` is gated or environment error occurred",
):
args = self.parser.parse_args(["meta-llama/Llama-2-7b-hf"])
with patch_environment(hf_hub_disable_implicit_token="1"):
estimate_command(args)
@require_transformers
def test_remote_code(self):
# Also tests that custom `Auto` classes work
args = self.parser.parse_args(["hf-internal-testing/test_dynamic_model"])
with self.assertRaises(ValueError, msg="--trust_remote_code"):
gather_data(args)
# Verify it works with the flag
args = self.parser.parse_args(["hf-internal-testing/test_dynamic_model", "--trust_remote_code"])
gather_data(args)
@require_transformers
def test_explicit_dtypes(self):
args = self.parser.parse_args(["bert-base-cased", "--dtypes", "float32", "float16"])
output = gather_data(args)
# The largest layer and total size of the model in bytes
largest_layer, total_size = 90669056, 433249280
        # Check that full precision (float32) -> half precision (float16) is calculated correctly
assert len(output) == 2, f"Output was missing a precision, expected 2 but received {len(output)}"
for i, factor in enumerate([1, 2]):
precision = 32 // factor
precision_str = f"float{precision}"
largest_layer_estimate = largest_layer / factor
total_size_estimate = total_size / factor
total_training_size_estimate = total_size_estimate * 4
assert precision_str == output[i][0], f"Output is missing precision `{precision_str}`"
assert (
largest_layer_estimate == output[i][1]
), f"Calculation for largest layer size in `{precision_str}` is incorrect."
assert (
total_size_estimate == output[i][2]
), f"Calculation for total size in `{precision_str}` is incorrect."
assert total_training_size_estimate == max(
output[i][3].values()
), f"Calculation for total training size in `{precision_str}` is incorrect."
@require_transformers
def test_transformers_model(self):
args = self.parser.parse_args(["bert-base-cased", "--dtypes", "float32"])
output = gather_data(args)
# The largest layer and total size of the model in bytes
largest_layer, total_size = 90669056, 433249280
assert (
largest_layer == output[0][1]
), f"Calculation for largest layer size in `fp32` is incorrect, expected {largest_layer} but received {output[0][1]}"
assert (
total_size == output[0][2]
), f"Calculation for total size in `fp32` is incorrect, expected {total_size} but received {output[0][2]}"
@require_transformers
def test_no_split_modules(self):
# idefics-80b-instruct has ["IdeficsDecoderLayer", "IdeficsGatedCrossAttentionLayer"]
args = self.parser.parse_args(["HuggingFaceM4/idefics-80b-instruct", "--dtypes", "float32"])
output = gather_data(args)
# without factoring in `no_split` modules, the largest layer is 721420288 bytes
assert output[0][1] != 721420288, "Largest layer calculation incorrect, did not factor in `no_split` modules."
# the real answer is 3240165632 bytes
assert output[0][1] == 3240165632
@require_timm
def test_timm_model(self):
args = self.parser.parse_args(["timm/resnet50.a1_in1k", "--library_name", "timm"])
output = gather_data(args)
# The largest layer and total size of the model in bytes
largest_layer, total_size = 9437184, 102441032
assert (
largest_layer == output[0][1]
), f"Calculation for largest layer size in `fp32` is incorrect, expected {largest_layer} but received {output[0][1]}"
assert (
total_size == output[0][2]
), f"Calculation for total size in `fp32` is incorrect, expected {total_size} but received {output[0][2]}"
| accelerate/tests/test_cli.py/0 | {
"file_path": "accelerate/tests/test_cli.py",
"repo_id": "accelerate",
"token_count": 9247
} |
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import logging
import os
import pytest
from accelerate import Accelerator
from accelerate.logging import get_logger
def current_lineno() -> int:
# A simple helper that returns the lineno of its call-site.
caller_frame = inspect.currentframe().f_back
caller_info = inspect.getframeinfo(caller_frame)
return caller_info.lineno
class CustomLogger(logging.LoggerAdapter):
# Mocks a user-defined custom logger wrapper that sets `stacklevel=3`.
def log(self, level, msg, *args, **kwargs):
# E.g. the user wants to modify `stacklevel`, `accelerate.logging`
# should respect the user's `stacklevel`. For the specific value
# of `3`, calling `CustomLogger.log()`, etc., should log that callsite,
# rather than the callsite of the following `self.logger.log()`.
kwargs["stacklevel"] = 3
self.logger.log(level, msg, *args, **kwargs)
@pytest.fixture(scope="module")
def accelerator():
return Accelerator()
@pytest.mark.usefixtures("accelerator")
def test_log_stack(caplog):
logger = get_logger(__name__)
logging.basicConfig(
format="%(filename)s:%(name)s:%(lineno)s:%(funcName)s - %(message)s",
datefmt="%m/%d %H:%M:%S",
)
message = "Test"
lineno = current_lineno() + 1 # the next line is the actual callsite
logger.warning(message)
assert len(caplog.records) == 1
rec = caplog.records[0]
assert rec.levelname == logging.getLevelName(logging.WARNING)
assert rec.filename == os.path.basename(__file__)
assert rec.name == __name__
assert rec.lineno == lineno
assert rec.funcName == test_log_stack.__name__
assert rec.message == message
@pytest.mark.usefixtures("accelerator")
def test_custom_stacklevel(caplog):
wrapped_logger = get_logger(__name__)
logging.basicConfig(
format="%(filename)s:%(name)s:%(lineno)s:%(funcName)s - %(message)s",
datefmt="%m/%d %H:%M:%S",
)
logger = CustomLogger(wrapped_logger, {})
message = "Test"
lineno = current_lineno() + 1 # the next line is the actual callsite
logger.warning(message)
    # `CustomLogger.log` sets a custom `stacklevel=3`, so `logger.warning` should
    # log its own callsite (rather than that of the `wrapped_logger`).
assert len(caplog.records) == 1
rec = caplog.records[0]
assert rec.levelname == logging.getLevelName(logging.WARNING)
assert rec.filename == os.path.basename(__file__)
assert rec.name == __name__
assert rec.lineno == lineno
assert rec.funcName == test_custom_stacklevel.__name__
assert rec.message == message
| accelerate/tests/test_logging.py/0 | {
"file_path": "accelerate/tests/test_logging.py",
"repo_id": "accelerate",
"token_count": 1156
} |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pickle
import tempfile
import unittest
import warnings
from collections import UserDict, namedtuple
from typing import NamedTuple, Optional
from unittest.mock import Mock, patch
import numpy as np
import pytest
import torch
from torch import nn
from accelerate.big_modeling import cpu_offload_with_hook
from accelerate.hooks import attach_align_device_hook, remove_hook_from_module
from accelerate.state import PartialState
from accelerate.test_utils.testing import (
require_huggingface_suite,
require_non_cpu,
require_non_torch_xla,
require_torch_min_version,
require_tpu,
require_triton,
torch_device,
)
from accelerate.test_utils.training import RegressionModel
from accelerate.utils import (
CannotPadNestedTensorWarning,
check_os_kernel,
clear_environment,
convert_dict_to_env_variables,
convert_outputs_to_fp32,
convert_to_fp32,
extract_model_from_parallel,
find_device,
has_offloaded_params,
is_torch_xla_available,
listify,
pad_across_processes,
pad_input_tensors,
patch_environment,
purge_accelerate_environment,
recursively_apply,
save,
send_to_device,
)
from accelerate.utils.operations import is_namedtuple
if is_torch_xla_available():
import torch_xla.distributed.spmd as xs
import torch_xla.runtime as xr
from torch_xla.experimental.spmd_fully_sharded_data_parallel import SpmdFullyShardedDataParallel as FSDPv2
ExampleNamedTuple = namedtuple("ExampleNamedTuple", "a b c")
class UtilsTester(unittest.TestCase):
def setUp(self):
# logging requires initialized state
PartialState()
def test_send_to_device(self):
tensor = torch.randn(5, 2)
device = torch.device(f"{torch_device}:0")
result1 = send_to_device(tensor, device)
assert torch.equal(result1.cpu(), tensor)
result2 = send_to_device((tensor, [tensor, tensor], 1), device)
assert isinstance(result2, tuple)
assert torch.equal(result2[0].cpu(), tensor)
assert isinstance(result2[1], list)
assert torch.equal(result2[1][0].cpu(), tensor)
assert torch.equal(result2[1][1].cpu(), tensor)
assert result2[2] == 1
result2 = send_to_device({"a": tensor, "b": [tensor, tensor], "c": 1}, device)
assert isinstance(result2, dict)
assert torch.equal(result2["a"].cpu(), tensor)
assert isinstance(result2["b"], list)
assert torch.equal(result2["b"][0].cpu(), tensor)
assert torch.equal(result2["b"][1].cpu(), tensor)
assert result2["c"] == 1
result3 = send_to_device(ExampleNamedTuple(a=tensor, b=[tensor, tensor], c=1), device)
assert isinstance(result3, ExampleNamedTuple)
assert torch.equal(result3.a.cpu(), tensor)
assert isinstance(result3.b, list)
assert torch.equal(result3.b[0].cpu(), tensor)
assert torch.equal(result3.b[1].cpu(), tensor)
assert result3.c == 1
result4 = send_to_device(UserDict({"a": tensor, "b": [tensor, tensor], "c": 1}), device)
assert isinstance(result4, UserDict)
assert torch.equal(result4["a"].cpu(), tensor)
assert isinstance(result4["b"], list)
assert torch.equal(result4["b"][0].cpu(), tensor)
assert torch.equal(result4["b"][1].cpu(), tensor)
assert result4["c"] == 1
def test_honor_type(self):
with self.assertRaises(TypeError) as cm:
_ = recursively_apply(torch.tensor, (torch.tensor(1), 1), error_on_other_type=True)
assert (
str(cm.exception)
== "Unsupported types (<class 'int'>) passed to `tensor`. Only nested list/tuple/dicts of objects that are valid for `is_torch_tensor` should be passed."
)
def test_listify(self):
tensor = torch.tensor([1, 2, 3, 4, 5])
assert listify(tensor) == [1, 2, 3, 4, 5]
tensor = torch.tensor([[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]])
assert listify(tensor) == [[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]]
tensor = torch.tensor([[[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]], [[11, 12, 13, 14, 15], [16, 17, 18, 19, 20]]])
assert listify(tensor) == [[[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]], [[11, 12, 13, 14, 15], [16, 17, 18, 19, 20]]]
def test_patch_environment(self):
with patch_environment(aa=1, BB=2):
assert os.environ.get("AA") == "1"
assert os.environ.get("BB") == "2"
assert "AA" not in os.environ
assert "BB" not in os.environ
def test_patch_environment_key_exists(self):
# check that patch_environment correctly restores pre-existing env vars
with patch_environment(aa=1, BB=2):
assert os.environ.get("AA") == "1"
assert os.environ.get("BB") == "2"
with patch_environment(Aa=10, bb="20", cC=30):
assert os.environ.get("AA") == "10"
assert os.environ.get("BB") == "20"
assert os.environ.get("CC") == "30"
assert os.environ.get("AA") == "1"
assert os.environ.get("BB") == "2"
assert "CC" not in os.environ
assert "AA" not in os.environ
assert "BB" not in os.environ
assert "CC" not in os.environ
def test_patch_environment_restores_on_error(self):
# we need to find an upper-case envvar
        # because `patch_environment` upper-cases all keys...
key, orig_value = next(kv for kv in os.environ.items() if kv[0].isupper())
new_value = f"{orig_value}_foofoofoo"
with pytest.raises(RuntimeError), patch_environment(**{key: new_value}):
assert os.environ[key] == os.getenv(key) == new_value # noqa: TID251
raise RuntimeError("Oopsy daisy!")
assert os.environ[key] == os.getenv(key) == orig_value # noqa: TID251
def test_clear_environment(self):
key, value = os.environ.copy().popitem()
with pytest.raises(RuntimeError), clear_environment():
assert key not in os.environ
assert not os.getenv(key) # test the environment is actually cleared # noqa: TID251
raise RuntimeError("Oopsy daisy!")
# Test values are restored
assert os.getenv(key) == os.environ[key] == value # noqa: TID251
def test_can_undo_convert_outputs(self):
model = RegressionModel()
model._original_forward = model.forward
model.forward = convert_outputs_to_fp32(model.forward)
model = extract_model_from_parallel(model, keep_fp32_wrapper=False)
_ = pickle.dumps(model)
@require_non_cpu
def test_can_undo_fp16_conversion(self):
model = RegressionModel()
model._original_forward = model.forward
model.forward = torch.autocast(device_type=torch_device, dtype=torch.float16)(model.forward)
model.forward = convert_outputs_to_fp32(model.forward)
model = extract_model_from_parallel(model, keep_fp32_wrapper=False)
_ = pickle.dumps(model)
@require_triton
@require_non_cpu
def test_dynamo(self):
model = RegressionModel()
model._original_forward = model.forward
model.forward = torch.autocast(device_type=torch_device, dtype=torch.float16)(model.forward)
model.forward = convert_outputs_to_fp32(model.forward)
model.forward = torch.compile(model.forward, backend="inductor")
inputs = torch.randn(4, 10).to(torch_device)
_ = model(inputs)
def test_extract_model(self):
model = RegressionModel()
# could also do a test with DistributedDataParallel, but difficult to run on CPU or single GPU
distributed_model = torch.nn.parallel.DataParallel(model)
model_unwrapped = extract_model_from_parallel(distributed_model)
assert model == model_unwrapped
@require_tpu
@require_huggingface_suite
def test_extract_model_recursive_fsdpv2(self):
# Specifically tests for FSDPv2 extraction
# reported in https://github.com/huggingface/transformers/pull/29780
xr.use_spmd()
from transformers import AutoModelForCausalLM
model = AutoModelForCausalLM.from_pretrained("gpt2")
orig_state_dict_keys = list(model.state_dict().keys())
num_devices = xr.global_runtime_device_count()
# Set environment for FSDPv2 to be active
xs.set_global_mesh(xs.Mesh(np.array(range(num_devices)), (num_devices, 1), axis_names=("fsdp", "tensor")))
def nested_wrap(model):
layer = model.wte
wrapped_layer = FSDPv2(layer)
model.wte = wrapped_layer
return model
wrapped_model = nested_wrap(model)
unwrapped_model = extract_model_from_parallel(wrapped_model, recursive=True)
unwrapped_state_dict_keys = list(unwrapped_model.state_dict().keys())
for original_key, new_key in zip(orig_state_dict_keys, unwrapped_state_dict_keys):
assert original_key == new_key, f"Keys did not align: {original_key} != {new_key}"
def test_dynamo_extract_model_keep_torch_compile(self):
model = RegressionModel()
compiled_model = torch.compile(model)
# could also do a test with DistributedDataParallel, but difficult to run on CPU or single GPU
distributed_model = torch.nn.parallel.DataParallel(model)
distributed_compiled_model = torch.compile(distributed_model)
compiled_model_unwrapped = extract_model_from_parallel(distributed_compiled_model, keep_torch_compile=True)
assert compiled_model._orig_mod == compiled_model_unwrapped._orig_mod
def test_dynamo_extract_model_remove_torch_compile(self):
model = RegressionModel()
compiled_model = torch.compile(model)
# could also do a test with DistributedDataParallel, but difficult to run on CPU or single GPU
distributed_model = torch.nn.parallel.DataParallel(model)
distributed_compiled_model = torch.compile(distributed_model)
compiled_model_unwrapped = extract_model_from_parallel(distributed_compiled_model, keep_torch_compile=False)
assert compiled_model._orig_mod == compiled_model_unwrapped
def test_find_device(self):
assert find_device([1, "a", torch.tensor([1, 2, 3])]) == torch.device("cpu")
assert find_device({"a": 1, "b": torch.tensor([1, 2, 3])}) == torch.device("cpu")
assert find_device([1, "a"]) is None
def test_check_os_kernel_no_warning_when_release_gt_min(self):
# min version is 5.5
with patch("platform.uname", return_value=Mock(release="5.15.0-35-generic", system="Linux")):
with warnings.catch_warnings(record=True) as w:
check_os_kernel()
assert len(w) == 0
def test_check_os_kernel_no_warning_when_not_linux(self):
# system must be Linux
with patch("platform.uname", return_value=Mock(release="5.4.0-35-generic", system="Darwin")):
with warnings.catch_warnings(record=True) as w:
check_os_kernel()
assert len(w) == 0
def test_check_os_kernel_warning_when_release_lt_min(self):
# min version is 5.5
with patch("platform.uname", return_value=Mock(release="5.4.0-35-generic", system="Linux")):
with self.assertLogs() as ctx:
check_os_kernel()
assert len(ctx.records) == 1
assert ctx.records[0].levelname == "WARNING"
assert "5.4.0" in ctx.records[0].msg
assert "5.5.0" in ctx.records[0].msg
@require_non_torch_xla
def test_save_safetensor_shared_memory(self):
class Model(nn.Module):
def __init__(self):
super().__init__()
self.a = nn.Linear(100, 100)
self.b = self.a
def forward(self, x):
return self.b(self.a(x))
model = Model()
with tempfile.TemporaryDirectory() as tmp_dir:
save_path = os.path.join(tmp_dir, "model.safetensors")
with self.assertLogs(level="WARNING") as log:
save(model.state_dict(), save_path, safe_serialization=True)
assert len(log.records) == 1
assert "Removed shared tensor" in log.output[0]
@require_torch_min_version(version="1.12")
def test_pad_across_processes(self):
from torch.nested import nested_tensor
nt = nested_tensor([[1, 2, 3], [1], [1, 2]])
with self.assertWarns(CannotPadNestedTensorWarning):
nt2 = pad_across_processes(nt)
assert nt is nt2
# Basic functionality
tensor = torch.randn(4, 3, 100)
padded_tensor = pad_across_processes(tensor, dim=-1)
assert padded_tensor.shape[-1] == 100
# dim = -4 is out of bounds
padded_tensor = pad_across_processes(tensor, dim=-4)
assert padded_tensor is tensor
def test_slice_and_concatenate(self):
# First base case: 2 processes, batch size of 1
num_processes = 2
batch_size = 1
batch = torch.rand(batch_size, 4)
result = pad_input_tensors(batch, batch_size, num_processes)
# We should expect there to be 2 items now
assert result.shape == torch.Size([2, 4])
# Second base case: 2 processes, batch size of 3
num_processes = 2
batch_size = 3
batch = torch.rand(batch_size, 4)
result = pad_input_tensors(batch, batch_size, num_processes)
# We should expect there to be 4 items now
assert result.shape == torch.Size([4, 4])
# Third base case: 3 processes, batch size of 4
num_processes = 3
batch_size = 4
batch = torch.rand(batch_size, 4, 4)
result = pad_input_tensors(batch, batch_size, num_processes)
# We should expect there to be 6 items now
assert result.shape == torch.Size([6, 4, 4])
# Fourth base case: 4 processes, batch size of 3
num_processes = 4
batch_size = 3
batch = torch.rand(batch_size, 4, 4)
result = pad_input_tensors(batch, batch_size, num_processes)
# We should expect there to be 4 items now
assert result.shape == torch.Size([4, 4, 4])
# Fifth base case: 6 processes, batch size of 4
num_processes = 6
batch_size = 4
batch = torch.rand(batch_size, 4, 4)
result = pad_input_tensors(batch, batch_size, num_processes)
# We should expect there to be 6 items now
assert result.shape == torch.Size([6, 4, 4])
# Sixth base case: 6 processes, batch size of 1
num_processes = 6
batch_size = 1
batch = torch.rand(batch_size, 4, 4)
result = pad_input_tensors(batch, batch_size, num_processes)
# We should expect there to be 6 items now
assert result.shape == torch.Size([6, 4, 4])
# Seventh base case: 6 processes, batch size of 2
num_processes = 6
batch_size = 2
batch = torch.rand(batch_size, 4, 4)
result = pad_input_tensors(batch, batch_size, num_processes)
# We should expect there to be 6 items now
assert result.shape == torch.Size([6, 4, 4])
# Eighth base case: 6 processes, batch size of 61
num_processes = 6
batch_size = 61
batch = torch.rand(batch_size, 4, 4)
result = pad_input_tensors(batch, batch_size, num_processes)
# We should expect there to be 66 items now
assert result.shape == torch.Size([66, 4, 4])
def test_send_to_device_compiles(self):
compiled_send_to_device = torch.compile(send_to_device, fullgraph=True)
compiled_send_to_device(torch.zeros([1], dtype=torch.bfloat16), "cpu")
def test_convert_to_fp32(self):
compiled_convert_to_fp32 = torch.compile(convert_to_fp32, fullgraph=True)
compiled_convert_to_fp32(torch.zeros([1], dtype=torch.bfloat16))
def test_named_tuples(self):
class QuantTensorBase(NamedTuple):
value: torch.Tensor
scale: Optional[torch.Tensor]
zero_point: Optional[torch.Tensor]
class Second(QuantTensorBase):
pass
a = QuantTensorBase(torch.tensor(1.0), None, None)
b = Second(torch.tensor(1.0), None, None)
point = namedtuple("Point", ["x", "y"])
p = point(11, y=22)
self.assertTrue(is_namedtuple(a))
self.assertTrue(is_namedtuple(b))
self.assertTrue(is_namedtuple(p))
self.assertFalse(is_namedtuple((1, 2)))
self.assertFalse(is_namedtuple("hey"))
self.assertFalse(is_namedtuple(object()))
def test_convert_dict_to_env_variables(self):
env = {"ACCELERATE_DEBUG_MODE": "1", "BAD_ENV_NAME": "<mything", "OTHER_ENV": "2"}
with self.assertLogs("accelerate.utils.environment", level="WARNING"):
valid_env_items = convert_dict_to_env_variables(env)
assert valid_env_items == ["ACCELERATE_DEBUG_MODE=1\n", "OTHER_ENV=2\n"]
def test_has_offloaded_params(self):
model = RegressionModel()
assert not has_offloaded_params(model)
attach_align_device_hook(model, offload=False)
assert not has_offloaded_params(model)
remove_hook_from_module(model)
model, _ = cpu_offload_with_hook(model)
assert not has_offloaded_params(model)
remove_hook_from_module(model)
attach_align_device_hook(model, offload=True)
assert has_offloaded_params(model)
def set_dummy_accelerate_env_var():
"""Set an accelerate env var
    This function emulates the behavior of, for instance, transformers.TrainingArguments, which is allowed to set
accelerate env vars but does not clean them up. E.g.
TrainingArguments(fp16=True, output_dir="/tmp/test")
leaves ACCELERATE_MIXED_PRECISION=fp16 as an env var.
"""
os.environ["ACCELERATE_SOME_ENV_VAR"] = "true"
@purge_accelerate_environment
class MyUnittest(unittest.TestCase):
def test_purge_env_vars_unittest_1(self):
os.environ.pop("ACCELERATE_SOME_ENV_VAR", None)
set_dummy_accelerate_env_var()
assert "ACCELERATE_SOME_ENV_VAR" in os.environ
def test_purge_env_vars_unittest_2(self):
assert "ACCELERATE_SOME_ENV_VAR" not in os.environ
@unittest.skipIf(False, "dummy unittest wrapper")
@purge_accelerate_environment
@unittest.skipUnless(True, "dummy unittest wrapper")
class MyUnittestWithDecorators(unittest.TestCase):
def test_purge_env_vars_unittest_with_wrapper_1(self):
os.environ.pop("ACCELERATE_SOME_ENV_VAR", None)
set_dummy_accelerate_env_var()
assert "ACCELERATE_SOME_ENV_VAR" in os.environ
def test_purge_env_vars_unittest_with_wrapper_2(self):
assert "ACCELERATE_SOME_ENV_VAR" not in os.environ
@unittest.skipIf(False, "dummy unittest wrapper")
def test_purge_env_vars_unittest_with_wrapper_3(self):
assert "ACCELERATE_SOME_ENV_VAR" not in os.environ
@unittest.skipIf(True, "this is always skipped")
def test_purge_env_vars_unittest_with_wrapper_4(self):
# ensure that unittest markers still do their job
assert False
@purge_accelerate_environment
class _BaseCls(unittest.TestCase):
def test_purge_env_vars_unittest_with_inheritance_3(self):
assert "ACCELERATE_SOME_ENV_VAR" not in os.environ
class MyUnittestWithInheritance(_BaseCls):
def test_purge_env_vars_unittest_with_inheritance_1(self):
os.environ.pop("ACCELERATE_SOME_ENV_VAR", None)
set_dummy_accelerate_env_var()
assert "ACCELERATE_SOME_ENV_VAR" in os.environ
def test_purge_env_vars_unittest_with_inheritance_2(self):
assert "ACCELERATE_SOME_ENV_VAR" not in os.environ
@purge_accelerate_environment
class TestMyPytest:
def test_purge_env_vars_pytest_1(self):
os.environ.pop("ACCELERATE_SOME_ENV_VAR", None)
set_dummy_accelerate_env_var()
assert "ACCELERATE_SOME_ENV_VAR" in os.environ
def test_purge_env_vars_pytest_2(self):
assert "ACCELERATE_SOME_ENV_VAR" not in os.environ
@pytest.fixture
def dummy_fixture():
pass
@pytest.mark.skipif(False, reason="dummy pytest wrapper")
@pytest.mark.usefixtures("dummy_fixture")
@purge_accelerate_environment
@pytest.mark.skipif(False, reason="dummy pytest wrapper")
@pytest.mark.usefixtures("dummy_fixture")
class TestPytestWithWrapper:
def test_purge_env_vars_pytest_with_wrapper_1(self):
os.environ.pop("ACCELERATE_SOME_ENV_VAR", None)
set_dummy_accelerate_env_var()
assert "ACCELERATE_SOME_ENV_VAR" in os.environ
def test_purge_env_vars_pytest_with_wrapper_2(self):
assert "ACCELERATE_SOME_ENV_VAR" not in os.environ
@pytest.mark.skipif(False, reason="dummy pytest wrapper")
@pytest.mark.usefixtures("dummy_fixture")
def test_purge_env_vars_pytest_with_wrapper_3(self):
assert "ACCELERATE_SOME_ENV_VAR" not in os.environ
@pytest.mark.skipif(True, reason="this is always skipped")
def test_purge_env_vars_pytest_with_wrapper_4_should_be_skipped(self):
# ensure that pytest markers still do their job
assert False
@purge_accelerate_environment
class _PytestBaseCls:
def test_purge_env_vars_pytest_with_inheritance_3(self):
assert "ACCELERATE_SOME_ENV_VAR" not in os.environ
class TestPytestWithInheritance(_PytestBaseCls):
def test_purge_env_vars_pytest_with_inheritance_1(self):
os.environ.pop("ACCELERATE_SOME_ENV_VAR", None)
set_dummy_accelerate_env_var()
assert "ACCELERATE_SOME_ENV_VAR" in os.environ
def test_purge_env_vars_pytest_with_inheritance_2(self):
assert "ACCELERATE_SOME_ENV_VAR" not in os.environ
@purge_accelerate_environment
def test_purge_env_vars_standalone_1():
os.environ.pop("ACCELERATE_SOME_ENV_VAR", None)
set_dummy_accelerate_env_var()
assert "ACCELERATE_SOME_ENV_VAR" in os.environ
def test_purge_env_vars_standalone_2():
assert "ACCELERATE_SOME_ENV_VAR" not in os.environ
def test_purge_env_vars_restores_previous_values():
# Ensure that purge_accelerate_environment restores values of previous accelerate env vars and does not delete
# untouched env vars.
@purge_accelerate_environment
def dummy_func():
os.environ["ACCELERATE_SOME_ENV_VAR"] = "456"
os.environ["ACCELERATE_SOME_ENV_VAR"] = "1"
os.environ["ACCELERATE_ANOTHER_ENV_VAR"] = "2"
dummy_func()
assert os.environ["ACCELERATE_SOME_ENV_VAR"] == "1"
assert os.environ["ACCELERATE_ANOTHER_ENV_VAR"] == "2"
del os.environ["ACCELERATE_SOME_ENV_VAR"]
del os.environ["ACCELERATE_ANOTHER_ENV_VAR"]
| accelerate/tests/test_utils.py/0 | {
"file_path": "accelerate/tests/test_utils.py",
"repo_id": "accelerate",
"token_count": 10075
} |
Permission is hereby granted, free of charge, to any
person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the
Software without restriction, including without
limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice
shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
| candle/LICENSE-MIT/0 | {
"file_path": "candle/LICENSE-MIT",
"repo_id": "candle",
"token_count": 263
} |
# Training
Training starts with data. We're going to use the Hugging Face Hub and
start with the "Hello World" dataset of machine learning, MNIST.
Let's start with downloading `MNIST` from [huggingface](https://huggingface.co/datasets/mnist).
This requires [`hf-hub`](https://github.com/huggingface/hf-hub).
```bash
cargo add hf-hub
```
This is going to be very hands-on for now.
```rust,ignore
{{#include ../../../candle-examples/src/lib.rs:book_training_1}}
```
This uses the standardized `parquet` files from the `refs/convert/parquet` branch on every dataset.
Our handles are now [`parquet::file::serialized_reader::SerializedFileReader`].
We can inspect the content of the files with:
```rust,ignore
{{#include ../../../candle-examples/src/lib.rs:book_training_2}}
```
You should see something like:
```bash
Column id 1, name label, value 6
Column id 0, name image, value {bytes: [137, ....]
Column id 1, name label, value 8
Column id 0, name image, value {bytes: [137, ....]
```
So each row contains two columns (image, label), with the image stored as bytes.
Let's put them into a useful struct.
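A minimal sketch of such a struct, assuming we simply keep the raw image bytes and the label as they appear in the parquet rows (the field names here are illustrative):
```rust
// One MNIST row as read from the parquet file: the encoded image bytes
// and the digit label.
struct MnistItem {
    image_bytes: Vec<u8>,
    label: i64,
}
```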
| candle/candle-book/src/training/training.md/0 | {
"file_path": "candle/candle-book/src/training/training.md",
"repo_id": "candle",
"token_count": 361
} |
#[cfg(feature = "mkl")]
extern crate intel_mkl_src;
#[cfg(feature = "accelerate")]
extern crate accelerate_src;
use std::str::FromStr;
use anyhow::Result;
use candle_core::{Device, Tensor};
fn cos_sin(n: usize, device: &Device) -> Result<Tensor> {
let thetas: Vec<_> = (0..n).map(|i| (i as f32 / n as f32)).collect();
let xs: Vec<_> = thetas.iter().map(|t| t.cos().abs()).collect();
let ys: Vec<_> = thetas.iter().map(|t| t.sin().abs()).collect();
let xs = Tensor::from_vec(xs, (n, 1), device)?;
let ys = Tensor::from_vec(ys, (1, n), device)?;
let ys = Tensor::cat(&[&ys, &ys, &ys, &ys, &ys, &ys], 1)?;
Ok(xs.matmul(&ys)?)
}
fn main() -> Result<()> {
let device = Device::new_cuda(0)?;
let args = std::env::args().collect::<Vec<String>>();
let n = if args.len() < 2 {
2000usize
} else {
usize::from_str(&args[1])?
};
let xys_cpu = cos_sin(n, &Device::Cpu)?;
let xys = cos_sin(n, &device)?;
println!("{xys_cpu:?} {xys:?}");
let sum_keepdim_cpu = xys_cpu.sum_keepdim(1)?;
println!("{sum_keepdim_cpu}");
let sum_keepdim = xys.sum_keepdim(1)?;
println!("{sum_keepdim}");
let start = std::time::Instant::now();
let n_iters = 100;
let mut v = 0f32;
for _i in 0..n_iters {
let sum_keepdim = xys.sum_keepdim(1)?;
let sum_keepdim = sum_keepdim.sum_keepdim(0)?;
let sum_keepdim: f32 = sum_keepdim.reshape(&[])?.to_scalar()?;
v += sum_keepdim;
}
let elapsed = start.elapsed();
if v > 0. {
println!(
"ran {n_iters} iterations, time per iter: {:?} ({v})",
elapsed.div_f64(n_iters as f64)
);
}
Ok(())
}
| candle/candle-core/examples/cuda_sum_benchmark.rs/0 | {
"file_path": "candle/candle-core/examples/cuda_sum_benchmark.rs",
"repo_id": "candle",
"token_count": 827
} |
use crate::backend::BackendDevice;
use crate::{CpuStorage, CpuStorageRef, DType, Layout, Result, Shape};
pub use candle_kernels as kernels;
pub use cudarc;
use cudarc::driver::{CudaFunction, LaunchAsync, LaunchConfig};
use half::{bf16, f16};
use std::sync::{Arc, Mutex};
use super::{CudaError, CudaStorage, CudaStorageSlice, WrapErr};
/// Unique identifier for cuda devices.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub struct DeviceId(usize);
impl DeviceId {
fn new() -> Self {
// https://users.rust-lang.org/t/idiomatic-rust-way-to-generate-unique-id/33805
use std::sync::atomic;
static COUNTER: atomic::AtomicUsize = atomic::AtomicUsize::new(1);
Self(COUNTER.fetch_add(1, atomic::Ordering::Relaxed))
}
}
struct CudaRng(cudarc::curand::CudaRng);
unsafe impl Send for CudaRng {}
#[derive(Clone)]
pub struct CudaDevice {
id: DeviceId,
device: Arc<cudarc::driver::CudaDevice>,
pub(crate) blas: Arc<cudarc::cublas::CudaBlas>,
curand: Arc<Mutex<CudaRng>>,
}
impl std::fmt::Debug for CudaDevice {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "CudaDevice({:?})", self.id)
}
}
impl std::ops::Deref for CudaDevice {
type Target = Arc<cudarc::driver::CudaDevice>;
fn deref(&self) -> &Self::Target {
&self.device
}
}
impl CudaDevice {
pub fn cuda_device(&self) -> Arc<cudarc::driver::CudaDevice> {
self.device.clone()
}
#[cfg(not(target_arch = "wasm32"))]
pub fn compile(
&self,
func_name: &'static str,
kernel: ug::lang::ssa::Kernel,
) -> Result<CudaFunction> {
let mut buf = vec![];
ug_cuda::code_gen::gen(&mut buf, func_name, &kernel)?;
let cuda_code = String::from_utf8(buf)?;
let opts = cudarc::nvrtc::CompileOptions {
use_fast_math: Some(true),
..Default::default()
};
let ptx = cudarc::nvrtc::safe::compile_ptx_with_opts(cuda_code, opts).w()?;
self.device.load_ptx(ptx, "ug", &[func_name]).w()?;
let func = match self.device.get_func("ug", func_name) {
Some(func) => func,
None => crate::bail!("unknown function ug::{func_name}"),
};
Ok(func)
}
pub fn id(&self) -> DeviceId {
self.id
}
fn const_impl(&self, v: f64, shape: &Shape, dtype: DType) -> Result<CudaStorage> {
let elem_count = shape.elem_count();
let cfg = LaunchConfig::for_num_elems(elem_count as u32);
let slice = match dtype {
DType::U8 => {
// SAFETY: Set later by running the fill kernel.
let data = unsafe { self.alloc::<u8>(elem_count) }.w()?;
let func = self.get_or_load_func("fill_u8", kernels::FILL)?;
let params = (&data, v as u8, elem_count);
unsafe { func.launch(cfg, params) }.w()?;
CudaStorageSlice::U8(data)
}
DType::U32 => {
// SAFETY: Set later by running the fill kernel.
let data = unsafe { self.alloc::<u32>(elem_count) }.w()?;
let func = self.get_or_load_func("fill_u32", kernels::FILL)?;
let params = (&data, v as u32, elem_count);
unsafe { func.launch(cfg, params) }.w()?;
CudaStorageSlice::U32(data)
}
DType::I64 => {
// SAFETY: Set later by running the fill kernel.
let data = unsafe { self.alloc::<i64>(elem_count) }.w()?;
let func = self.get_or_load_func("fill_i64", kernels::FILL)?;
let params = (&data, v as i64, elem_count);
unsafe { func.launch(cfg, params) }.w()?;
CudaStorageSlice::I64(data)
}
DType::BF16 => {
// SAFETY: Set later by running the fill kernel.
let data = unsafe { self.alloc::<bf16>(elem_count) }.w()?;
let func = self.get_or_load_func("fill_bf16", kernels::FILL)?;
let params = (&data, bf16::from_f64(v), elem_count);
unsafe { func.launch(cfg, params) }.w()?;
CudaStorageSlice::BF16(data)
}
DType::F16 => {
// SAFETY: Set later by running the fill kernel.
let data = unsafe { self.alloc::<f16>(elem_count) }.w()?;
let func = self.get_or_load_func("fill_f16", kernels::FILL)?;
let params = (&data, f16::from_f64(v), elem_count);
unsafe { func.launch(cfg, params) }.w()?;
CudaStorageSlice::F16(data)
}
DType::F32 => {
// SAFETY: Set later by running the fill kernel.
let data = unsafe { self.alloc::<f32>(elem_count) }.w()?;
let func = self.get_or_load_func("fill_f32", kernels::FILL)?;
let params = (&data, v as f32, elem_count);
unsafe { func.launch(cfg, params) }.w()?;
CudaStorageSlice::F32(data)
}
DType::F64 => {
// SAFETY: Set later by running the fill kernel.
let data = unsafe { self.alloc::<f64>(elem_count) }.w()?;
let func = self.get_or_load_func("fill_f64", kernels::FILL)?;
let params = (&data, v, elem_count);
unsafe { func.launch(cfg, params) }.w()?;
CudaStorageSlice::F64(data)
}
};
Ok(CudaStorage {
slice,
device: self.clone(),
})
}
pub fn get_or_load_func(&self, module_name: &str, ptx: &'static str) -> Result<CudaFunction> {
if !self.has_func(module_name, module_name) {
// Leaking the string here is a bit sad but we need a &'static str and this is only
// done once per kernel name.
let static_module_name = Box::leak(module_name.to_string().into_boxed_str());
self.load_ptx(ptx.into(), module_name, &[static_module_name])
.map_err(|cuda| CudaError::Load {
cuda,
module_name: module_name.to_string(),
})
.w()?;
}
self.get_func(module_name, module_name)
// Clippy recommends this `ok_or` rather than `ok_or_else` so hopefully the compiler is
// able to only build the error value if needed.
.ok_or(CudaError::MissingKernel {
module_name: module_name.to_string(),
})
.w()
}
}
impl CudaDevice {
pub fn new_with_stream(ordinal: usize) -> Result<Self> {
let device = cudarc::driver::CudaDevice::new_with_stream(ordinal).w()?;
let blas = cudarc::cublas::CudaBlas::new(device.clone()).w()?;
let curand = cudarc::curand::CudaRng::new(299792458, device.clone()).w()?;
Ok(Self {
id: DeviceId::new(),
device,
blas: Arc::new(blas),
curand: Arc::new(Mutex::new(CudaRng(curand))),
})
}
}
impl BackendDevice for CudaDevice {
type Storage = CudaStorage;
fn new(ordinal: usize) -> Result<Self> {
let device = cudarc::driver::CudaDevice::new(ordinal).w()?;
let blas = cudarc::cublas::CudaBlas::new(device.clone()).w()?;
let curand = cudarc::curand::CudaRng::new(299792458, device.clone()).w()?;
Ok(Self {
id: DeviceId::new(),
device,
blas: Arc::new(blas),
curand: Arc::new(Mutex::new(CudaRng(curand))),
})
}
fn set_seed(&self, seed: u64) -> Result<()> {
        // We do not call `set_seed` on the existing rng but instead create a new curand object.
        // This fully resets the generator state, so the same seed always produces the same
        // sequence of random numbers.
let mut curand = self.curand.lock().unwrap();
curand.0 = cudarc::curand::CudaRng::new(seed, self.device.clone()).w()?;
Ok(())
}
fn location(&self) -> crate::DeviceLocation {
crate::DeviceLocation::Cuda {
gpu_id: self.device.ordinal(),
}
}
fn same_device(&self, rhs: &Self) -> bool {
self.id == rhs.id
}
fn zeros_impl(&self, shape: &Shape, dtype: DType) -> Result<CudaStorage> {
let elem_count = shape.elem_count();
let slice = match dtype {
DType::U8 => {
let data = self.alloc_zeros::<u8>(elem_count).w()?;
CudaStorageSlice::U8(data)
}
DType::U32 => {
let data = self.alloc_zeros::<u32>(elem_count).w()?;
CudaStorageSlice::U32(data)
}
DType::I64 => {
let data = self.alloc_zeros::<i64>(elem_count).w()?;
CudaStorageSlice::I64(data)
}
DType::BF16 => {
let data = self.alloc_zeros::<bf16>(elem_count).w()?;
CudaStorageSlice::BF16(data)
}
DType::F16 => {
let data = self.alloc_zeros::<f16>(elem_count).w()?;
CudaStorageSlice::F16(data)
}
DType::F32 => {
let data = self.alloc_zeros::<f32>(elem_count).w()?;
CudaStorageSlice::F32(data)
}
DType::F64 => {
let data = self.alloc_zeros::<f64>(elem_count).w()?;
CudaStorageSlice::F64(data)
}
};
Ok(CudaStorage {
slice,
device: self.clone(),
})
}
fn rand_uniform(&self, shape: &Shape, dtype: DType, lo: f64, up: f64) -> Result<CudaStorage> {
let elem_count = shape.elem_count();
let curand = self.curand.lock().unwrap();
let slice = match dtype {
// TODO: Add support for F16 and BF16 though this is likely to require some upstream
// cudarc changes.
DType::U8 | DType::U32 | DType::I64 | DType::F16 | DType::BF16 => {
Err(CudaError::UnsupportedDtype {
dtype,
op: "rand_uniform",
})
.w()?
}
DType::F32 => {
let mut data = unsafe { self.alloc::<f32>(elem_count) }.w()?;
curand.0.fill_with_uniform(&mut data).w()?;
CudaStorageSlice::F32(data)
}
DType::F64 => {
let mut data = unsafe { self.alloc::<f64>(elem_count) }.w()?;
curand.0.fill_with_uniform(&mut data).w()?;
CudaStorageSlice::F64(data)
}
};
let slice = if lo == 0. && up == 1.0 {
slice
} else {
use super::utils::Map1;
let layout = Layout::contiguous(shape);
super::Affine(up - lo, lo).map(&slice, self, &layout)?
};
Ok(CudaStorage {
slice,
device: self.clone(),
})
}
fn rand_normal(&self, shape: &Shape, dtype: DType, mean: f64, std: f64) -> Result<CudaStorage> {
// TODO: Add support for F16 and BF16 though this is likely to require some upstream
// cudarc changes.
let elem_count = shape.elem_count();
let curand = self.curand.lock().unwrap();
        // curand can only generate an even number of values, so round odd counts up.
// https://github.com/huggingface/candle/issues/734
let elem_count_round = if elem_count % 2 == 1 {
elem_count + 1
} else {
elem_count
};
let slice = match dtype {
DType::U8 | DType::U32 | DType::I64 | DType::F16 | DType::BF16 => {
Err(CudaError::UnsupportedDtype {
dtype,
op: "rand_normal",
})
.w()?
}
DType::F32 => {
let mut data = unsafe { self.alloc::<f32>(elem_count_round) }.w()?;
curand
.0
.fill_with_normal(&mut data, mean as f32, std as f32)
.w()?;
CudaStorageSlice::F32(data)
}
DType::F64 => {
let mut data = unsafe { self.alloc::<f64>(elem_count_round) }.w()?;
curand.0.fill_with_normal(&mut data, mean, std).w()?;
CudaStorageSlice::F64(data)
}
};
Ok(CudaStorage {
slice,
device: self.clone(),
})
}
fn ones_impl(&self, shape: &Shape, dtype: DType) -> Result<CudaStorage> {
self.const_impl(1., shape, dtype)
}
unsafe fn alloc_uninit(&self, shape: &Shape, dtype: DType) -> Result<Self::Storage> {
let elem_count = shape.elem_count();
let slice = match dtype {
DType::U8 => {
let data = self.alloc::<u8>(elem_count).w()?;
CudaStorageSlice::U8(data)
}
DType::U32 => {
let data = self.alloc::<u32>(elem_count).w()?;
CudaStorageSlice::U32(data)
}
DType::I64 => {
let data = self.alloc::<i64>(elem_count).w()?;
CudaStorageSlice::I64(data)
}
DType::BF16 => {
let data = self.alloc::<bf16>(elem_count).w()?;
CudaStorageSlice::BF16(data)
}
DType::F16 => {
let data = self.alloc::<f16>(elem_count).w()?;
CudaStorageSlice::F16(data)
}
DType::F32 => {
let data = self.alloc::<f32>(elem_count).w()?;
CudaStorageSlice::F32(data)
}
DType::F64 => {
let data = self.alloc::<f64>(elem_count).w()?;
CudaStorageSlice::F64(data)
}
};
Ok(CudaStorage {
slice,
device: self.clone(),
})
}
fn storage_from_slice<T: crate::WithDType>(&self, s: &[T]) -> Result<Self::Storage> {
let slice = match T::cpu_storage_ref(s) {
CpuStorageRef::U8(storage) => {
let data = self.htod_sync_copy(storage).w()?;
CudaStorageSlice::U8(data)
}
CpuStorageRef::U32(storage) => {
let data = self.htod_sync_copy(storage).w()?;
CudaStorageSlice::U32(data)
}
CpuStorageRef::I64(storage) => {
let data = self.htod_sync_copy(storage).w()?;
CudaStorageSlice::I64(data)
}
CpuStorageRef::BF16(storage) => {
let data = self.htod_sync_copy(storage).w()?;
CudaStorageSlice::BF16(data)
}
CpuStorageRef::F16(storage) => {
let data = self.htod_sync_copy(storage).w()?;
CudaStorageSlice::F16(data)
}
CpuStorageRef::F32(storage) => {
let data = self.htod_sync_copy(storage).w()?;
CudaStorageSlice::F32(data)
}
CpuStorageRef::F64(storage) => {
let data = self.htod_sync_copy(storage).w()?;
CudaStorageSlice::F64(data)
}
};
Ok(CudaStorage {
slice,
device: self.clone(),
})
}
fn storage_from_cpu_storage(&self, storage: &CpuStorage) -> Result<CudaStorage> {
let slice = match storage {
CpuStorage::U8(storage) => {
let data = self.htod_sync_copy(storage).w()?;
CudaStorageSlice::U8(data)
}
CpuStorage::U32(storage) => {
let data = self.htod_sync_copy(storage).w()?;
CudaStorageSlice::U32(data)
}
CpuStorage::I64(storage) => {
let data = self.htod_sync_copy(storage).w()?;
CudaStorageSlice::I64(data)
}
CpuStorage::BF16(storage) => {
let data = self.htod_sync_copy(storage).w()?;
CudaStorageSlice::BF16(data)
}
CpuStorage::F16(storage) => {
let data = self.htod_sync_copy(storage).w()?;
CudaStorageSlice::F16(data)
}
CpuStorage::F32(storage) => {
let data = self.htod_sync_copy(storage).w()?;
CudaStorageSlice::F32(data)
}
CpuStorage::F64(storage) => {
let data = self.htod_sync_copy(storage).w()?;
CudaStorageSlice::F64(data)
}
};
Ok(CudaStorage {
slice,
device: self.clone(),
})
}
fn storage_from_cpu_storage_owned(&self, storage: CpuStorage) -> Result<CudaStorage> {
let slice = match storage {
CpuStorage::U8(storage) => {
let data = self.htod_copy(storage).w()?;
CudaStorageSlice::U8(data)
}
CpuStorage::U32(storage) => {
let data = self.htod_copy(storage).w()?;
CudaStorageSlice::U32(data)
}
CpuStorage::I64(storage) => {
let data = self.htod_copy(storage).w()?;
CudaStorageSlice::I64(data)
}
CpuStorage::BF16(storage) => {
let data = self.htod_copy(storage).w()?;
CudaStorageSlice::BF16(data)
}
CpuStorage::F16(storage) => {
let data = self.htod_copy(storage).w()?;
CudaStorageSlice::F16(data)
}
CpuStorage::F32(storage) => {
let data = self.htod_copy(storage).w()?;
CudaStorageSlice::F32(data)
}
CpuStorage::F64(storage) => {
let data = self.htod_copy(storage).w()?;
CudaStorageSlice::F64(data)
}
};
Ok(CudaStorage {
slice,
device: self.clone(),
})
}
fn synchronize(&self) -> Result<()> {
self.device.synchronize().map_err(crate::Error::wrap)?;
Ok(())
}
}
| candle/candle-core/src/cuda_backend/device.rs/0 | {
"file_path": "candle/candle-core/src/cuda_backend/device.rs",
"repo_id": "candle",
"token_count": 9908
} |
#![allow(dead_code)]
use libc::{c_char, c_double, c_float, c_int};
mod ffi {
use super::*;
extern "C" {
pub fn vsTanh(n: c_int, a: *const c_float, y: *mut c_float);
pub fn vdTanh(n: c_int, a: *const c_double, y: *mut c_double);
pub fn vsExp(n: c_int, a: *const c_float, y: *mut c_float);
pub fn vdExp(n: c_int, a: *const c_double, y: *mut c_double);
pub fn vsLn(n: c_int, a: *const c_float, y: *mut c_float);
pub fn vdLn(n: c_int, a: *const c_double, y: *mut c_double);
pub fn vsSin(n: c_int, a: *const c_float, y: *mut c_float);
pub fn vdSin(n: c_int, a: *const c_double, y: *mut c_double);
pub fn vsCos(n: c_int, a: *const c_float, y: *mut c_float);
pub fn vdCos(n: c_int, a: *const c_double, y: *mut c_double);
pub fn vsSqrt(n: c_int, a: *const c_float, y: *mut c_float);
pub fn vdSqrt(n: c_int, a: *const c_double, y: *mut c_double);
pub fn vsAdd(n: c_int, a: *const c_float, b: *const c_float, y: *mut c_float);
pub fn vdAdd(n: c_int, a: *const c_double, b: *const c_double, y: *mut c_double);
pub fn vsSub(n: c_int, a: *const c_float, b: *const c_float, y: *mut c_float);
pub fn vdSub(n: c_int, a: *const c_double, b: *const c_double, y: *mut c_double);
pub fn vsMul(n: c_int, a: *const c_float, b: *const c_float, y: *mut c_float);
pub fn vdMul(n: c_int, a: *const c_double, b: *const c_double, y: *mut c_double);
pub fn vsDiv(n: c_int, a: *const c_float, b: *const c_float, y: *mut c_float);
pub fn vdDiv(n: c_int, a: *const c_double, b: *const c_double, y: *mut c_double);
pub fn vsFmax(n: c_int, a: *const c_float, b: *const c_float, y: *mut c_float);
pub fn vdFmax(n: c_int, a: *const c_double, b: *const c_double, y: *mut c_double);
pub fn vsFmin(n: c_int, a: *const c_float, b: *const c_float, y: *mut c_float);
pub fn vdFmin(n: c_int, a: *const c_double, b: *const c_double, y: *mut c_double);
pub fn sgemm_(
transa: *const c_char,
transb: *const c_char,
m: *const c_int,
n: *const c_int,
k: *const c_int,
alpha: *const c_float,
a: *const c_float,
lda: *const c_int,
b: *const c_float,
ldb: *const c_int,
beta: *const c_float,
c: *mut c_float,
ldc: *const c_int,
);
pub fn dgemm_(
transa: *const c_char,
transb: *const c_char,
m: *const c_int,
n: *const c_int,
k: *const c_int,
alpha: *const c_double,
a: *const c_double,
lda: *const c_int,
b: *const c_double,
ldb: *const c_int,
beta: *const c_double,
c: *mut c_double,
ldc: *const c_int,
);
pub fn hgemm_(
transa: *const c_char,
transb: *const c_char,
m: *const c_int,
n: *const c_int,
k: *const c_int,
alpha: *const half::f16,
a: *const half::f16,
lda: *const c_int,
b: *const half::f16,
ldb: *const c_int,
beta: *const half::f16,
c: *mut half::f16,
ldc: *const c_int,
);
}
}
#[allow(clippy::too_many_arguments)]
#[inline]
pub unsafe fn sgemm(
transa: u8,
transb: u8,
m: i32,
n: i32,
k: i32,
alpha: f32,
a: &[f32],
lda: i32,
b: &[f32],
ldb: i32,
beta: f32,
c: &mut [f32],
ldc: i32,
) {
ffi::sgemm_(
&(transa as c_char),
&(transb as c_char),
&m,
&n,
&k,
&alpha,
a.as_ptr(),
&lda,
b.as_ptr(),
&ldb,
&beta,
c.as_mut_ptr(),
&ldc,
)
}
#[allow(clippy::too_many_arguments)]
#[inline]
pub unsafe fn dgemm(
transa: u8,
transb: u8,
m: i32,
n: i32,
k: i32,
alpha: f64,
a: &[f64],
lda: i32,
b: &[f64],
ldb: i32,
beta: f64,
c: &mut [f64],
ldc: i32,
) {
ffi::dgemm_(
&(transa as c_char),
&(transb as c_char),
&m,
&n,
&k,
&alpha,
a.as_ptr(),
&lda,
b.as_ptr(),
&ldb,
&beta,
c.as_mut_ptr(),
&ldc,
)
}
#[allow(clippy::too_many_arguments)]
#[inline]
pub unsafe fn hgemm(
transa: u8,
transb: u8,
m: i32,
n: i32,
k: i32,
alpha: half::f16,
a: &[half::f16],
lda: i32,
b: &[half::f16],
ldb: i32,
beta: half::f16,
c: &mut [half::f16],
ldc: i32,
) {
ffi::hgemm_(
&(transa as c_char),
&(transb as c_char),
&m,
&n,
&k,
&alpha,
a.as_ptr(),
&lda,
b.as_ptr(),
&ldb,
&beta,
c.as_mut_ptr(),
&ldc,
)
}
#[inline]
pub fn vs_exp(a: &[f32], y: &mut [f32]) {
let a_len = a.len();
let y_len = y.len();
if a_len != y_len {
panic!("a and y have different lengths {a_len} <> {y_len}")
}
unsafe { ffi::vsExp(a_len as i32, a.as_ptr(), y.as_mut_ptr()) }
}
#[inline]
pub fn vd_exp(a: &[f64], y: &mut [f64]) {
let a_len = a.len();
let y_len = y.len();
if a_len != y_len {
panic!("a and y have different lengths {a_len} <> {y_len}")
}
unsafe { ffi::vdExp(a_len as i32, a.as_ptr(), y.as_mut_ptr()) }
}
#[inline]
pub fn vs_ln(a: &[f32], y: &mut [f32]) {
let a_len = a.len();
let y_len = y.len();
if a_len != y_len {
panic!("a and y have different lengths {a_len} <> {y_len}")
}
unsafe { ffi::vsLn(a_len as i32, a.as_ptr(), y.as_mut_ptr()) }
}
#[inline]
pub fn vd_ln(a: &[f64], y: &mut [f64]) {
let a_len = a.len();
let y_len = y.len();
if a_len != y_len {
panic!("a and y have different lengths {a_len} <> {y_len}")
}
unsafe { ffi::vdLn(a_len as i32, a.as_ptr(), y.as_mut_ptr()) }
}
#[inline]
pub fn vs_sin(a: &[f32], y: &mut [f32]) {
let a_len = a.len();
let y_len = y.len();
if a_len != y_len {
panic!("a and y have different lengths {a_len} <> {y_len}")
}
unsafe { ffi::vsSin(a_len as i32, a.as_ptr(), y.as_mut_ptr()) }
}
#[inline]
pub fn vd_sin(a: &[f64], y: &mut [f64]) {
let a_len = a.len();
let y_len = y.len();
if a_len != y_len {
panic!("a and y have different lengths {a_len} <> {y_len}")
}
unsafe { ffi::vdSin(a_len as i32, a.as_ptr(), y.as_mut_ptr()) }
}
#[inline]
pub fn vs_cos(a: &[f32], y: &mut [f32]) {
let a_len = a.len();
let y_len = y.len();
if a_len != y_len {
panic!("a and y have different lengths {a_len} <> {y_len}")
}
unsafe { ffi::vsCos(a_len as i32, a.as_ptr(), y.as_mut_ptr()) }
}
#[inline]
pub fn vd_cos(a: &[f64], y: &mut [f64]) {
let a_len = a.len();
let y_len = y.len();
if a_len != y_len {
panic!("a and y have different lengths {a_len} <> {y_len}")
}
unsafe { ffi::vdCos(a_len as i32, a.as_ptr(), y.as_mut_ptr()) }
}
#[inline]
pub fn vs_sqrt(a: &[f32], y: &mut [f32]) {
let a_len = a.len();
let y_len = y.len();
if a_len != y_len {
panic!("a and y have different lengths {a_len} <> {y_len}")
}
unsafe { ffi::vsSqrt(a_len as i32, a.as_ptr(), y.as_mut_ptr()) }
}
#[inline]
pub fn vd_sqrt(a: &[f64], y: &mut [f64]) {
let a_len = a.len();
let y_len = y.len();
if a_len != y_len {
panic!("a and y have different lengths {a_len} <> {y_len}")
}
unsafe { ffi::vdSqrt(a_len as i32, a.as_ptr(), y.as_mut_ptr()) }
}
#[inline]
pub fn vs_sqr(a: &[f32], y: &mut [f32]) {
let a_len = a.len();
let y_len = y.len();
if a_len != y_len {
panic!("a and y have different lengths {a_len} <> {y_len}")
}
unsafe { ffi::vsMul(a_len as i32, a.as_ptr(), a.as_ptr(), y.as_mut_ptr()) }
}
#[inline]
pub fn vd_sqr(a: &[f64], y: &mut [f64]) {
let a_len = a.len();
let y_len = y.len();
if a_len != y_len {
panic!("a and y have different lengths {a_len} <> {y_len}")
}
unsafe { ffi::vdMul(a_len as i32, a.as_ptr(), a.as_ptr(), y.as_mut_ptr()) }
}
#[inline]
pub fn vs_tanh(a: &[f32], y: &mut [f32]) {
let a_len = a.len();
let y_len = y.len();
if a_len != y_len {
panic!("a and y have different lengths {a_len} <> {y_len}")
}
unsafe { ffi::vsTanh(a_len as i32, a.as_ptr(), y.as_mut_ptr()) }
}
#[inline]
pub fn vd_tanh(a: &[f64], y: &mut [f64]) {
let a_len = a.len();
let y_len = y.len();
if a_len != y_len {
panic!("a and y have different lengths {a_len} <> {y_len}")
}
unsafe { ffi::vdTanh(a_len as i32, a.as_ptr(), y.as_mut_ptr()) }
}
// The vector functions from mkl can be performed in place by using the same array for input and
// output.
// https://www.intel.com/content/www/us/en/docs/onemkl/developer-reference-c/2023-2/vector-mathematical-functions.html
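// For example, `vs_tanh_inplace(&mut ys)` overwrites `ys` with `tanh(ys)`; the fused
// activations below (`vs_gelu`, `vs_silu`) rely on this to avoid an extra temporary buffer.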
#[inline]
pub fn vs_tanh_inplace(y: &mut [f32]) {
unsafe { ffi::vsTanh(y.len() as i32, y.as_ptr(), y.as_mut_ptr()) }
}
#[inline]
pub fn vd_tanh_inplace(y: &mut [f64]) {
unsafe { ffi::vdTanh(y.len() as i32, y.as_ptr(), y.as_mut_ptr()) }
}
#[inline]
pub fn vs_exp_inplace(y: &mut [f32]) {
unsafe { ffi::vsExp(y.len() as i32, y.as_ptr(), y.as_mut_ptr()) }
}
#[inline]
pub fn vd_exp_inplace(y: &mut [f64]) {
unsafe { ffi::vdExp(y.len() as i32, y.as_ptr(), y.as_mut_ptr()) }
}
#[inline]
pub fn vs_gelu(vs: &[f32], ys: &mut [f32]) {
for (&v, y) in vs.iter().zip(ys.iter_mut()) {
*y = (2.0f32 / std::f32::consts::PI).sqrt() * v * (1.0 + 0.044715 * v * v)
}
vs_tanh_inplace(ys);
for (&v, y) in vs.iter().zip(ys.iter_mut()) {
*y = 0.5 * v * (1.0 + *y)
}
}
#[inline]
pub fn vd_gelu(vs: &[f64], ys: &mut [f64]) {
for (&v, y) in vs.iter().zip(ys.iter_mut()) {
*y = (2.0f64 / std::f64::consts::PI).sqrt() * v * (1.0 + 0.044715 * v * v)
}
vd_tanh_inplace(ys);
for (&v, y) in vs.iter().zip(ys.iter_mut()) {
*y = 0.5 * v * (1.0 + *y)
}
}
#[inline]
pub fn vs_silu(vs: &[f32], ys: &mut [f32]) {
for (&v, y) in vs.iter().zip(ys.iter_mut()) {
*y = -v
}
vs_exp_inplace(ys);
for (&v, y) in vs.iter().zip(ys.iter_mut()) {
*y = v / (1.0 + *y)
}
}
#[inline]
pub fn vd_silu(vs: &[f64], ys: &mut [f64]) {
for (&v, y) in vs.iter().zip(ys.iter_mut()) {
*y = -v
}
vd_exp_inplace(ys);
for (&v, y) in vs.iter().zip(ys.iter_mut()) {
*y = v / (1.0 + *y)
}
}
macro_rules! binary_op {
($fn_name:ident, $ty:ty, $mkl_name:ident) => {
#[inline]
pub fn $fn_name(a: &[$ty], b: &[$ty], y: &mut [$ty]) {
let a_len = a.len();
let b_len = b.len();
let y_len = y.len();
if a_len != y_len || b_len != y_len {
panic!(
"{} a,b,y len mismatch {a_len} {b_len} {y_len}",
stringify!($fn_name)
);
}
unsafe { ffi::$mkl_name(a_len as i32, a.as_ptr(), b.as_ptr(), y.as_mut_ptr()) }
}
};
}
binary_op!(vs_add, f32, vsAdd);
binary_op!(vd_add, f64, vdAdd);
binary_op!(vs_sub, f32, vsSub);
binary_op!(vd_sub, f64, vdSub);
binary_op!(vs_mul, f32, vsMul);
binary_op!(vd_mul, f64, vdMul);
binary_op!(vs_div, f32, vsDiv);
binary_op!(vd_div, f64, vdDiv);
binary_op!(vs_max, f32, vsFmax);
binary_op!(vd_max, f64, vdFmax);
binary_op!(vs_min, f32, vsFmin);
binary_op!(vd_min, f64, vdFmin);
| candle/candle-core/src/mkl.rs/0 | {
"file_path": "candle/candle-core/src/mkl.rs",
"repo_id": "candle",
"token_count": 6463
} |
//! Module to load `safetensor` files into CPU/GPU memory.
//!
//! There are multiple ways to load tensors from safetensor files:
//! - `load` function for loading directly into memory and returning a HashMap of tensors
//! - `MmapedSafetensors` for memory mapping files and avoiding full allocation
//! - `SliceSafetensors` for working with in-memory buffers
//! - `BufferedSafetensors` for owning a buffer of data
//!
//! Tensors can also be serialized to safetensor format using the `save` function or
//! `Tensor::save_safetensors` method.
//!
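//! A minimal usage sketch of the `load` function (the `model.safetensors` path is
//! illustrative):
//! ```no_run
//! # fn main() -> candle_core::Result<()> {
//! use candle_core::{safetensors, Device};
//! // Load every tensor from the file onto the CPU.
//! let tensors = safetensors::load("model.safetensors", &Device::Cpu)?;
//! for (name, tensor) in tensors.iter() {
//!     println!("{name}: {:?}", tensor.shape());
//! }
//! # Ok(())
//! # }
//! ```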
use crate::{DType, Device, Error, Result, Tensor, WithDType};
use safetensors::tensor as st;
use safetensors::tensor::SafeTensors;
use std::borrow::Cow;
use std::collections::HashMap;
use std::path::Path;
impl From<DType> for st::Dtype {
fn from(value: DType) -> Self {
match value {
DType::U8 => st::Dtype::U8,
DType::U32 => st::Dtype::U32,
DType::I64 => st::Dtype::I64,
DType::BF16 => st::Dtype::BF16,
DType::F16 => st::Dtype::F16,
DType::F32 => st::Dtype::F32,
DType::F64 => st::Dtype::F64,
}
}
}
impl TryFrom<st::Dtype> for DType {
type Error = Error;
fn try_from(value: st::Dtype) -> Result<Self> {
match value {
st::Dtype::U8 => Ok(DType::U8),
st::Dtype::U32 => Ok(DType::U32),
st::Dtype::I64 => Ok(DType::I64),
st::Dtype::BF16 => Ok(DType::BF16),
st::Dtype::F16 => Ok(DType::F16),
st::Dtype::F32 => Ok(DType::F32),
st::Dtype::F64 => Ok(DType::F64),
dtype => Err(Error::UnsupportedSafeTensorDtype(dtype)),
}
}
}
impl st::View for Tensor {
fn dtype(&self) -> st::Dtype {
self.dtype().into()
}
fn shape(&self) -> &[usize] {
self.shape().dims()
}
fn data(&self) -> Cow<[u8]> {
// This copies data from GPU to CPU.
// TODO: Avoid the unwrap here.
Cow::Owned(convert_back(self).unwrap())
}
fn data_len(&self) -> usize {
let n: usize = self.shape().elem_count();
let bytes_per_element = self.dtype().size_in_bytes();
n * bytes_per_element
}
}
impl st::View for &Tensor {
fn dtype(&self) -> st::Dtype {
(*self).dtype().into()
}
fn shape(&self) -> &[usize] {
self.dims()
}
fn data(&self) -> Cow<[u8]> {
// This copies data from GPU to CPU.
// TODO: Avoid the unwrap here.
Cow::Owned(convert_back(self).unwrap())
}
fn data_len(&self) -> usize {
let n: usize = self.dims().iter().product();
let bytes_per_element = (*self).dtype().size_in_bytes();
n * bytes_per_element
}
}
impl Tensor {
pub fn save_safetensors<P: AsRef<Path>>(&self, name: &str, filename: P) -> Result<()> {
let data = [(name, self.clone())];
Ok(st::serialize_to_file(data, &None, filename.as_ref())?)
}
}
fn convert_slice<T: WithDType>(data: &[u8], shape: &[usize], device: &Device) -> Result<Tensor> {
let size_in_bytes = T::DTYPE.size_in_bytes();
let elem_count = data.len() / size_in_bytes;
if (data.as_ptr() as usize) % size_in_bytes == 0 {
        // SAFETY: This is safe because we just checked that this
// was correctly aligned.
let data: &[T] =
unsafe { std::slice::from_raw_parts(data.as_ptr() as *const T, elem_count) };
Tensor::from_slice(data, shape, device)
} else {
        // XXX: We need to specify `T` here, otherwise the compiler will infer u8 because of the following cast,
        // making this vector too small to fit full f16/f32/f64 weights and resulting in out-of-bounds access.
let mut c: Vec<T> = Vec::with_capacity(elem_count);
// SAFETY: We just created c, so the allocated memory is necessarily
// contiguous and non overlapping with the view's data.
// We're downgrading the `c` pointer from T to u8, which removes alignment
// constraints.
unsafe {
std::ptr::copy_nonoverlapping(data.as_ptr(), c.as_mut_ptr() as *mut u8, data.len());
c.set_len(elem_count)
}
Tensor::from_slice(&c, shape, device)
}
}
fn convert_slice_with_cast<T: Sized + Copy, U: WithDType, F: Fn(T) -> Result<U>>(
data: &[u8],
shape: &[usize],
device: &Device,
conv: F,
) -> Result<Tensor> {
let size_in_bytes = std::mem::size_of::<T>();
let elem_count = data.len() / size_in_bytes;
if (data.as_ptr() as usize) % size_in_bytes == 0 {
        // SAFETY: This is safe because we just checked that this
// was correctly aligned.
let data: &[T] =
unsafe { std::slice::from_raw_parts(data.as_ptr() as *const T, elem_count) };
let data = data.iter().map(|t| conv(*t)).collect::<Result<Vec<_>>>()?;
Tensor::from_vec(data, shape, device)
} else {
        // XXX: We need to specify `T` here, otherwise the compiler will infer u8 because of the following cast,
        // making this vector too small to fit full f16/f32/f64 weights and resulting in out-of-bounds access.
let mut c: Vec<T> = Vec::with_capacity(elem_count);
// SAFETY: We just created c, so the allocated memory is necessarily
// contiguous and non overlapping with the view's data.
// We're downgrading the `c` pointer from T to u8, which removes alignment
// constraints.
unsafe {
std::ptr::copy_nonoverlapping(data.as_ptr(), c.as_mut_ptr() as *mut u8, data.len());
c.set_len(elem_count)
}
let c = c.into_iter().map(conv).collect::<Result<Vec<_>>>()?;
Tensor::from_vec(c, shape, device)
}
}
fn convert_with_cast_<T: Sized + Copy, U: WithDType, F: Fn(T) -> Result<U>>(
view: &st::TensorView<'_>,
device: &Device,
conv: F,
) -> Result<Tensor> {
convert_slice_with_cast::<T, U, F>(view.data(), view.shape(), device, conv)
}
fn convert_<T: WithDType>(view: &st::TensorView<'_>, device: &Device) -> Result<Tensor> {
convert_slice::<T>(view.data(), view.shape(), device)
}
fn convert_back_<T: WithDType>(mut vs: Vec<T>) -> Vec<u8> {
let size_in_bytes = T::DTYPE.size_in_bytes();
let length = vs.len() * size_in_bytes;
let capacity = vs.capacity() * size_in_bytes;
let ptr = vs.as_mut_ptr() as *mut u8;
// Don't run the destructor for Vec<T>
std::mem::forget(vs);
// SAFETY:
//
// Every T is larger than u8, so there is no issue regarding alignment.
    // This re-interprets the Vec<T> as a Vec<u8>.
unsafe { Vec::from_raw_parts(ptr, length, capacity) }
}
pub trait Load {
fn load(&self, device: &Device) -> Result<Tensor>;
}
impl Load for st::TensorView<'_> {
fn load(&self, device: &Device) -> Result<Tensor> {
convert(self, device)
}
}
impl Tensor {
pub fn from_raw_buffer(
data: &[u8],
dtype: DType,
shape: &[usize],
device: &Device,
) -> Result<Self> {
match dtype {
DType::U8 => convert_slice::<u8>(data, shape, device),
DType::U32 => convert_slice::<u32>(data, shape, device),
DType::I64 => convert_slice::<i64>(data, shape, device),
DType::BF16 => convert_slice::<half::bf16>(data, shape, device),
DType::F16 => convert_slice::<half::f16>(data, shape, device),
DType::F32 => convert_slice::<f32>(data, shape, device),
DType::F64 => convert_slice::<f64>(data, shape, device),
}
}
}
fn convert(view: &st::TensorView<'_>, device: &Device) -> Result<Tensor> {
match view.dtype() {
st::Dtype::U8 => convert_::<u8>(view, device),
st::Dtype::U16 => {
let conv = |x| Ok(u32::from(x));
convert_with_cast_::<u16, u32, _>(view, device, conv)
}
st::Dtype::U32 => convert_::<u32>(view, device),
st::Dtype::I32 => {
let conv = |x| Ok(i64::from(x));
convert_with_cast_::<i32, i64, _>(view, device, conv)
}
st::Dtype::I64 => convert_::<i64>(view, device),
st::Dtype::BF16 => convert_::<half::bf16>(view, device),
st::Dtype::F16 => convert_::<half::f16>(view, device),
st::Dtype::F32 => convert_::<f32>(view, device),
st::Dtype::F64 => convert_::<f64>(view, device),
dtype => Err(Error::UnsupportedSafeTensorDtype(dtype)),
}
}
fn convert_back(tensor: &Tensor) -> Result<Vec<u8>> {
// TODO: This makes an unnecessary copy when the tensor is on the cpu.
let tensor = tensor.flatten_all()?;
match tensor.dtype() {
DType::U8 => Ok(convert_back_::<u8>(tensor.to_vec1()?)),
DType::U32 => Ok(convert_back_::<u32>(tensor.to_vec1()?)),
DType::I64 => Ok(convert_back_::<i64>(tensor.to_vec1()?)),
DType::F16 => Ok(convert_back_::<half::f16>(tensor.to_vec1()?)),
DType::BF16 => Ok(convert_back_::<half::bf16>(tensor.to_vec1()?)),
DType::F32 => Ok(convert_back_::<f32>(tensor.to_vec1()?)),
DType::F64 => Ok(convert_back_::<f64>(tensor.to_vec1()?)),
}
}
pub fn load<P: AsRef<Path>>(filename: P, device: &Device) -> Result<HashMap<String, Tensor>> {
let data = std::fs::read(filename.as_ref())?;
load_buffer(&data[..], device)
}
pub fn load_buffer(data: &[u8], device: &Device) -> Result<HashMap<String, Tensor>> {
let st = safetensors::SafeTensors::deserialize(data)?;
st.tensors()
.into_iter()
.map(|(name, view)| Ok((name, view.load(device)?)))
.collect()
}
pub fn save<K: AsRef<str> + Ord + std::fmt::Display, P: AsRef<Path>>(
tensors: &HashMap<K, Tensor>,
filename: P,
) -> Result<()> {
Ok(st::serialize_to_file(tensors, &None, filename.as_ref())?)
}
#[derive(yoke::Yokeable)]
struct SafeTensors_<'a>(SafeTensors<'a>);
pub struct MmapedSafetensors {
safetensors: Vec<yoke::Yoke<SafeTensors_<'static>, memmap2::Mmap>>,
routing: Option<HashMap<String, usize>>,
}
impl MmapedSafetensors {
    /// Creates a wrapper around a memory mapped file and deserializes the safetensors header.
///
/// # Safety
///
/// The unsafe is inherited from [`memmap2::MmapOptions`].
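    ///
    /// A minimal usage sketch (the file and tensor names are illustrative):
    /// ```no_run
    /// # fn main() -> candle_core::Result<()> {
    /// use candle_core::{safetensors::MmapedSafetensors, Device};
    /// // SAFETY: the mapped file must not be modified while it is in use.
    /// let st = unsafe { MmapedSafetensors::new("model.safetensors")? };
    /// let tensor = st.load("weight", &Device::Cpu)?;
    /// println!("{:?}", tensor.shape());
    /// # Ok(())
    /// # }
    /// ```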
pub unsafe fn new<P: AsRef<Path>>(p: P) -> Result<Self> {
let p = p.as_ref();
let file = std::fs::File::open(p).map_err(|e| Error::from(e).with_path(p))?;
let file = memmap2::MmapOptions::new()
.map(&file)
.map_err(|e| Error::from(e).with_path(p))?;
let safetensors = yoke::Yoke::<SafeTensors_<'static>, memmap2::Mmap>::try_attach_to_cart(
file,
|data: &[u8]| {
let st = safetensors::SafeTensors::deserialize(data)
.map_err(|e| Error::from(e).with_path(p))?;
Ok::<_, Error>(SafeTensors_(st))
},
)?;
Ok(Self {
safetensors: vec![safetensors],
routing: None,
})
}
    /// Creates a wrapper around multiple memory mapped files and deserializes the safetensors headers.
///
/// If a tensor name appears in multiple files, the last entry is returned.
///
/// # Safety
///
/// The unsafe is inherited from [`memmap2::MmapOptions`].
pub unsafe fn multi<P: AsRef<Path>>(paths: &[P]) -> Result<Self> {
let mut routing = HashMap::new();
let mut safetensors = vec![];
for (index, p) in paths.iter().enumerate() {
let p = p.as_ref();
let file = std::fs::File::open(p).map_err(|e| Error::from(e).with_path(p))?;
let file = memmap2::MmapOptions::new()
.map(&file)
.map_err(|e| Error::from(e).with_path(p))?;
let data = yoke::Yoke::<SafeTensors_<'static>, memmap2::Mmap>::try_attach_to_cart(
file,
|data: &[u8]| {
let st = safetensors::SafeTensors::deserialize(data)
.map_err(|e| Error::from(e).with_path(p))?;
Ok::<_, Error>(SafeTensors_(st))
},
)?;
for k in data.get().0.names() {
routing.insert(k.to_string(), index);
}
safetensors.push(data)
}
Ok(Self {
safetensors,
routing: Some(routing),
})
}
pub fn load(&self, name: &str, dev: &Device) -> Result<Tensor> {
self.get(name)?.load(dev)
}
pub fn tensors(&self) -> Vec<(String, st::TensorView<'_>)> {
let mut tensors = vec![];
for safetensors in self.safetensors.iter() {
tensors.push(safetensors.get().0.tensors())
}
tensors.into_iter().flatten().collect()
}
pub fn get(&self, name: &str) -> Result<st::TensorView<'_>> {
let index = match &self.routing {
None => 0,
Some(routing) => {
let index = routing.get(name).ok_or_else(|| {
Error::CannotFindTensor {
path: name.to_string(),
}
.bt()
})?;
*index
}
};
Ok(self.safetensors[index].get().0.tensor(name)?)
}
}
pub struct SliceSafetensors<'a> {
safetensors: SafeTensors<'a>,
}
impl<'a> SliceSafetensors<'a> {
    /// Creates a wrapper around a binary buffer and deserializes the safetensors header.
pub fn new(buffer: &'a [u8]) -> Result<Self> {
let safetensors = safetensors::SafeTensors::deserialize(buffer)?;
Ok(Self { safetensors })
}
pub fn load(&self, name: &str, dev: &Device) -> Result<Tensor> {
self.safetensors.tensor(name)?.load(dev)
}
pub fn tensors(&self) -> Vec<(String, st::TensorView<'_>)> {
self.safetensors.tensors()
}
pub fn get(&self, name: &str) -> Result<st::TensorView<'_>> {
Ok(self.safetensors.tensor(name)?)
}
}
pub struct BufferedSafetensors {
safetensors: yoke::Yoke<SafeTensors_<'static>, Vec<u8>>,
}
impl BufferedSafetensors {
    /// Creates a wrapper around a binary buffer and deserializes the safetensors header.
pub fn new(buffer: Vec<u8>) -> Result<Self> {
let safetensors = yoke::Yoke::<SafeTensors_<'static>, Vec<u8>>::try_attach_to_cart(
buffer,
|data: &[u8]| {
let st = safetensors::SafeTensors::deserialize(data)?;
Ok::<_, Error>(SafeTensors_(st))
},
)?;
Ok(Self { safetensors })
}
pub fn load(&self, name: &str, dev: &Device) -> Result<Tensor> {
self.get(name)?.load(dev)
}
pub fn tensors(&self) -> Vec<(String, st::TensorView<'_>)> {
self.safetensors.get().0.tensors()
}
pub fn get(&self, name: &str) -> Result<st::TensorView<'_>> {
Ok(self.safetensors.get().0.tensor(name)?)
}
}
pub struct MmapedFile {
path: std::path::PathBuf,
inner: memmap2::Mmap,
}
impl MmapedFile {
/// Creates a wrapper around a memory mapped file from which you can retrieve
/// tensors using [`MmapedFile::deserialize`]
///
/// # Safety
///
/// The unsafe is inherited from [`memmap2::MmapOptions`].
pub unsafe fn new<P: AsRef<Path>>(p: P) -> Result<Self> {
let p = p.as_ref();
let file = std::fs::File::open(p).map_err(|e| Error::from(e).with_path(p))?;
let inner = memmap2::MmapOptions::new()
.map(&file)
.map_err(|e| Error::from(e).with_path(p))?;
Ok(Self {
inner,
path: p.to_path_buf(),
})
}
pub fn deserialize(&self) -> Result<SafeTensors<'_>> {
let st = safetensors::SafeTensors::deserialize(&self.inner)
.map_err(|e| Error::from(e).with_path(&self.path))?;
Ok(st)
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::collections::HashMap;
#[test]
fn save_single_tensor() {
let t = Tensor::zeros((2, 2), DType::F32, &Device::Cpu).unwrap();
t.save_safetensors("t", "t.safetensors").unwrap();
let bytes = std::fs::read("t.safetensors").unwrap();
assert_eq!(bytes, b"@\0\0\0\0\0\0\0{\"t\":{\"dtype\":\"F32\",\"shape\":[2,2],\"data_offsets\":[0,16]}} \0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0");
std::fs::remove_file("t.safetensors").unwrap();
}
#[test]
fn save_load_multiple_tensors() {
let t = Tensor::zeros((2, 2), DType::F32, &Device::Cpu).unwrap();
let u = Tensor::zeros((1, 2), DType::F32, &Device::Cpu).unwrap();
let map: HashMap<_, _> = [("t", t), ("u", u)].into_iter().collect();
save(&map, "multi.safetensors").unwrap();
let weights = load("multi.safetensors", &Device::Cpu).unwrap();
assert_eq!(weights.get("t").unwrap().dims(), &[2, 2]);
assert_eq!(weights.get("u").unwrap().dims(), &[1, 2]);
let bytes = std::fs::read("multi.safetensors").unwrap();
assert_eq!(bytes, b"x\0\0\0\0\0\0\0{\"t\":{\"dtype\":\"F32\",\"shape\":[2,2],\"data_offsets\":[0,16]},\"u\":{\"dtype\":\"F32\",\"shape\":[1,2],\"data_offsets\":[16,24]}} \0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0");
std::fs::remove_file("multi.safetensors").unwrap();
}
}
| candle/candle-core/src/safetensors.rs/0 | {
"file_path": "candle/candle-core/src/safetensors.rs",
"repo_id": "candle",
"token_count": 8223
} |
#![allow(clippy::approx_constant)]
use anyhow::{Context, Result};
use candle_core::{test_device, test_utils, Device, Shape, Tensor, Var};
fn simple_grad(device: &Device) -> Result<()> {
let x = Var::new(&[3f32, 1., 4.], device)?;
let x = x.as_tensor();
let y = (((x * x)? + x * 5f64)? + 4f64)?;
let grads = y.backward()?;
let grad_x = grads.get(x).context("no grad for x")?;
assert_eq!(x.to_vec1::<f32>()?, [3., 1., 4.]);
// y = x^2 + 5.x + 4
assert_eq!(y.to_vec1::<f32>()?, [28., 10., 40.]);
// dy/dx = 2.x + 5
assert_eq!(grad_x.to_vec1::<f32>()?, [11., 7., 13.]);
Ok(())
}
fn sum_grad(device: &Device) -> Result<()> {
let x = Var::new(&[3f32, 1., 4.], device)?;
let x = x.as_tensor();
let y = (x.sqr()?.sum_keepdim(0)? * 2.)?;
let grads = y.backward()?;
let grad_x = grads.get(x).context("no grad for x")?;
assert_eq!(y.to_vec1::<f32>()?, [52.]);
// y = 2.x^2 so dy/dx = 4.x
assert_eq!(grad_x.to_vec1::<f32>()?, &[12., 4., 16.]);
// Same test as before but squeezing on the last dimension.
let y = (x.sqr()?.sum_keepdim(0)? * 2.)?.squeeze(0)?;
let grads = y.backward()?;
let grad_x = grads.get(x).context("no grad for x")?;
assert_eq!(y.to_scalar::<f32>()?, 52.);
// y = 2.x^2 so dy/dx = 4.x
assert_eq!(grad_x.to_vec1::<f32>()?, &[12., 4., 16.]);
Ok(())
}
fn matmul_grad(device: &Device) -> Result<()> {
let data: Vec<_> = (0..12).map(|i| i as f32).collect();
let x = Var::from_slice(&data, (2, 2, 3), device)?;
let data: Vec<_> = (0..12).map(|i| i as f32).collect();
let y = Var::from_slice(&data, (2, 3, 2), device)?;
let c = x.matmul(&y)?;
let grads = c.backward()?;
let grad_x = grads.get(&x).context("no grad for x")?;
let grad_y = grads.get(&y).context("no grad for y")?;
assert_eq!(grad_x.shape(), &Shape::from((2, 2, 3)));
assert_eq!(grad_y.shape(), &Shape::from((2, 3, 2)));
assert_eq!(
&*grad_x.to_vec3::<f32>()?,
&[
[[1., 5., 9.], [1., 5., 9.]],
[[13., 17., 21.], [13., 17., 21.]]
]
);
assert_eq!(
&*grad_y.to_vec3::<f32>()?,
&[
[[3., 3.], [5., 5.], [7., 7.]],
[[15., 15.], [17., 17.], [19., 19.]]
]
);
Ok(())
}
// The simplest gradient descent, using scalar variable.
fn grad_descent(device: &Device) -> Result<()> {
let x = Var::new(0f32, device)?;
let learning_rate = 0.1;
for _step in 0..100 {
let xt = x.as_tensor();
let c = ((xt - 4.2)? * (xt - 4.2)?)?;
let grads = c.backward()?;
let x_grad = grads.get(&x).context("no grad for x")?;
x.set(&(xt - x_grad * learning_rate)?)?
}
assert_eq!(x.to_scalar::<f32>()?, 4.199999);
Ok(())
}
fn unary_grad(device: &Device) -> Result<()> {
let x = Var::new(&[3f32, 1., 4., 0.15], device)?;
let x = x.as_tensor();
let y = (x.log()? + 1.)?;
let grads = y.backward()?;
let grad_x = grads.get(x).context("no grad for x")?;
assert_eq!(
test_utils::to_vec1_round(&y, 4)?,
[2.0986, 1.0, 2.3863, -0.8971]
);
assert_eq!(
test_utils::to_vec1_round(grad_x, 4)?,
[0.3333, 1.0, 0.25, 6.6667]
);
let y = x.exp()?;
let grads = y.backward()?;
let grad_x = grads.get(x).context("no grad for x")?;
assert_eq!(
test_utils::to_vec1_round(&y, 4)?,
[20.0855, 2.7183, 54.5982, 1.1618]
);
assert_eq!(
test_utils::to_vec1_round(grad_x, 4)?,
[20.0855, 2.7183, 54.5982, 1.1618]
);
let y = x.exp()?.sqr()?;
let grads = y.backward()?;
let grad_x = grads.get(x).context("no grad for x")?;
assert_eq!(
test_utils::to_vec1_round(&y, 3)?,
[403.429, 7.389, 2980.958, 1.35]
);
// exp(x)^2 = exp(2*x)
assert_eq!(
test_utils::to_vec1_round(grad_x, 2)?,
[806.86, 14.78, 5961.92, 2.7]
);
let y = x.sin()?;
let grads = y.backward()?;
let grad_x = grads.get(x).context("no grad for x")?;
assert_eq!(
test_utils::to_vec1_round(&y, 4)?,
[0.1411, 0.8415, -0.7568, 0.1494],
);
assert_eq!(
test_utils::to_vec1_round(grad_x, 4)?,
[-0.99, 0.5403, -0.6536, 0.9888],
);
let y = x.cos()?;
let grads = y.backward()?;
let grad_x = grads.get(x).context("no grad for x")?;
assert_eq!(
test_utils::to_vec1_round(&y, 4)?,
[-0.99, 0.5403, -0.6536, 0.9888],
);
assert_eq!(
test_utils::to_vec1_round(grad_x, 4)?,
[-0.1411, -0.8415, 0.7568, -0.1494],
);
let y = x.sqr()?;
let grads = y.backward()?;
let grad_x = grads.get(x).context("no grad for x")?;
assert_eq!(y.to_vec1::<f32>()?, [9.0, 1.0, 16.0, 0.0225]);
assert_eq!(grad_x.to_vec1::<f32>()?, [6.0, 2.0, 8.0, 0.3]);
let y = x.sqr()?.sqrt()?;
let grads = y.backward()?;
let grad_x = grads.get(x).context("no grad for x")?;
assert_eq!(y.to_vec1::<f32>()?, [3.0, 1.0, 4.0, 0.15]);
assert_eq!(test_utils::to_vec1_round(grad_x, 4)?, [1.0, 1.0, 1.0, 1.0]);
let y = x.neg()?;
let grads = y.backward()?;
let grad_x = grads.get(x).context("no grad for x")?;
assert_eq!(y.to_vec1::<f32>()?, [-3.0, -1.0, -4.0, -0.15]);
assert_eq!(grad_x.to_vec1::<f32>()?, [-1.0, -1.0, -1.0, -1.0]);
let y = x.affine(0.2, 1.)?;
let grads = y.backward()?;
let grad_x = grads.get(x).context("no grad for x")?;
assert_eq!(y.to_vec1::<f32>()?, [1.6, 1.2, 1.8, 1.03]);
assert_eq!(grad_x.to_vec1::<f32>()?, [0.2, 0.2, 0.2, 0.2]);
let y = Tensor::new(1f32, device)?.broadcast_div(x)?;
let grads = y.backward()?;
let grad_x = grads.get(x).context("no grad for x")?;
assert_eq!(
test_utils::to_vec1_round(&y, 4)?,
[0.3333, 1.0, 0.25, 6.6667]
);
assert_eq!(
grad_x.to_vec1::<f32>()?,
[-0.11111111, -1.0, -0.0625, -44.444443],
);
let y = x.broadcast_div(&Tensor::new(0.5f32, device)?)?;
let grads = y.backward()?;
let grad_x = grads.get(x).context("no grad for x")?;
assert_eq!(y.to_vec1::<f32>()?, [6., 2., 8., 0.3]);
assert_eq!(grad_x.to_vec1::<f32>()?, [2., 2., 2., 2.]);
let x = Var::new(&[3f32, 1., 4., 0.15], device)?;
let y = x.powf(2.5)?;
let grads = y.backward()?;
let grad_x = grads.get(&x).context("no grad for x")?;
assert_eq!(test_utils::to_vec1_round(&y, 2)?, [15.59, 1.0, 32.0, 0.01]);
assert_eq!(
test_utils::to_vec1_round(grad_x, 2)?,
[12.99, 2.5, 20.0, 0.15]
);
let y = x.tanh()?;
let grads = y.backward()?;
let grad_x = grads.get(&x).context("no grad for x")?;
assert_eq!(test_utils::to_vec1_round(&y, 2)?, [1.0, 0.76, 1.0, 0.15]);
assert_eq!(
test_utils::to_vec1_round(grad_x, 2)?,
[0.01, 0.42, 0.0, 0.98],
);
// testing compared to pytorch nn.GELU(approximate = 'tanh')
let y = x.gelu()?;
let grads = y.backward()?;
let grad_x = grads.get(&x).context("no grad for x")?;
assert_eq!(
test_utils::to_vec1_round(&y, 4)?,
[2.9964, 0.8412, 3.9999, 0.0839]
);
assert_eq!(
test_utils::to_vec1_round(grad_x, 4)?,
[1.0116, 1.0830, 1.0003, 0.6188],
);
// Testing compared to pytorch torch.erf
//
// import torch
// x = torch.tensor([3.0, 1.0, 4.0, 0.15], requires_grad=True)
// y = x.erf()
// print(y)
// loss = y.sum()
// loss.backward()
// print(x.grad)
let y = x.erf()?;
let grads = y.backward()?;
let grad_x = grads.get(&x).context("no grad for x")?;
assert_eq!(test_utils::to_vec1_round(&y, 4)?, [1.0, 0.8427, 1.0, 0.168]);
assert_eq!(
test_utils::to_vec1_round(grad_x, 4)?,
[0.0001, 0.4151, 0.0, 1.1033],
);
// Testing compared to pytorch nn.GELU(approximate = 'none')
//
// import torch
// import torch.nn.functional as F
// x = torch.tensor([3.0, 1.0, 4.0, 0.15], requires_grad=True)
// y = F.gelu(x, approximate='none')
// print(y)
// loss = y.sum()
// loss.backward()
// print(x.grad)
let y = x.gelu_erf()?;
let grads = y.backward()?;
let grad_x = grads.get(&x).context("no grad for x")?;
assert_eq!(
test_utils::to_vec1_round(&y, 4)?,
[2.9960, 0.8413, 3.9999, 0.0839]
);
assert_eq!(
test_utils::to_vec1_round(grad_x, 4)?,
[1.0119, 1.0833, 1.0005, 0.6188],
);
// Testing compared to pytorch elu
//
// import torch
// import torch.nn.functional as F
// x = torch.tensor([-1.0, 0.0, -2.0, 3.0], requires_grad=True)
// y = F.elu(x, alpha=2.0)
// print(y)
    // loss = y.sum()
// loss.backward()
// print(x.grad)
let elu_x = Var::new(&[-1.0f32, 0., -2., 3.], device)?;
let y = elu_x.elu(2.)?;
let grads = y.backward()?;
let grad_x = grads.get(&elu_x).context("no grad for x")?;
assert_eq!(
test_utils::to_vec1_round(&y, 4)?,
[-1.2642, 0.0000, -1.7293, 3.0000]
);
assert_eq!(
test_utils::to_vec1_round(grad_x, 4)?,
[0.7358, 2.0000, 0.2707, 1.0000]
);
// testing compared to pytorch nn.Silu()
let y = x.silu()?;
let grads = y.backward()?;
let grad_x = grads.get(&x).context("no grad for x")?;
assert_eq!(
test_utils::to_vec1_round(&y, 4)?,
[2.8577, 0.7311, 3.9281, 0.0806]
);
assert_eq!(
test_utils::to_vec1_round(grad_x, 4)?,
[1.0881, 0.9277, 1.0527, 0.5747],
);
if device.is_cpu() {
let x = Var::new(&[[[1f32, 2., 3.], [4., 5., 6.], [7., 8., 9.]]], device)?;
let y = x.interpolate1d(12)?.reshape(36)?;
let z = Tensor::new(
&[
1_f32, 02., 03., 04., 05., 06., 07., 08., 09., 10., 11., 12., 13., 14., 15., 16.,
17., 18., 19., 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., 30., 31., 32.,
33., 34., 35., 36.,
],
device,
)?;
let loss = y.unsqueeze(1)?.transpose(0, 1)?.matmul(&z.unsqueeze(1)?)?;
let grads = loss.backward()?;
let grad_x = grads.get(&x).context("no grad for x")?;
assert_eq!(
test_utils::to_vec3_round(grad_x, 4)?,
[[[10_f32, 26., 42.], [58., 74., 90.], [106., 122., 138.]]]
);
}
// manually checked: see comments
let x = Var::new(&[[[[1f32, 2., 3.], [4., 5., 6.], [7., 8., 9.]]]], device)?;
let y = x.interpolate2d(6, 6)?.reshape(36)?;
let z = Tensor::new(
&[
1_f32, 02., 03., 04., 05., 06., 07., 08., 09., 10., 11., 12., 13., 14., 15., 16., 17.,
18., 19., 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., 30., 31., 32., 33., 34.,
35., 36.,
],
device,
)?;
// gradient should be
// row 1
// 1+2+7+8 = 18
// 3+4+9+10 = 26
// 5+6+11+12 = 34
// row 2
// 13+14+19+20 = 66
// 15+16+21+22 = 74
// 17+18+23+24 = 82
// row 3
// 25+26+31+32 = 114
// 27+28+33+34 = 122
// 29+30+35+36 = 130
let loss = y.unsqueeze(1)?.transpose(0, 1)?.matmul(&z.unsqueeze(1)?)?;
let grads = loss.backward()?;
let grad_x = grads.get(&x).context("no grad for x")?;
assert_eq!(
test_utils::to_vec2_round(&grad_x.flatten(0, 2)?, 4)?,
[[18_f32, 26., 34.], [66., 74., 82.], [114., 122., 130.]]
);
// manually checked: see comments
let x = Var::new(&[[[[1f32, 2.], [4., 5.]]]], device)?;
let y = x.interpolate2d(6, 6)?.reshape(36)?;
let z = Tensor::new(
&[
1_f32, 02., 03., 04., 05., 06., 07., 08., 09., 10., 11., 12., 13., 14., 15., 16., 17.,
18., 19., 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., 30., 31., 32., 33., 34.,
35., 36.,
],
device,
)?;
// gradient should be
// row 1
// 1+2+3+7+8+9+13+14+15 = 72
// 4+5+6+10+11+12+16+17+18 = 99
// row 2
// 19+20+21+25+26+27+31+32+33 = 234
    // 22+23+24+28+29+30+34+35+36 = 261
let loss = y.unsqueeze(1)?.transpose(0, 1)?.matmul(&z.unsqueeze(1)?)?;
let grads = loss.backward()?;
let grad_x = grads.get(&x).context("no grad for x")?;
assert_eq!(
test_utils::to_vec2_round(&grad_x.flatten(0, 2)?, 4)?,
[[72_f32, 99.], [234., 261.]]
);
// manually checked: see comments
let x = Var::new(&[[[[1f32, 2.], [4., 5.]], [[6f32, 7.], [8., 9.]]]], device)?;
let y = x.interpolate2d(4, 4)?.reshape(32)?;
#[rustfmt::skip]
let z = Tensor::new(
&[
1_f32, 02., 03., 04.,
05., 06., 07., 08.,
09., 10., 11., 12.,
13., 14., 15., 16.,
17., 18., 19., 20.,
21., 22., 23., 24.,
25., 26., 27., 28.,
29., 30., 31., 32.
],
device,
)?;
// gradient should be
// m1r1
// 1+2+5+6=14
// 3+4+7+8=22
// m1r2
// 9+10+13+14=46
// 11+12+15+16=54
// m2r1
// 17+18+21+22=78
// 19+20+23+24=86
// m2r2
// 25+26+29+30=110
// 27+28+31+32=118
let loss = y.unsqueeze(1)?.transpose(0, 1)?.matmul(&z.unsqueeze(1)?)?;
let grads = loss.backward()?;
let grad_x = grads.get(&x).context("no grad for x")?;
assert_eq!(
test_utils::to_vec3_round(&grad_x.flatten(0, 1)?, 4)?,
[[[14_f32, 22.], [46., 54.]], [[78., 86.], [110., 118.]]]
);
// manually checked: see comments
let x = Var::new(
&[[[[1f32, 2.], [4., 5.]]], [[[6f32, 7.], [8., 9.]]]],
device,
)?;
let y = x.interpolate2d(4, 4)?.reshape(32)?;
#[rustfmt::skip]
let z = Tensor::new(
&[
1_f32, 02., 03., 04.,
05., 06., 07., 08.,
09., 10., 11., 12.,
13., 14., 15., 16.,
17., 18., 19., 20.,
21., 22., 23., 24.,
25., 26., 27., 28.,
29., 30., 31., 32.
],
device,
)?;
// gradient should be
// m1r1
// 1+2+5+6=14
// 3+4+7+8=22
// m1r2
// 9+10+13+14=46
// 11+12+15+16=54
// m2r1
// 17+18+21+22=78
// 19+20+23+24=86
// m2r2
// 25+26+29+30=110
// 27+28+31+32=118
let loss = y.unsqueeze(1)?.transpose(0, 1)?.matmul(&z.unsqueeze(1)?)?;
let grads = loss.backward()?;
let grad_x = grads.get(&x).context("no grad for x")?;
assert_eq!(
test_utils::to_vec3_round(&grad_x.flatten(0, 1)?, 4)?,
[[[14_f32, 22.], [46., 54.]], [[78., 86.], [110., 118.]]]
);
Ok(())
}
fn binary_grad(device: &Device) -> Result<()> {
let x = Var::new(&[3f32, 1., -4., -1.], device)?;
let x = x.as_tensor();
// leaky relu
let y = x.maximum(&(x * 0.1)?)?;
let grads = y.backward()?;
let grad_x = grads.get(x).context("no grad for x")?;
assert_eq!(x.to_vec1::<f32>()?, [3., 1., -4., -1.]);
assert_eq!(y.to_vec1::<f32>()?, [3., 1., -0.4, -0.1]);
assert_eq!(grad_x.to_vec1::<f32>()?, [1., 1., 0.1, 0.1]);
let y = x.minimum(&(x * 0.1)?)?;
let grads = y.backward()?;
let grad_x = grads.get(x).context("no grad for x")?;
assert_eq!(y.to_vec1::<f32>()?, [0.3, 0.1, -4., -1.]);
assert_eq!(grad_x.to_vec1::<f32>()?, [0.1, 0.1, 1., 1.]);
// This one is easy to mess up, we want the gradient to be one as it is the identity function.
let y = x.minimum(x)?;
let grads = y.backward()?;
let grad_x = grads.get(x).context("no grad for x")?;
assert_eq!(y.to_vec1::<f32>()?, [3., 1., -4., -1.]);
assert_eq!(grad_x.to_vec1::<f32>()?, [1., 1., 1., 1.]);
let x_var = Var::new(&[3f32, 1., -4., -1., 5., 9.], device)?;
let x = x_var.as_tensor();
let y_var = Var::new(&[2f32, 7., 1.], device)?;
let y = y_var.as_tensor();
let ss = x
.reshape((2, 3))?
.slice_scatter0(&y.reshape((1, 3))?, 1)?
.sqr()?;
let grads = ss.backward()?;
let grad_x = grads.get(x).context("no grad for x")?;
let grad_y = grads.get(y).context("no grad for y")?;
assert_eq!(ss.to_vec2::<f32>()?, [[9., 1., 16.], [4., 49., 1.]]);
assert_eq!(grad_x.to_vec1::<f32>()?, [6.0, 2.0, -8.0, 0.0, 0.0, 0.0]);
assert_eq!(grad_y.to_vec1::<f32>()?, [4.0, 14.0, 2.0]);
Ok(())
}
test_device!(
simple_grad,
simple_grad_cpu,
simple_grad_gpu,
simple_grad_metal
);
test_device!(sum_grad, sum_grad_cpu, sum_grad_gpu, sum_grad_metal);
test_device!(
matmul_grad,
matmul_grad_cpu,
matmul_grad_gpu,
matmul_grad_metal
);
test_device!(
grad_descent,
grad_descent_cpu,
grad_descent_gpu,
grad_descent_metal
);
test_device!(unary_grad, unary_grad_cpu, unary_grad_gpu, unary_grad_metal);
test_device!(
binary_grad,
binary_grad_cpu,
binary_grad_gpu,
binary_grad_metal
);
| candle/candle-core/tests/grad_tests.rs/0 | {
"file_path": "candle/candle-core/tests/grad_tests.rs",
"repo_id": "candle",
"token_count": 9105
} |
# candle-bert
Bert is a general-purpose transformer-based language model. In this example it can be used for two
different tasks:
- Compute sentence embeddings for a prompt.
- Compute similarities between a set of sentences.
## Sentence embeddings
Bert is used to compute the sentence embeddings for a prompt. The model weights
are downloaded from the hub on the first run.
```bash
cargo run --example bert --release -- --prompt "Here is a test sentence"
> [[[ 0.0798, -0.0665, -0.0247, ..., -0.1082, -0.1000, -0.2751],
> [ 0.4218, 0.2690, 0.2740, ..., 0.3889, 1.3503, 0.9908],
> [ 0.0466, 0.3041, -0.1143, ..., 0.4427, 0.6926, -0.1515],
> ...
> [ 0.3396, 0.4320, -0.4408, ..., 0.9212, 0.2331, -0.6777],
> [ 0.2789, 0.7539, 0.4306, ..., -0.0095, 0.3375, -1.7529],
> [ 0.6737, 0.7882, 0.0548, ..., 0.1836, 0.7299, -0.6617]]]
> Tensor[[1, 7, 384], f32]
```
### Custom models
You can specify different models, such as BGE, with the `--model-id` flag:
```bash
cargo run --example bert --release -- \
--model-id BAAI/bge-large-zh-v1.5 \
--prompt "Here is a test sentence"
Loaded and encoded 435.70775ms
[[[ 3.0944e-1, -7.8455e-5, -1.2768e0, ..., 1.3755e-2, -3.2371e-1, 2.3819e-1],
[-2.8506e-1, 1.9953e-1, -1.3076e0, ..., 6.9819e-2, 1.0833e-2, -1.1512e0],
[ 3.9892e-1, 2.0000e-1, -9.3178e-1, ..., -4.1393e-1, -4.9644e-2, -3.3786e-1],
...
[ 6.0345e-1, 3.5744e-1, -1.2672e0, ..., -6.9165e-1, -3.4973e-3, -8.4214e-1],
[ 3.9218e-1, -3.2735e-1, -1.3123e0, ..., -4.9318e-1, -5.1334e-1, -3.6391e-1],
[ 3.0978e-1, 2.5662e-4, -1.2773e0, ..., 1.3357e-2, -3.2390e-1, 2.3858e-1]]]
Tensor[[1, 9, 1024], f32]
Took 176.744667ms
```
### Gelu approximation
You can get a speedup by using an approximation of the gelu activation, with a
small loss of precision, by passing the `--approximate-gelu` flag:
```bash
$ cargo run --example bert --release -- \
--model-id BAAI/bge-large-zh-v1.5 \
--prompt "Here is a test sentence" \
--approximate-gelu
Loaded and encoded 244.388042ms
[[[ 3.1048e-1, -6.0339e-4, -1.2758e0, ..., 1.3718e-2, -3.2362e-1, 2.3775e-1],
[-2.8354e-1, 1.9984e-1, -1.3077e0, ..., 6.9390e-2, 9.9681e-3, -1.1531e0],
[ 3.9947e-1, 1.9917e-1, -9.3178e-1, ..., -4.1301e-1, -5.0719e-2, -3.3955e-1],
...
[ 6.0499e-1, 3.5664e-1, -1.2642e0, ..., -6.9134e-1, -3.4581e-3, -8.4471e-1],
[ 3.9311e-1, -3.2812e-1, -1.3105e0, ..., -4.9291e-1, -5.1270e-1, -3.6543e-1],
[ 3.1082e-1, -2.6737e-4, -1.2762e0, ..., 1.3319e-2, -3.2381e-1, 2.3815e-1]]]
Tensor[[1, 9, 1024], f32]
Took 116.840791ms
```
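For reference, the approximate variant swaps the exact erf-based GELU for the usual tanh-based formula. Below is a minimal, self-contained sketch of that formula in plain Rust; the `sqrt(2/pi)` factor and the `0.044715` constant are the standard tanh-approximation constants, and this is only an illustration, not the kernel used by candle:

```rust
// Tanh approximation of GELU:
// gelu(x) ≈ 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3)))
fn gelu_tanh(x: f32) -> f32 {
    let sqrt_2_over_pi = (2.0_f32 / std::f32::consts::PI).sqrt();
    0.5 * x * (1.0 + (sqrt_2_over_pi * (x + 0.044715 * x * x * x)).tanh())
}

fn main() {
    // The difference from the exact erf-based GELU is small, which is the
    // "small loss of precision" mentioned above.
    for x in [-2.0_f32, -0.5, 0.0, 0.5, 2.0] {
        println!("gelu_tanh({x:>4}) = {:.6}", gelu_tanh(x));
    }
}
```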
## Similarities
In this example, Bert is used to compute the sentence embeddings for a set of
sentences (hardcoded in the example). Cosine similarities are then computed for
each sentence pair and reported in decreasing order, so the first reported pair
contains the two sentences with the highest similarity score.
The sentence embeddings are computed using average pooling over all the
sentence tokens, including any padding.
```bash
cargo run --example bert --release
> score: 0.85 'The new movie is awesome' 'The new movie is so great'
> score: 0.61 'The cat sits outside' 'The cat plays in the garden'
> score: 0.52 'I love pasta' 'Do you like pizza?'
> score: 0.23 'The new movie is awesome' 'Do you like pizza?'
> score: 0.22 'I love pasta' 'The new movie is awesome'
```
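For reference, here is a condensed sketch of the pooling and similarity step described above. It assumes `embeddings` is the `[n_sentences, n_tokens, hidden_size]` tensor produced by the BERT forward pass; the function and variable names are illustrative rather than the exact ones used in the example:

```rust
use candle::{Result, Tensor};

// Average-pool the token embeddings of each sentence, then score every
// sentence pair with the cosine similarity of the pooled vectors.
fn pairwise_cosine(embeddings: &Tensor) -> Result<Vec<(f32, usize, usize)>> {
    let (n_sentences, n_tokens, _hidden_size) = embeddings.dims3()?;
    // Average pooling over the token dimension (padding tokens included).
    let pooled = (embeddings.sum(1)? / (n_tokens as f64))?;
    let mut scores = Vec::new();
    for i in 0..n_sentences {
        let e_i = pooled.get(i)?;
        for j in (i + 1)..n_sentences {
            let e_j = pooled.get(j)?;
            let dot = (&e_i * &e_j)?.sum_all()?.to_scalar::<f32>()?;
            let sq_norm_i = (&e_i * &e_i)?.sum_all()?.to_scalar::<f32>()?;
            let sq_norm_j = (&e_j * &e_j)?.sum_all()?.to_scalar::<f32>()?;
            scores.push((dot / (sq_norm_i * sq_norm_j).sqrt(), i, j));
        }
    }
    // Highest similarity first, matching the ordering of the output above.
    scores.sort_by(|a, b| b.0.total_cmp(&a.0));
    Ok(scores)
}
```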
| candle/candle-examples/examples/bert/README.md/0 | {
"file_path": "candle/candle-examples/examples/bert/README.md",
"repo_id": "candle",
"token_count": 1564
} |
#[cfg(feature = "mkl")]
extern crate intel_mkl_src;
#[cfg(feature = "accelerate")]
extern crate accelerate_src;
use clap::{Parser, ValueEnum};
use candle::{DType, IndexOp, D};
use candle_nn::{Module, VarBuilder};
use candle_transformers::models::convnext;
#[derive(Clone, Copy, Debug, ValueEnum)]
enum Which {
Atto,
Femto,
Pico,
Nano,
Tiny,
Small,
Base,
Large,
AttoV2,
FemtoV2,
PicoV2,
NanoV2,
TinyV2,
BaseV2,
LargeV2,
XLarge,
Huge,
}
impl Which {
fn model_filename(&self) -> String {
let name = match self {
Self::Atto => "convnext_atto.d2_in1k",
Self::Femto => "convnext_femto.d1_in1k",
Self::Pico => "convnext_pico.d1_in1k",
Self::Nano => "convnext_nano.d1h_in1k",
Self::Tiny => "convnext_tiny.fb_in1k",
Self::Small => "convnext_small.fb_in1k",
Self::Base => "convnext_base.fb_in1k",
Self::Large => "convnext_large.fb_in1k",
Self::AttoV2 => "convnextv2_atto.fcmae_ft_in1k",
Self::FemtoV2 => "convnextv2_femto.fcmae_ft_in1k",
Self::PicoV2 => "convnextv2_pico.fcmae_ft_in1k",
Self::NanoV2 => "convnextv2_nano.fcmae_ft_in1k",
Self::TinyV2 => "convnextv2_tiny.fcmae_ft_in1k",
Self::BaseV2 => "convnextv2_base.fcmae_ft_in1k",
Self::LargeV2 => "convnextv2_large.fcmae_ft_in1k",
Self::XLarge => "convnext_xlarge.fb_in22k_ft_in1k",
Self::Huge => "convnextv2_huge.fcmae_ft_in1k",
};
format!("timm/{name}")
}
fn config(&self) -> convnext::Config {
match self {
Self::Atto | Self::AttoV2 => convnext::Config::atto(),
Self::Femto | Self::FemtoV2 => convnext::Config::femto(),
Self::Pico | Self::PicoV2 => convnext::Config::pico(),
Self::Nano | Self::NanoV2 => convnext::Config::nano(),
Self::Tiny | Self::TinyV2 => convnext::Config::tiny(),
Self::Small => convnext::Config::small(),
Self::Base | Self::BaseV2 => convnext::Config::base(),
Self::Large | Self::LargeV2 => convnext::Config::large(),
Self::XLarge => convnext::Config::xlarge(),
Self::Huge => convnext::Config::huge(),
}
}
}
#[derive(Parser)]
struct Args {
#[arg(long)]
model: Option<String>,
#[arg(long)]
image: String,
/// Run on CPU rather than on GPU.
#[arg(long)]
cpu: bool,
#[arg(value_enum, long, default_value_t=Which::Tiny)]
which: Which,
}
pub fn main() -> anyhow::Result<()> {
let args = Args::parse();
let device = candle_examples::device(args.cpu)?;
let image = candle_examples::imagenet::load_image224(args.image)?.to_device(&device)?;
println!("loaded image {image:?}");
let model_file = match args.model {
None => {
let model_name = args.which.model_filename();
let api = hf_hub::api::sync::Api::new()?;
let api = api.model(model_name);
api.get("model.safetensors")?
}
Some(model) => model.into(),
};
let vb = unsafe { VarBuilder::from_mmaped_safetensors(&[model_file], DType::F32, &device)? };
let model = convnext::convnext(&args.which.config(), 1000, vb)?;
println!("model built");
let logits = model.forward(&image.unsqueeze(0)?)?;
let prs = candle_nn::ops::softmax(&logits, D::Minus1)?
.i(0)?
.to_vec1::<f32>()?;
let mut prs = prs.iter().enumerate().collect::<Vec<_>>();
prs.sort_by(|(_, p1), (_, p2)| p2.total_cmp(p1));
for &(category_idx, pr) in prs.iter().take(5) {
println!(
"{:24}: {:.2}%",
candle_examples::imagenet::CLASSES[category_idx],
100. * pr
);
}
Ok(())
}
| candle/candle-examples/examples/convnext/main.rs/0 | {
"file_path": "candle/candle-examples/examples/convnext/main.rs",
"repo_id": "candle",
"token_count": 1926
} |
//! EfficientNet implementation.
//!
//! https://arxiv.org/abs/1905.11946
#[cfg(feature = "mkl")]
extern crate intel_mkl_src;
#[cfg(feature = "accelerate")]
extern crate accelerate_src;
use candle::{DType, IndexOp, D};
use candle_nn::{Module, VarBuilder};
use candle_transformers::models::efficientnet::{EfficientNet, MBConvConfig};
use clap::{Parser, ValueEnum};
#[derive(Clone, Copy, Debug, ValueEnum)]
enum Which {
B0,
B1,
B2,
B3,
B4,
B5,
B6,
B7,
}
#[derive(Parser)]
struct Args {
#[arg(long)]
model: Option<String>,
#[arg(long)]
image: String,
/// Run on CPU rather than on GPU.
#[arg(long)]
cpu: bool,
/// Variant of the model to use.
#[arg(value_enum, long, default_value_t = Which::B2)]
which: Which,
}
pub fn main() -> anyhow::Result<()> {
let args = Args::parse();
let device = candle_examples::device(args.cpu)?;
let image = candle_examples::imagenet::load_image224(args.image)?.to_device(&device)?;
println!("loaded image {image:?}");
let model_file = match args.model {
None => {
let api = hf_hub::api::sync::Api::new()?;
let api = api.model("lmz/candle-efficientnet".into());
let filename = match args.which {
Which::B0 => "efficientnet-b0.safetensors",
Which::B1 => "efficientnet-b1.safetensors",
Which::B2 => "efficientnet-b2.safetensors",
Which::B3 => "efficientnet-b3.safetensors",
Which::B4 => "efficientnet-b4.safetensors",
Which::B5 => "efficientnet-b5.safetensors",
Which::B6 => "efficientnet-b6.safetensors",
Which::B7 => "efficientnet-b7.safetensors",
};
api.get(filename)?
}
Some(model) => model.into(),
};
let vb = unsafe { VarBuilder::from_mmaped_safetensors(&[model_file], DType::F32, &device)? };
let cfg = match args.which {
Which::B0 => MBConvConfig::b0(),
Which::B1 => MBConvConfig::b1(),
Which::B2 => MBConvConfig::b2(),
Which::B3 => MBConvConfig::b3(),
Which::B4 => MBConvConfig::b4(),
Which::B5 => MBConvConfig::b5(),
Which::B6 => MBConvConfig::b6(),
Which::B7 => MBConvConfig::b7(),
};
let model = EfficientNet::new(vb, cfg, candle_examples::imagenet::CLASS_COUNT as usize)?;
println!("model built");
let logits = model.forward(&image.unsqueeze(0)?)?;
let prs = candle_nn::ops::softmax(&logits, D::Minus1)?
.i(0)?
.to_vec1::<f32>()?;
let mut prs = prs.iter().enumerate().collect::<Vec<_>>();
prs.sort_by(|(_, p1), (_, p2)| p2.total_cmp(p1));
for &(category_idx, pr) in prs.iter().take(5) {
println!(
"{:24}: {:.2}%",
candle_examples::imagenet::CLASSES[category_idx],
100. * pr
);
}
Ok(())
}
| candle/candle-examples/examples/efficientnet/main.rs/0 | {
"file_path": "candle/candle-examples/examples/efficientnet/main.rs",
"repo_id": "candle",
"token_count": 1421
} |
// https://github.com/karpathy/llama2.c
#[cfg(feature = "accelerate")]
extern crate accelerate_src;
#[cfg(feature = "mkl")]
extern crate intel_mkl_src;
use candle_transformers::models::llama2_c as model;
use candle_transformers::models::llama2_c_weights as weights;
use candle_transformers::models::quantized_llama2_c as qmodel;
mod training;
use clap::{Parser, Subcommand};
use anyhow::{Error as E, Result};
use byteorder::{LittleEndian, ReadBytesExt};
use candle::{IndexOp, Tensor};
use candle_transformers::generation::LogitsProcessor;
use std::io::Write;
use tokenizers::Tokenizer;
use model::{Cache, Config, Llama};
use qmodel::QLlama;
use weights::TransformerWeights;
#[derive(Parser, Debug, Clone)]
struct InferenceCmd {
/// The temperature used to generate samples.
#[arg(long)]
temperature: Option<f64>,
/// Nucleus sampling probability cutoff.
#[arg(long)]
top_p: Option<f64>,
#[arg(long, default_value = "")]
prompt: String,
/// Config file in binary or safetensors format.
#[arg(long)]
config: Option<String>,
#[arg(long, default_value = "karpathy/tinyllamas")]
model_id: String,
/// The model to be used when getting it from the hub. Possible
/// values are 'stories15M.bin', 'stories42M.bin', see more at:
/// https://huggingface.co/karpathy/tinyllamas/tree/main
#[arg(long, default_value = "stories15M.bin")]
which_model: String,
}
#[derive(Parser, Debug, Clone)]
struct EvaluationCmd {
/// A directory with the pre-tokenized dataset in the format generated by the tinystories.py
/// script from llama2.c https://github.com/karpathy/llama2.c
#[arg(long)]
pretokenized_dir: Option<String>,
#[arg(long, default_value_t = 32)]
batch_size: usize,
/// Config file in binary format.
#[arg(long)]
config: Option<String>,
#[arg(long, default_value = "karpathy/tinyllamas")]
model_id: String,
/// The model to be used when getting it from the hub. Possible
/// values are 'stories15M.bin', 'stories42M.bin', see more at:
/// https://huggingface.co/karpathy/tinyllamas/tree/main
#[arg(long, default_value = "stories15M.bin")]
which_model: String,
}
#[derive(Parser, Debug, Clone)]
pub struct TrainingCmd {
/// A directory with the pre-tokenized dataset in the format generated by the tinystories.py
/// script from llama2.c https://github.com/karpathy/llama2.c
#[arg(long)]
pretokenized_dir: String,
#[arg(long, default_value_t = 32)]
batch_size: usize,
#[arg(long, default_value_t = 0.001)]
learning_rate: f64,
}
#[derive(Subcommand, Debug, Clone)]
enum Task {
Inference(InferenceCmd),
Eval(EvaluationCmd),
Train(TrainingCmd),
}
#[derive(Parser, Debug)]
#[command(author, version, about, long_about = None)]
pub struct Args {
/// The task to be performed, inference, training or evaluation.
#[command(subcommand)]
task: Option<Task>,
/// Run on CPU rather than on GPU.
#[arg(long)]
cpu: bool,
/// Tokenizer config file.
#[arg(long)]
tokenizer: Option<String>,
/// Penalty to be applied for repeating tokens, 1. means no penalty.
#[arg(long, default_value_t = 1.1)]
repeat_penalty: f32,
/// The context size to consider for the repeat penalty.
#[arg(long, default_value_t = 64)]
repeat_last_n: usize,
}
impl Args {
fn tokenizer(&self) -> Result<Tokenizer> {
let tokenizer_path = match &self.tokenizer {
Some(config) => std::path::PathBuf::from(config),
None => {
let api = hf_hub::api::sync::Api::new()?;
let api = api.model("hf-internal-testing/llama-tokenizer".to_string());
api.get("tokenizer.json")?
}
};
Tokenizer::from_file(tokenizer_path).map_err(E::msg)
}
}
fn main() -> anyhow::Result<()> {
let args = Args::parse();
match &args.task {
None => {
let cmd = InferenceCmd {
temperature: None,
top_p: None,
prompt: "".to_string(),
config: None,
model_id: "karpathy/tinyllamas".to_string(),
which_model: "stories15M.bin".to_string(),
};
run_inference(&cmd, &args)?
}
Some(Task::Inference(cmd)) => run_inference(cmd, &args)?,
Some(Task::Eval(cmd)) => run_eval(cmd, &args)?,
Some(Task::Train(cmd)) => training::run(cmd, &args)?,
}
Ok(())
}
enum Model {
Llama(Llama),
QLlama(QLlama),
}
impl Model {
fn forward(&self, xs: &Tensor, pos: usize, cache: &mut Cache) -> anyhow::Result<Tensor> {
match self {
Self::Llama(l) => Ok(l.forward(xs, pos, cache)?),
Self::QLlama(l) => Ok(l.forward(xs, pos, cache)?),
}
}
}
fn run_eval(args: &EvaluationCmd, common_args: &Args) -> Result<()> {
use std::io::BufRead;
let config_path = match &args.config {
Some(config) => std::path::PathBuf::from(config),
None => {
let api = hf_hub::api::sync::Api::new()?;
println!("loading the model weights from {}", args.model_id);
let api = api.model(args.model_id.clone());
api.get(&args.which_model)?
}
};
let tokenizer = common_args.tokenizer()?;
let device = candle_examples::device(common_args.cpu)?;
let mut file = std::fs::File::open(config_path)?;
let config = Config::from_reader(&mut file)?;
let weights = TransformerWeights::from_reader(&mut file, &config, &device)?;
let vb = weights.var_builder(&config, &device)?;
let mut cache = Cache::new(false, &config, vb.pp("rot"))?;
let model = Llama::load(vb, config)?;
let tokens = match &args.pretokenized_dir {
None => {
let api = hf_hub::api::sync::Api::new()?;
let model_id = "roneneldan/TinyStories"; // TODO: Make this configurable.
println!("loading the evaluation dataset from {}", model_id);
let api = api.dataset(model_id.to_string());
let dataset_path = api.get("TinyStories-valid.txt")?;
let file = std::fs::File::open(dataset_path)?;
let file = std::io::BufReader::new(file);
let mut tokens = vec![];
for line in file.lines() {
let line = line?.replace("<|endoftext|>", "<s>");
let line = tokenizer.encode(line, false).map_err(E::msg)?;
tokens.push(line.get_ids().to_vec())
}
tokens.concat()
}
Some(pretokenized_dir) => {
// Use shard 0 for the test split, similar to llama2.c
// https://github.com/karpathy/llama2.c/blob/ce05cc28cf1e3560b873bb21837638a434520a67/tinystories.py#L121
let path = std::path::PathBuf::from(pretokenized_dir).join("data00.bin");
let bytes = std::fs::read(path)?;
// Tokens are encoded as u16.
let mut tokens = vec![0u16; bytes.len() / 2];
std::io::Cursor::new(bytes).read_u16_into::<LittleEndian>(&mut tokens)?;
tokens.into_iter().map(|u| u as u32).collect::<Vec<u32>>()
}
};
println!("dataset loaded and encoded: {} tokens", tokens.len());
let seq_len = model.config.seq_len;
let iter = (0..tokens.len()).step_by(seq_len).flat_map(|start_idx| {
if start_idx + seq_len + 1 > tokens.len() {
None
} else {
let tokens = &tokens[start_idx..start_idx + seq_len + 1];
let inputs = Tensor::new(&tokens[..seq_len], &device);
let targets = Tensor::new(&tokens[1..], &device);
Some(inputs.and_then(|inputs| targets.map(|targets| (inputs, targets))))
}
});
let batch_iter = candle_datasets::Batcher::new_r2(iter).batch_size(args.batch_size);
for inp_tgt in batch_iter {
let (inp, tgt) = inp_tgt?;
let logits = model.forward(&inp, 0, &mut cache)?;
let loss = candle_nn::loss::cross_entropy(&logits.flatten_to(1)?, &tgt.flatten_to(1)?)?;
println!("{}", loss.to_vec0::<f32>()?);
}
Ok(())
}
fn run_inference(args: &InferenceCmd, common_args: &Args) -> Result<()> {
let config_path = match &args.config {
Some(config) => std::path::PathBuf::from(config),
None => {
let api = hf_hub::api::sync::Api::new()?;
println!("loading the model weights from {}", args.model_id);
let api = api.model(args.model_id.clone());
api.get(&args.which_model)?
}
};
let tokenizer = common_args.tokenizer()?;
let device = candle_examples::device(common_args.cpu)?;
let is_gguf = config_path.extension().map_or(false, |v| v == "gguf");
let is_safetensors = config_path
.extension()
.map_or(false, |v| v == "safetensors");
let (model, config, mut cache) = if is_gguf {
let vb = qmodel::VarBuilder::from_gguf(config_path, &device)?;
let (_vocab_size, dim) = vb
.get_no_shape("model.embed_tokens.weight")?
.shape()
.dims2()?;
let config = match dim {
64 => Config::tiny_260k(),
288 => Config::tiny_15m(),
512 => Config::tiny_42m(),
768 => Config::tiny_110m(),
_ => anyhow::bail!("no config for dim {dim}"),
};
let freq_cis_real = vb
.get(
(config.seq_len, config.head_size() / 2),
"rot.freq_cis_real",
)?
.dequantize(&device)?;
let freq_cis_imag = vb
.get(
(config.seq_len, config.head_size() / 2),
"rot.freq_cis_imag",
)?
.dequantize(&device)?;
let fake_vb = candle_nn::VarBuilder::from_tensors(
[
("freq_cis_real".to_string(), freq_cis_real),
("freq_cis_imag".to_string(), freq_cis_imag),
]
.into_iter()
.collect(),
candle::DType::F32,
&device,
);
let cache = model::Cache::new(true, &config, fake_vb)?;
let model = Model::QLlama(QLlama::load(vb, config.clone())?);
(model, config, cache)
} else if is_safetensors {
let config = Config::tiny_15m();
let tensors = candle::safetensors::load(config_path, &device)?;
let vb = candle_nn::VarBuilder::from_tensors(tensors, candle::DType::F32, &device);
let cache = model::Cache::new(true, &config, vb.pp("rot"))?;
let model = Model::Llama(Llama::load(vb, config.clone())?);
(model, config, cache)
} else {
let mut file = std::fs::File::open(config_path)?;
let config = Config::from_reader(&mut file)?;
println!("{config:?}");
let weights = TransformerWeights::from_reader(&mut file, &config, &device)?;
let vb = weights.var_builder(&config, &device)?;
let cache = model::Cache::new(true, &config, vb.pp("rot"))?;
let model = Model::Llama(Llama::load(vb, config.clone())?);
(model, config, cache)
};
println!("starting the inference loop");
let mut logits_processor = LogitsProcessor::new(299792458, args.temperature, args.top_p);
let mut index_pos = 0;
print!("{}", args.prompt);
let mut tokens = tokenizer
.encode(args.prompt.clone(), true)
.map_err(E::msg)?
.get_ids()
.to_vec();
let mut tokenizer = candle_examples::token_output_stream::TokenOutputStream::new(tokenizer);
let start_gen = std::time::Instant::now();
for index in 0.. {
if tokens.len() >= config.seq_len {
break;
}
let context_size = if index > 0 { 1 } else { tokens.len() };
let ctxt = &tokens[tokens.len().saturating_sub(context_size)..];
let input = Tensor::new(ctxt, &device)?.unsqueeze(0)?;
let logits = model.forward(&input, index_pos, &mut cache)?;
let logits = logits.i((0, logits.dim(1)? - 1))?;
let logits = if common_args.repeat_penalty == 1. || tokens.is_empty() {
logits
} else {
let start_at = tokens.len().saturating_sub(common_args.repeat_last_n);
candle_transformers::utils::apply_repeat_penalty(
&logits,
common_args.repeat_penalty,
&tokens[start_at..],
)?
};
index_pos += ctxt.len();
let next_token = logits_processor.sample(&logits)?;
tokens.push(next_token);
if let Some(t) = tokenizer.next_token(next_token)? {
print!("{t}");
std::io::stdout().flush()?;
}
}
if let Some(rest) = tokenizer.decode_rest().map_err(E::msg)? {
print!("{rest}");
}
let dt = start_gen.elapsed();
println!(
"\n{} tokens generated ({:.2} token/s)\n",
tokens.len(),
tokens.len() as f64 / dt.as_secs_f64(),
);
Ok(())
}
| candle/candle-examples/examples/llama2-c/main.rs/0 | {
"file_path": "candle/candle-examples/examples/llama2-c/main.rs",
"repo_id": "candle",
"token_count": 6004
} |
#[cfg(feature = "mkl")]
extern crate intel_mkl_src;
#[cfg(feature = "accelerate")]
extern crate accelerate_src;
use anyhow::Error as E;
use clap::{Parser, ValueEnum};
use candle::{DType, Tensor};
use candle_examples::token_output_stream::TokenOutputStream;
use candle_nn::VarBuilder;
use candle_transformers::models::marian;
use tokenizers::Tokenizer;
#[derive(Clone, Debug, Copy, ValueEnum)]
enum Which {
Base,
Big,
}
// TODO: Maybe add support for the conditional prompt.
#[derive(Parser)]
struct Args {
#[arg(long)]
model: Option<String>,
#[arg(long)]
tokenizer: Option<String>,
#[arg(long)]
tokenizer_dec: Option<String>,
/// Choose the variant of the model to run.
#[arg(long, default_value = "big")]
which: Which,
/// Run on CPU rather than on GPU.
#[arg(long)]
cpu: bool,
/// Use the quantized version of the model.
#[arg(long)]
quantized: bool,
/// Text to be translated
#[arg(long)]
text: String,
}
pub fn main() -> anyhow::Result<()> {
use hf_hub::api::sync::Api;
let args = Args::parse();
let config = match args.which {
Which::Base => marian::Config::opus_mt_fr_en(),
Which::Big => marian::Config::opus_mt_tc_big_fr_en(),
};
let tokenizer = {
let tokenizer = match args.tokenizer {
Some(tokenizer) => std::path::PathBuf::from(tokenizer),
None => {
let name = match args.which {
Which::Base => "tokenizer-marian-base-fr.json",
Which::Big => "tokenizer-marian-fr.json",
};
Api::new()?
.model("lmz/candle-marian".to_string())
.get(name)?
}
};
Tokenizer::from_file(&tokenizer).map_err(E::msg)?
};
let tokenizer_dec = {
let tokenizer = match args.tokenizer_dec {
Some(tokenizer) => std::path::PathBuf::from(tokenizer),
None => {
let name = match args.which {
Which::Base => "tokenizer-marian-base-en.json",
Which::Big => "tokenizer-marian-en.json",
};
Api::new()?
.model("lmz/candle-marian".to_string())
.get(name)?
}
};
Tokenizer::from_file(&tokenizer).map_err(E::msg)?
};
let mut tokenizer_dec = TokenOutputStream::new(tokenizer_dec);
let device = candle_examples::device(args.cpu)?;
let vb = {
let model = match args.model {
Some(model) => std::path::PathBuf::from(model),
None => match args.which {
Which::Base => Api::new()?
.repo(hf_hub::Repo::with_revision(
"Helsinki-NLP/opus-mt-fr-en".to_string(),
hf_hub::RepoType::Model,
"refs/pr/4".to_string(),
))
.get("model.safetensors")?,
Which::Big => Api::new()?
.model("Helsinki-NLP/opus-mt-tc-big-fr-en".to_string())
.get("model.safetensors")?,
},
};
unsafe { VarBuilder::from_mmaped_safetensors(&[&model], DType::F32, &device)? }
};
let mut model = marian::MTModel::new(&config, vb)?;
let mut logits_processor =
candle_transformers::generation::LogitsProcessor::new(1337, None, None);
let encoder_xs = {
let mut tokens = tokenizer
.encode(args.text, true)
.map_err(E::msg)?
.get_ids()
.to_vec();
tokens.push(config.eos_token_id);
let tokens = Tensor::new(tokens.as_slice(), &device)?.unsqueeze(0)?;
model.encoder().forward(&tokens, 0)?
};
let mut token_ids = vec![config.decoder_start_token_id];
for index in 0..1000 {
let context_size = if index >= 1 { 1 } else { token_ids.len() };
let start_pos = token_ids.len().saturating_sub(context_size);
let input_ids = Tensor::new(&token_ids[start_pos..], &device)?.unsqueeze(0)?;
let logits = model.decode(&input_ids, &encoder_xs, start_pos)?;
let logits = logits.squeeze(0)?;
let logits = logits.get(logits.dim(0)? - 1)?;
let token = logits_processor.sample(&logits)?;
token_ids.push(token);
if let Some(t) = tokenizer_dec.next_token(token)? {
use std::io::Write;
print!("{t}");
std::io::stdout().flush()?;
}
if token == config.eos_token_id || token == config.forced_eos_token_id {
break;
}
}
if let Some(rest) = tokenizer_dec.decode_rest().map_err(E::msg)? {
print!("{rest}");
}
println!();
Ok(())
}
| candle/candle-examples/examples/marian-mt/main.rs/0 | {
"file_path": "candle/candle-examples/examples/marian-mt/main.rs",
"repo_id": "candle",
"token_count": 2385
} |
#[cfg(feature = "mkl")]
extern crate intel_mkl_src;
#[cfg(feature = "accelerate")]
extern crate accelerate_src;
use clap::{Parser, ValueEnum};
use candle::{DType, IndexOp, D};
use candle_nn::{Module, VarBuilder};
use candle_transformers::models::mobileone;
#[derive(Clone, Copy, Debug, ValueEnum)]
enum Which {
S0,
S1,
S2,
S3,
S4,
}
impl Which {
fn model_filename(&self) -> String {
let name = match self {
Self::S0 => "s0",
Self::S1 => "s1",
Self::S2 => "s2",
Self::S3 => "s3",
Self::S4 => "s4",
};
format!("timm/mobileone_{}.apple_in1k", name)
}
fn config(&self) -> mobileone::Config {
match self {
Self::S0 => mobileone::Config::s0(),
Self::S1 => mobileone::Config::s1(),
Self::S2 => mobileone::Config::s2(),
Self::S3 => mobileone::Config::s3(),
Self::S4 => mobileone::Config::s4(),
}
}
}
#[derive(Parser)]
struct Args {
#[arg(long)]
model: Option<String>,
#[arg(long)]
image: String,
/// Run on CPU rather than on GPU.
#[arg(long)]
cpu: bool,
#[arg(value_enum, long, default_value_t=Which::S0)]
which: Which,
}
pub fn main() -> anyhow::Result<()> {
let args = Args::parse();
let device = candle_examples::device(args.cpu)?;
let image = candle_examples::imagenet::load_image224(args.image)?.to_device(&device)?;
println!("loaded image {image:?}");
let model_file = match args.model {
None => {
let model_name = args.which.model_filename();
let api = hf_hub::api::sync::Api::new()?;
let api = api.model(model_name);
api.get("model.safetensors")?
}
Some(model) => model.into(),
};
let vb = unsafe { VarBuilder::from_mmaped_safetensors(&[model_file], DType::F32, &device)? };
let model = mobileone::mobileone(&args.which.config(), 1000, vb)?;
println!("model built");
let logits = model.forward(&image.unsqueeze(0)?)?;
let prs = candle_nn::ops::softmax(&logits, D::Minus1)?
.i(0)?
.to_vec1::<f32>()?;
let mut prs = prs.iter().enumerate().collect::<Vec<_>>();
prs.sort_by(|(_, p1), (_, p2)| p2.total_cmp(p1));
for &(category_idx, pr) in prs.iter().take(5) {
println!(
"{:24}: {:.2}%",
candle_examples::imagenet::CLASSES[category_idx],
100. * pr
);
}
Ok(())
}
| candle/candle-examples/examples/mobileone/main.rs/0 | {
"file_path": "candle/candle-examples/examples/mobileone/main.rs",
"repo_id": "candle",
"token_count": 1213
} |
# candle-parler-tts
[Parler-TTS](https://huggingface.co/parler-tts/parler-tts-large-v1) is a large
text-to-speech model with 2.2B parameters trained on ~45K hours of audio data.
The voice can be controlled by a text prompt.
## Run an example
```bash
cargo run --example parler-tts -r -- \
--prompt "Hey, how are you doing today?"
```
To describe the voice you want, pass a text prompt via the `--description` argument.
```bash
cargo run --example parler-tts -r -- \
--prompt "Hey, how are you doing today?" \
--description "A female speaker delivers a slightly expressive and animated speech with a moderate speed and pitch. The recording is of very high quality, with the speaker's voice sounding clear and very close up."
```
https://github.com/user-attachments/assets/1b16aeac-70a3-4803-8589-4563279bba33
| candle/candle-examples/examples/parler-tts/README.md/0 | {
"file_path": "candle/candle-examples/examples/parler-tts/README.md",
"repo_id": "candle",
"token_count": 260
} |
#[cfg(feature = "mkl")]
extern crate intel_mkl_src;
#[cfg(feature = "accelerate")]
extern crate accelerate_src;
use anyhow::{Error as E, Result};
use clap::Parser;
use candle_transformers::models::qwen2::{Config as ConfigBase, ModelForCausalLM as ModelBase};
use candle_transformers::models::qwen2_moe::{Config as ConfigMoe, Model as ModelMoe};
use candle::{DType, Device, Tensor};
use candle_examples::token_output_stream::TokenOutputStream;
use candle_nn::VarBuilder;
use candle_transformers::generation::LogitsProcessor;
use hf_hub::{api::sync::Api, Repo, RepoType};
use tokenizers::Tokenizer;
enum Model {
Base(ModelBase),
Moe(ModelMoe),
}
impl Model {
fn forward(&mut self, xs: &Tensor, s: usize) -> candle::Result<Tensor> {
match self {
Self::Moe(ref mut m) => m.forward(xs, s),
Self::Base(ref mut m) => m.forward(xs, s),
}
}
}
struct TextGeneration {
model: Model,
device: Device,
tokenizer: TokenOutputStream,
logits_processor: LogitsProcessor,
repeat_penalty: f32,
repeat_last_n: usize,
}
impl TextGeneration {
#[allow(clippy::too_many_arguments)]
fn new(
model: Model,
tokenizer: Tokenizer,
seed: u64,
temp: Option<f64>,
top_p: Option<f64>,
repeat_penalty: f32,
repeat_last_n: usize,
device: &Device,
) -> Self {
let logits_processor = LogitsProcessor::new(seed, temp, top_p);
Self {
model,
tokenizer: TokenOutputStream::new(tokenizer),
logits_processor,
repeat_penalty,
repeat_last_n,
device: device.clone(),
}
}
fn run(&mut self, prompt: &str, sample_len: usize) -> Result<()> {
use std::io::Write;
self.tokenizer.clear();
let mut tokens = self
.tokenizer
.tokenizer()
.encode(prompt, true)
.map_err(E::msg)?
.get_ids()
.to_vec();
for &t in tokens.iter() {
if let Some(t) = self.tokenizer.next_token(t)? {
print!("{t}")
}
}
std::io::stdout().flush()?;
let mut generated_tokens = 0usize;
let eos_token = match self.tokenizer.get_token("<|endoftext|>") {
Some(token) => token,
None => anyhow::bail!("cannot find the <|endoftext|> token"),
};
let start_gen = std::time::Instant::now();
for index in 0..sample_len {
let context_size = if index > 0 { 1 } else { tokens.len() };
let start_pos = tokens.len().saturating_sub(context_size);
let ctxt = &tokens[start_pos..];
let input = Tensor::new(ctxt, &self.device)?.unsqueeze(0)?;
let logits = self.model.forward(&input, start_pos)?;
let logits = logits.squeeze(0)?.squeeze(0)?.to_dtype(DType::F32)?;
let logits = if self.repeat_penalty == 1. {
logits
} else {
let start_at = tokens.len().saturating_sub(self.repeat_last_n);
candle_transformers::utils::apply_repeat_penalty(
&logits,
self.repeat_penalty,
&tokens[start_at..],
)?
};
let next_token = self.logits_processor.sample(&logits)?;
tokens.push(next_token);
generated_tokens += 1;
if next_token == eos_token {
break;
}
if let Some(t) = self.tokenizer.next_token(next_token)? {
print!("{t}");
std::io::stdout().flush()?;
}
}
let dt = start_gen.elapsed();
if let Some(rest) = self.tokenizer.decode_rest().map_err(E::msg)? {
print!("{rest}");
}
std::io::stdout().flush()?;
println!(
"\n{generated_tokens} tokens generated ({:.2} token/s)",
generated_tokens as f64 / dt.as_secs_f64(),
);
Ok(())
}
}
#[derive(Clone, Copy, Debug, clap::ValueEnum, PartialEq, Eq)]
enum WhichModel {
#[value(name = "0.5b")]
W0_5b,
#[value(name = "1.8b")]
W1_8b,
#[value(name = "4b")]
W4b,
#[value(name = "7b")]
W7b,
#[value(name = "14b")]
W14b,
#[value(name = "72b")]
W72b,
#[value(name = "moe-a2.7b")]
MoeA27b,
#[value(name = "2-0.5b")]
W2_0_5b,
#[value(name = "2-1.5b")]
W2_1_5b,
#[value(name = "2-7b")]
W2_7b,
#[value(name = "2-72b")]
W2_72b,
}
#[derive(Parser, Debug)]
#[command(author, version, about, long_about = None)]
struct Args {
/// Run on CPU rather than on GPU.
#[arg(long)]
cpu: bool,
/// Enable tracing (generates a trace-timestamp.json file).
#[arg(long)]
tracing: bool,
#[arg(long)]
use_flash_attn: bool,
#[arg(long)]
prompt: String,
/// The temperature used to generate samples.
#[arg(long)]
temperature: Option<f64>,
/// Nucleus sampling probability cutoff.
#[arg(long)]
top_p: Option<f64>,
/// The seed to use when generating random samples.
#[arg(long, default_value_t = 299792458)]
seed: u64,
/// The length of the sample to generate (in tokens).
#[arg(long, short = 'n', default_value_t = 10000)]
sample_len: usize,
#[arg(long)]
model_id: Option<String>,
#[arg(long, default_value = "main")]
revision: String,
#[arg(long)]
tokenizer_file: Option<String>,
#[arg(long)]
weight_files: Option<String>,
/// Penalty to be applied for repeating tokens, 1. means no penalty.
#[arg(long, default_value_t = 1.1)]
repeat_penalty: f32,
/// The context size to consider for the repeat penalty.
#[arg(long, default_value_t = 64)]
repeat_last_n: usize,
#[arg(long, default_value = "0.5b")]
model: WhichModel,
}
fn main() -> Result<()> {
use tracing_chrome::ChromeLayerBuilder;
use tracing_subscriber::prelude::*;
let args = Args::parse();
let _guard = if args.tracing {
let (chrome_layer, guard) = ChromeLayerBuilder::new().build();
tracing_subscriber::registry().with(chrome_layer).init();
Some(guard)
} else {
None
};
println!(
"avx: {}, neon: {}, simd128: {}, f16c: {}",
candle::utils::with_avx(),
candle::utils::with_neon(),
candle::utils::with_simd128(),
candle::utils::with_f16c()
);
println!(
"temp: {:.2} repeat-penalty: {:.2} repeat-last-n: {}",
args.temperature.unwrap_or(0.),
args.repeat_penalty,
args.repeat_last_n
);
let start = std::time::Instant::now();
let api = Api::new()?;
let model_id = match args.model_id {
Some(model_id) => model_id,
None => {
let (version, size) = match args.model {
WhichModel::W2_0_5b => ("2", "0.5B"),
WhichModel::W2_1_5b => ("2", "1.5B"),
WhichModel::W2_7b => ("2", "7B"),
WhichModel::W2_72b => ("2", "72B"),
WhichModel::W0_5b => ("1.5", "0.5B"),
WhichModel::W1_8b => ("1.5", "1.8B"),
WhichModel::W4b => ("1.5", "4B"),
WhichModel::W7b => ("1.5", "7B"),
WhichModel::W14b => ("1.5", "14B"),
WhichModel::W72b => ("1.5", "72B"),
WhichModel::MoeA27b => ("1.5", "MoE-A2.7B"),
};
format!("Qwen/Qwen{version}-{size}")
}
};
let repo = api.repo(Repo::with_revision(
model_id,
RepoType::Model,
args.revision,
));
let tokenizer_filename = match args.tokenizer_file {
Some(file) => std::path::PathBuf::from(file),
None => repo.get("tokenizer.json")?,
};
let filenames = match args.weight_files {
Some(files) => files
.split(',')
.map(std::path::PathBuf::from)
.collect::<Vec<_>>(),
None => match args.model {
WhichModel::W0_5b | WhichModel::W2_0_5b | WhichModel::W2_1_5b | WhichModel::W1_8b => {
vec![repo.get("model.safetensors")?]
}
WhichModel::W4b
| WhichModel::W7b
| WhichModel::W2_7b
| WhichModel::W14b
| WhichModel::W72b
| WhichModel::W2_72b
| WhichModel::MoeA27b => {
candle_examples::hub_load_safetensors(&repo, "model.safetensors.index.json")?
}
},
};
println!("retrieved the files in {:?}", start.elapsed());
let tokenizer = Tokenizer::from_file(tokenizer_filename).map_err(E::msg)?;
let start = std::time::Instant::now();
let config_file = repo.get("config.json")?;
let device = candle_examples::device(args.cpu)?;
let dtype = if device.is_cuda() {
DType::BF16
} else {
DType::F32
};
let vb = unsafe { VarBuilder::from_mmaped_safetensors(&filenames, dtype, &device)? };
let model = match args.model {
WhichModel::MoeA27b => {
let config: ConfigMoe = serde_json::from_slice(&std::fs::read(config_file)?)?;
Model::Moe(ModelMoe::new(&config, vb)?)
}
_ => {
let config: ConfigBase = serde_json::from_slice(&std::fs::read(config_file)?)?;
Model::Base(ModelBase::new(&config, vb)?)
}
};
println!("loaded the model in {:?}", start.elapsed());
let mut pipeline = TextGeneration::new(
model,
tokenizer,
args.seed,
args.temperature,
args.top_p,
args.repeat_penalty,
args.repeat_last_n,
&device,
);
pipeline.run(&args.prompt, args.sample_len)?;
Ok(())
}
| candle/candle-examples/examples/qwen/main.rs/0 | {
"file_path": "candle/candle-examples/examples/qwen/main.rs",
"repo_id": "candle",
"token_count": 4905
} |
# This script exports pre-trained model weights in the safetensors format.
import numpy as np
import torch
import torchvision
from safetensors import torch as stt
m = torchvision.models.resnet50(pretrained=True)
stt.save_file(m.state_dict(), 'resnet50.safetensors')
m = torchvision.models.resnet101(pretrained=True)
stt.save_file(m.state_dict(), 'resnet101.safetensors')
m = torchvision.models.resnet152(pretrained=True)
stt.save_file(m.state_dict(), 'resnet152.safetensors')
| candle/candle-examples/examples/resnet/export_models.py/0 | {
"file_path": "candle/candle-examples/examples/resnet/export_models.py",
"repo_id": "candle",
"token_count": 166
} |
# candle-splade
SPLADE is a neural retrieval model which learns sparse query/document expansions via the BERT MLM head and sparse regularization. Sparse representations have several advantages over dense approaches: efficient use of inverted indexes, explicit lexical matching, and interpretability. They also seem to generalize better on out-of-domain data. In this example we can do the following two tasks:
- Compute sparse embedding for a given query.
- Compute similarities between a set of sentences using sparse embeddings.
## Sparse Sentence embeddings
SPLADE is used to compute the sparse embedding for a given query. The model weights
are downloaded from the hub on the first run. This makes use of the BertForMaskedLM model.
```bash
cargo run --example splade --release -- --prompt "Here is a test sentence"
> "the out there still house inside position outside stay standing hotel sitting dog animal sit bird cat statue cats"
> [0.10270107, 0.269471, 0.047469813, 0.0016636598, 0.05394874, 0.23105666, 0.037475716, 0.45949644, 0.009062732, 0.06790692, 0.0327835, 0.33122346, 0.16863061, 0.12688516, 0.340983, 0.044972017, 0.47724655, 0.01765311, 0.37331146]
```
```bash
cargo run --example splade --release --features
> score: 0.47 'The new movie is awesome' 'The new movie is so great'
> score: 0.43 'The cat sits outside' 'The cat plays in the garden'
> score: 0.14 'I love pasta' 'Do you like pizza?'
> score: 0.11 'A man is playing guitar' 'The cat plays in the garden'
> score: 0.05 'A man is playing guitar' 'A woman watches TV'
```
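The pairwise scores in the second run come from comparing the sparse, vocabulary-sized activation vectors of the two sentences. A minimal sketch of that comparison, assuming a plain dot product (the actual example may normalize, e.g. cosine similarity, but the idea is the same) and using small made-up vectors rather than real model outputs:
```rust
// Toy sketch of SPLADE-style scoring: a dot product between two sparse vectors.
// The vectors below are illustrative stand-ins, not actual model activations.
fn splade_score(a: &[f32], b: &[f32]) -> f32 {
    a.iter().zip(b.iter()).map(|(x, y)| x * y).sum()
}

fn main() {
    // Pretend vocabulary of 5 terms; most entries are zero, hence "sparse".
    let query = [0.0, 1.2, 0.0, 0.4, 0.0];
    let doc = [0.3, 0.9, 0.0, 0.0, 0.7];
    println!("score: {:.2}", splade_score(&query, &doc));
}
```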
| candle/candle-examples/examples/splade/README.md/0 | {
"file_path": "candle/candle-examples/examples/splade/README.md",
"repo_id": "candle",
"token_count": 474
} |
# candle-t5
## Encoder-decoder example:
```bash
$ cargo run --example t5 --release -- --model-id "t5-small" --prompt "translate to German: A beautiful candle." --decode
...
Eine schöne Kerze.
9 tokens generated (2.42 token/s)
```
Variants such as [flan-t5](https://huggingface.co/google/flan-t5-small), [flan-ul2](https://huggingface.co/google/flan-ul2) (with `--revision "refs/pr/25"`), and [Co-EdIT](https://huggingface.co/grammarly/coedit-large) are also supported.
## Translation with [MADLAD-400](https://arxiv.org/abs/2309.04662)
MADLAD-400 is a series of multilingual machine translation T5 models trained on 250 billion tokens covering over 450 languages using publicly available data. These models are competitive with significantly larger models.
```bash
cargo run --example t5 --release -- \
--model-id "jbochi/madlad400-3b-mt" \
--prompt "<2de> How are you, my friend?" \
--decode --temperature 0
...
Wie geht es dir, mein Freund?
```
## Sentence embedding example
```bash
$ cargo run --example t5 --release -- --model-id "t5-small" --prompt "A beautiful candle."
...
[[[ 0.0515, -0.0541, -0.0761, ..., -0.0392, 0.1511, -0.0265],
[-0.0974, 0.0998, -0.1659, ..., -0.2450, 0.1738, -0.0164],
[ 0.0624, -0.1024, 0.0430, ..., -0.1388, 0.0564, -0.2962],
[-0.0389, -0.1173, 0.0026, ..., 0.1064, -0.1065, 0.0990],
[ 0.1300, 0.0027, -0.0326, ..., 0.0026, -0.0317, 0.0851]]]
Tensor[[1, 5, 512], f32]
Took 303.766583ms
```
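The tensor above is the raw encoder output: one 512-dimensional vector per input token. A common way to turn it into a single sentence embedding is to mean-pool over the token dimension. A minimal sketch, assuming the `candle_core` crate and using a random tensor as a stand-in for the real encoder output:
```rust
use candle_core::{Device, Result, Tensor};

fn main() -> Result<()> {
    // Stand-in for the [1, 5, 512] encoder output shown above.
    let hidden = Tensor::randn(0f32, 1f32, (1, 5, 512), &Device::Cpu)?;
    // Average over the token dimension (dim 1): one vector per sentence.
    let sentence_embedding = hidden.mean(1)?;
    println!("{:?}", sentence_embedding.shape()); // [1, 512]
    Ok(())
}
```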
| candle/candle-examples/examples/t5/README.md/0 | {
"file_path": "candle/candle-examples/examples/t5/README.md",
"repo_id": "candle",
"token_count": 608
} |
#include <cmath>
#include <cute/tensor.hpp>
#include <cutlass/cutlass.h>
#include <cutlass/array.h>
#include "utils.h"
namespace flash {
using namespace cute;
////////////////////////////////////////////////////////////////////////////////////////////////////
template <bool Is_causal>
struct Alibi {
const float alibi_slope;
const int max_seqlen_k, max_seqlen_q;
__forceinline__ __device__ Alibi(const float alibi_slope, const int max_seqlen_k, const int max_seqlen_q)
: alibi_slope(alibi_slope)
, max_seqlen_k(max_seqlen_k)
, max_seqlen_q(max_seqlen_q) {
};
template <typename Engine, typename Layout>
__forceinline__ __device__ void apply_alibi(Tensor<Engine, Layout> &tensor,
const int col_idx_offset_,
const int row_idx_offset,
const int warp_row_stride) {
// tensor has shape (nrow=(2, MMA_M), ncol=(2, MMA_N))
static_assert(Layout::rank == 2, "Only support 2D Tensor");
const int lane_id = threadIdx.x % 32;
const int col_idx_offset = col_idx_offset_ + (lane_id % 4) * 2;
if constexpr (Is_causal) { // Simpler, we add the same bias vector to all rows
#pragma unroll
for (int nj = 0; nj < size<1, 1>(tensor); ++nj) {
const int col_idx_base = col_idx_offset + nj * 8;
#pragma unroll
for (int j = 0; j < size<1, 0>(tensor); ++j) {
const int col_idx = col_idx_base + j;
#pragma unroll
for (int mi = 0; mi < size<0>(tensor); ++mi) {
tensor(mi, make_coord(j, nj)) += alibi_slope * col_idx;
}
}
}
} else { // Bias depends on both row_idx and col_idx
#pragma unroll
for (int mi = 0; mi < size<0, 1>(tensor); ++mi) {
const int row_idx_base = row_idx_offset + mi * warp_row_stride;
#pragma unroll
for (int i = 0; i < size<0, 0>(tensor); ++i) {
const int row_idx = row_idx_base + i * 8;
#pragma unroll
for (int nj = 0; nj < size<1, 1>(tensor); ++nj) {
const int col_idx_base = col_idx_offset + nj * 8;
#pragma unroll
for (int j = 0; j < size<1, 0>(tensor); ++j) {
const int col_idx = col_idx_base + j;
tensor(make_coord(i, mi), make_coord(j, nj)) -= alibi_slope * abs(row_idx + max_seqlen_k - max_seqlen_q - col_idx);
}
}
}
}
}
}
};
} // namespace flash
| candle/candle-flash-attn/kernels/alibi.h/0 | {
"file_path": "candle/candle-flash-attn/kernels/alibi.h",
"repo_id": "candle",
"token_count": 1556
} |
// Copyright (c) 2024, Tri Dao.
// Splitting the different head dimensions to different files to speed up compilation.
// This file is auto-generated. See "generate_kernels.py"
#include "flash_fwd_launch_template.h"
template<>
void run_mha_fwd_<cutlass::half_t, 192, true>(Flash_fwd_params ¶ms, cudaStream_t stream) {
run_mha_fwd_hdim192<cutlass::half_t, true>(params, stream);
}
| candle/candle-flash-attn/kernels/flash_fwd_hdim192_fp16_causal_sm80.cu/0 | {
"file_path": "candle/candle-flash-attn/kernels/flash_fwd_hdim192_fp16_causal_sm80.cu",
"repo_id": "candle",
"token_count": 138
} |
/******************************************************************************
* Copyright (c) 2024, Tri Dao.
******************************************************************************/
#pragma once
#include <cmath>
#include <cute/tensor.hpp>
#include <cutlass/numeric_types.h>
#include "philox.cuh"
#include "utils.h"
namespace flash {
using namespace cute;
////////////////////////////////////////////////////////////////////////////////////////////////////
template<bool zero_init=true, typename Engine0, typename Layout0, typename Engine1, typename Layout1, typename Operator>
__device__ __forceinline__ void thread_reduce_(Tensor<Engine0, Layout0> const &tensor, Tensor<Engine1, Layout1> &summary, Operator &op) {
static_assert(Layout0::rank == 2, "Only support 2D Tensor");
static_assert(Layout1::rank == 1, "Only support 1D Tensor");
CUTE_STATIC_ASSERT_V(size<0>(summary) == size<0>(tensor));
#pragma unroll
for (int mi = 0; mi < size<0>(tensor); mi++) {
summary(mi) = zero_init ? tensor(mi, 0) : op(summary(mi), tensor(mi, 0));
#pragma unroll
for (int ni = 1; ni < size<1>(tensor); ni++) {
summary(mi) = op(summary(mi), tensor(mi, ni));
}
}
}
template<typename Engine0, typename Layout0, typename Engine1, typename Layout1, typename Operator>
__device__ __forceinline__ void quad_allreduce_(Tensor<Engine0, Layout0> &dst, Tensor<Engine1, Layout1> &src, Operator &op) {
CUTE_STATIC_ASSERT_V(size(dst) == size(src));
#pragma unroll
for (int i = 0; i < size(dst); i++){
dst(i) = Allreduce<4>::run(src(i), op);
}
}
template<bool zero_init=true, typename Engine0, typename Layout0, typename Engine1, typename Layout1, typename Operator>
__device__ __forceinline__ void reduce_(Tensor<Engine0, Layout0> const& tensor, Tensor<Engine1, Layout1> &summary, Operator &op) {
thread_reduce_<zero_init>(tensor, summary, op);
quad_allreduce_(summary, summary, op);
}
template<bool zero_init=true, typename Engine0, typename Layout0, typename Engine1, typename Layout1>
__device__ __forceinline__ void reduce_max(Tensor<Engine0, Layout0> const& tensor, Tensor<Engine1, Layout1> &max){
MaxOp<float> max_op;
reduce_<zero_init>(tensor, max, max_op);
}
template<bool zero_init=true, typename Engine0, typename Layout0, typename Engine1, typename Layout1>
__device__ __forceinline__ void reduce_sum(Tensor<Engine0, Layout0> const& tensor, Tensor<Engine1, Layout1> &sum){
SumOp<float> sum_op;
thread_reduce_<zero_init>(tensor, sum, sum_op);
}
// Apply the exp to all the elements.
template <bool Scale_max=true, typename Engine0, typename Layout0, typename Engine1, typename Layout1>
__forceinline__ __device__ void scale_apply_exp2(Tensor<Engine0, Layout0> &tensor, Tensor<Engine1, Layout1> const &max, const float scale) {
static_assert(Layout0::rank == 2, "Only support 2D Tensor");
static_assert(Layout1::rank == 1, "Only support 1D Tensor");
CUTE_STATIC_ASSERT_V(size<0>(max) == size<0>(tensor));
#pragma unroll
for (int mi = 0; mi < size<0>(tensor); ++mi) {
// If max is -inf, then all elements must have been -inf (possibly due to masking).
// We don't want (-inf - (-inf)) since that would give NaN.
// If we don't have float around M_LOG2E the multiplication is done in fp64.
const float max_scaled = max(mi) == -INFINITY ? 0.f : max(mi) * (Scale_max ? scale : float(M_LOG2E));
#pragma unroll
for (int ni = 0; ni < size<1>(tensor); ++ni) {
// Instead of computing exp(x - max), we compute exp2(x * log_2(e) -
// max * log_2(e)) This allows the compiler to use the ffma
// instruction instead of fadd and fmul separately.
// The following macro will disable the use of fma.
// See: https://github.com/pytorch/pytorch/issues/121558 for more details
// This macro is set in PyTorch and not FlashAttention
#ifdef UNFUSE_FMA
tensor(mi, ni) = exp2f(__fmul_rn(tensor(mi, ni), scale) - max_scaled);
#else
tensor(mi, ni) = exp2f(tensor(mi, ni) * scale - max_scaled);
#endif
}
}
}
// Apply the exp to all the elements.
template <bool zero_init=true, typename Engine0, typename Layout0, typename Engine1, typename Layout1>
__forceinline__ __device__ void max_scale_exp2_sum(Tensor<Engine0, Layout0> &tensor, Tensor<Engine1, Layout1> &max, Tensor<Engine1, Layout1> &sum, const float scale) {
static_assert(Layout0::rank == 2, "Only support 2D Tensor");
static_assert(Layout1::rank == 1, "Only support 1D Tensor");
CUTE_STATIC_ASSERT_V(size<0>(max) == size<0>(tensor));
#pragma unroll
for (int mi = 0; mi < size<0>(tensor); ++mi) {
MaxOp<float> max_op;
max(mi) = zero_init ? tensor(mi, 0) : max_op(max(mi), tensor(mi, 0));
#pragma unroll
for (int ni = 1; ni < size<1>(tensor); ni++) {
max(mi) = max_op(max(mi), tensor(mi, ni));
}
max(mi) = Allreduce<4>::run(max(mi), max_op);
// If max is -inf, then all elements must have been -inf (possibly due to masking).
// We don't want (-inf - (-inf)) since that would give NaN.
const float max_scaled = max(mi) == -INFINITY ? 0.f : max(mi) * scale;
sum(mi) = 0;
#pragma unroll
for (int ni = 0; ni < size<1>(tensor); ++ni) {
// Instead of computing exp(x - max), we compute exp2(x * log_2(e) -
// max * log_2(e)) This allows the compiler to use the ffma
// instruction instead of fadd and fmul separately.
tensor(mi, ni) = exp2f(tensor(mi, ni) * scale - max_scaled);
sum(mi) += tensor(mi, ni);
}
SumOp<float> sum_op;
sum(mi) = Allreduce<4>::run(sum(mi), sum_op);
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////
template <int kNRows>
struct Softmax {
using TensorT = decltype(make_tensor<float>(Shape<Int<kNRows>>{}));
TensorT row_max, row_sum;
__forceinline__ __device__ Softmax() {};
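// Online softmax update: when a new block of scores raises the running row max,
// the previously accumulated output and row sums are rescaled by
// exp2((old_max - new_max) * softmax_scale_log2) so the final normalization stays exact.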
template<bool Is_first, bool Check_inf=false, typename Tensor0, typename Tensor1>
__forceinline__ __device__ void softmax_rescale_o(Tensor0 &acc_s, Tensor1 &acc_o, float softmax_scale_log2) {
// Reshape acc_s from (MMA=4, MMA_M, MMA_N) to (nrow=(2, MMA_M), ncol=(2, MMA_N))
Tensor scores = make_tensor(acc_s.data(), flash::convert_layout_acc_rowcol(acc_s.layout()));
static_assert(decltype(size<0>(scores))::value == kNRows);
if (Is_first) {
flash::template reduce_max</*zero_init=*/true>(scores, row_max);
flash::scale_apply_exp2(scores, row_max, softmax_scale_log2);
flash::reduce_sum</*zero_init=*/true>(scores, row_sum);
} else {
Tensor scores_max_prev = make_fragment_like(row_max);
cute::copy(row_max, scores_max_prev);
flash::template reduce_max</*zero_init=*/false>(scores, row_max);
// Reshape acc_o from (MMA=4, MMA_M, MMA_K) to (nrow=(2, MMA_M), ncol=(2, MMA_K))
Tensor acc_o_rowcol = make_tensor(acc_o.data(), flash::convert_layout_acc_rowcol(acc_o.layout()));
static_assert(decltype(size<0>(acc_o_rowcol))::value == kNRows);
#pragma unroll
for (int mi = 0; mi < size(row_max); ++mi) {
float scores_max_cur = !Check_inf
? row_max(mi)
: (row_max(mi) == -INFINITY ? 0.0f : row_max(mi));
float scores_scale = exp2f((scores_max_prev(mi) - scores_max_cur) * softmax_scale_log2);
row_sum(mi) *= scores_scale;
#pragma unroll
for (int ni = 0; ni < size<1>(acc_o_rowcol); ++ni) { acc_o_rowcol(mi, ni) *= scores_scale; }
}
flash::scale_apply_exp2(scores, row_max, softmax_scale_log2);
// We don't do the reduce across threads here since we don't need to use the row_sum.
// We do that reduce at the end when we need to normalize the softmax.
flash::reduce_sum</*zero_init=*/false>(scores, row_sum);
}
};
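// Final pass: all-reduce the row sums across the quad, scale the accumulated
// output by 1/sum (times rp_dropout when dropout is enabled), and return the
// per-row log-sum-exp (row_max * softmax_scale + log(sum)).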
template<bool Is_dropout=false, bool Split=false, typename Tensor0>
__forceinline__ __device__ TensorT normalize_softmax_lse(Tensor0 &acc_o, float softmax_scale, float rp_dropout=1.0) {
SumOp<float> sum_op;
quad_allreduce_(row_sum, row_sum, sum_op);
TensorT lse = make_fragment_like(row_sum);
Tensor acc_o_rowcol = make_tensor(acc_o.data(), flash::convert_layout_acc_rowcol(acc_o.layout()));
static_assert(decltype(size<0>(acc_o_rowcol))::value == kNRows);
#pragma unroll
for (int mi = 0; mi < size<0>(acc_o_rowcol); ++mi) {
float sum = row_sum(mi);
float inv_sum = (sum == 0.f || sum != sum) ? 1.f : 1.f / sum;
lse(mi) = (sum == 0.f || sum != sum) ? (Split ? -INFINITY : INFINITY) : row_max(mi) * softmax_scale + __logf(sum);
float scale = !Is_dropout ? inv_sum : inv_sum * rp_dropout;
#pragma unroll
for (int ni = 0; ni < size<1>(acc_o_rowcol); ++ni) { acc_o_rowcol(mi, ni) *= scale; }
}
return lse;
};
};
} // namespace flash
| candle/candle-flash-attn/kernels/softmax.h/0 | {
"file_path": "candle/candle-flash-attn/kernels/softmax.h",
"repo_id": "candle",
"token_count": 4008
} |
#include<stdint.h>
#include "cuda_fp16.h"
template<typename T>
__device__ void fill_with(T *buf, T value, const size_t numel) {
for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; i < numel; i += blockDim.x * gridDim.x) {
buf[i] = value;
}
}
extern "C" __global__ void fill_u8(uint8_t *buf, uint8_t value, const size_t numel) { fill_with(buf, value, numel); }
extern "C" __global__ void fill_u32(uint32_t *buf, uint32_t value, const size_t numel) { fill_with(buf, value, numel); }
extern "C" __global__ void fill_i64(int64_t *buf, int64_t value, const size_t numel) { fill_with(buf, value, numel); }
extern "C" __global__ void fill_f32(float *buf, float value, const size_t numel) { fill_with(buf, value, numel); }
extern "C" __global__ void fill_f64(double *buf, double value, const size_t numel) { fill_with(buf, value, numel); }
template<typename T>
__device__ void copy2d(const T *src, T *dst, uint32_t d1, uint32_t d2, uint32_t src_s, uint32_t dst_s) {
uint32_t idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= d1 * d2) {
return;
}
uint32_t idx1 = idx / d2;
uint32_t idx2 = idx - d2 * idx1;
dst[idx1 * dst_s + idx2] = src[idx1 * src_s + idx2];
}
#define COPY2D_OP(TYPENAME, FNNAME) \
extern "C" __global__ \
void FNNAME(const TYPENAME *src, TYPENAME *dst, uint32_t d1, uint32_t d2, uint32_t src_s, uint32_t dst_s) { \
copy2d(src, dst, d1, d2, src_s, dst_s); \
} \
COPY2D_OP(float, copy2d_f32)
COPY2D_OP(double, copy2d_f64)
COPY2D_OP(uint8_t, copy2d_u8)
COPY2D_OP(uint32_t, copy2d_u32)
COPY2D_OP(int64_t, copy2d_i64)
#if __CUDA_ARCH__ >= 530
extern "C" __global__ void fill_f16(__half *buf, __half value, const size_t numel) { fill_with(buf, value, numel); }
COPY2D_OP(__half, copy2d_f16)
#endif
#if __CUDA_ARCH__ >= 800
#include <cuda_bf16.h>
extern "C" __global__ void fill_bf16(__nv_bfloat16 *buf, __nv_bfloat16 value, const size_t numel) { fill_with(buf, value, numel); }
COPY2D_OP(__nv_bfloat16, copy2d_bf16)
#endif
| candle/candle-kernels/src/fill.cu/0 | {
"file_path": "candle/candle-kernels/src/fill.cu",
"repo_id": "candle",
"token_count": 919
} |
#include <metal_stdlib>
using namespace metal;
METAL_FUNC uint get_strided_index(
uint idx,
constant size_t &num_dims,
constant size_t *dims,
constant size_t *strides
) {
uint strided_i = 0;
for (uint d = 0; d < num_dims; d++) {
uint dim_idx = num_dims - 1 - d;
strided_i += (idx % dims[dim_idx]) * strides[dim_idx];
idx /= dims[dim_idx];
}
return strided_i;
}
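// index_select: for each output element, look up the id along the selected
// dimension (clamped to src_dim_size - 1) and copy the matching input element,
// handling both contiguous and strided sources.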
template<typename TYPENAME, typename INDEX_TYPENAME>
METAL_FUNC void index(
constant size_t &dst_size,
constant size_t &left_size,
constant size_t &src_dim_size,
constant size_t &right_size,
constant size_t &ids_size,
constant bool &contiguous,
constant size_t *src_dims,
constant size_t *src_strides,
const device TYPENAME *input,
const device INDEX_TYPENAME *input_ids,
device TYPENAME *output,
uint tid [[ thread_position_in_grid ]]
) {
if (tid >= dst_size) {
return;
}
const size_t id_i = (tid / right_size) % ids_size;
const INDEX_TYPENAME input_i = min(input_ids[id_i], (INDEX_TYPENAME)(src_dim_size - 1));
const size_t right_rank_i = tid % right_size;
const size_t left_rank_i = tid / right_size / ids_size;
/*
// Force prevent out of bounds indexing
// since there doesn't seem to be a good way to force crash
// No need to check for zero we're only allowing unsized.
*/
const size_t src_i = left_rank_i * src_dim_size * right_size + input_i * right_size + right_rank_i;
const size_t strided_src_i = contiguous ? src_i : get_strided_index(src_i, src_dim_size, src_dims, src_strides);
output[tid] = input[strided_src_i];
}
# define INDEX_OP(NAME, INDEX_TYPENAME, TYPENAME) \
kernel void NAME( \
constant size_t &dst_size, \
constant size_t &left_size, \
constant size_t &src_dim_size, \
constant size_t &right_size, \
constant size_t &ids_size, \
constant bool &contiguous, \
constant size_t *src_dims, \
constant size_t *src_strides, \
const device TYPENAME *input, \
const device INDEX_TYPENAME *input_ids, \
device TYPENAME *output, \
uint tid [[ thread_position_in_grid ]] \
) { \
index<TYPENAME, INDEX_TYPENAME>(dst_size, left_size, src_dim_size, right_size, ids_size, contiguous, src_dims, src_strides, input, input_ids, output, tid); \
}
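// gather: output[tid] takes the input element whose coordinate along the
// gathered dimension is given by input_ids[tid] (same shape as the output).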
template<typename TYPENAME, typename INDEX_TYPENAME>
METAL_FUNC void gather(
constant size_t &dst_size,
constant size_t &left_size,
constant size_t &src_dim_size,
constant size_t &right_size,
constant size_t &ids_size,
const device TYPENAME *input,
const device INDEX_TYPENAME *input_ids,
device TYPENAME *output,
uint tid [[ thread_position_in_grid ]]
) {
if (tid >= dst_size) {
return;
}
const INDEX_TYPENAME input_i = input_ids[tid];
const size_t right_rank_i = tid % right_size;
const size_t left_rank_i = tid / right_size / ids_size;
const size_t src_i = (left_rank_i * src_dim_size + input_i) * right_size + right_rank_i;
output[tid] = input[src_i];
}
# define GATHER_OP(NAME, INDEX_TYPENAME, TYPENAME) \
kernel void NAME( \
constant size_t &dst_size, \
constant size_t &left_size, \
constant size_t &src_dim_size, \
constant size_t &right_size, \
constant size_t &ids_size, \
const device TYPENAME *input, \
const device INDEX_TYPENAME *input_ids, \
device TYPENAME *output, \
uint tid [[ thread_position_in_grid ]] \
) { \
gather<TYPENAME, INDEX_TYPENAME>(dst_size, left_size, src_dim_size, right_size, ids_size, input, input_ids, output, tid); \
}
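// scatter_add: for every source element, input_ids (same shape as the source)
// names the destination coordinate along the scattered dimension; values that
// map to the same destination slot are accumulated with +=.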
template<typename TYPENAME, typename INDEX_TYPENAME>
METAL_FUNC void scatter_add(
constant size_t &dst_size,
constant size_t &left_size,
constant size_t &src_dim_size,
constant size_t &right_size,
constant size_t &dst_dim_size,
const device TYPENAME *input,
const device INDEX_TYPENAME *input_ids,
device TYPENAME *output,
uint tid [[ thread_position_in_grid ]]
) {
if (tid >= dst_size) {
return;
}
const size_t right_rank_i = tid % right_size;
const size_t left_rank_i = tid / right_size;
for (unsigned int j = 0; j < src_dim_size; ++j) {
const size_t src_i = (left_rank_i * src_dim_size + j) * right_size + right_rank_i;
const INDEX_TYPENAME idx = input_ids[src_i];
const size_t dst_i = (left_rank_i * dst_dim_size + idx) * right_size + right_rank_i;
output[dst_i] += input[src_i];
}
}
# define SCATTER_ADD_OP(NAME, INDEX_TYPENAME, TYPENAME) \
kernel void NAME( \
constant size_t &dst_size, \
constant size_t &left_size, \
constant size_t &src_dim_size, \
constant size_t &right_size, \
constant size_t &dst_dim_size, \
const device TYPENAME *input, \
const device INDEX_TYPENAME *input_ids, \
device TYPENAME *output, \
uint tid [[ thread_position_in_grid ]] \
) { \
scatter_add<TYPENAME, INDEX_TYPENAME>(dst_size, left_size, src_dim_size, right_size, dst_dim_size, input, input_ids, output, tid); \
}
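// index_add: input_ids is a 1D list of ids_dim_size destination indices; the
// j-th slice of the source is added to the input_ids[j]-th slice of the output.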
template<typename TYPENAME, typename INDEX_TYPENAME>
METAL_FUNC void index_add(
constant size_t &dst_size,
constant size_t &left_size,
constant size_t &src_dim_size,
constant size_t &right_size,
constant size_t &dst_dim_size,
constant size_t &ids_dim_size,
const device TYPENAME *input,
const device INDEX_TYPENAME *input_ids,
device TYPENAME *output,
uint tid [[ thread_position_in_grid ]]
) {
if (tid >= dst_size) {
return;
}
const size_t right_rank_i = tid % right_size;
const size_t left_rank_i = tid / right_size;
for (unsigned int j = 0; j < ids_dim_size; ++j) {
const INDEX_TYPENAME idx = input_ids[j];
const size_t src_i = (left_rank_i * src_dim_size + j) * right_size + right_rank_i;
const size_t dst_i = (left_rank_i * dst_dim_size + idx) * right_size + right_rank_i;
output[dst_i] += input[src_i];
}
}
# define INDEX_ADD_OP(NAME, INDEX_TYPENAME, TYPENAME) \
kernel void NAME( \
constant size_t &dst_size, \
constant size_t &left_size, \
constant size_t &src_dim_size, \
constant size_t &right_size, \
constant size_t &dst_dim_size, \
constant size_t &ids_dim_size, \
const device TYPENAME *input, \
const device INDEX_TYPENAME *input_ids, \
device TYPENAME *output, \
uint tid [[ thread_position_in_grid ]] \
) { \
index_add<TYPENAME, INDEX_TYPENAME>(dst_size, left_size, src_dim_size, right_size, dst_dim_size, ids_dim_size, input, input_ids, output, tid); \
}
INDEX_OP(is_i64_f32, int64_t, float)
INDEX_OP(is_i64_f16, int64_t, half)
#if defined(__HAVE_BFLOAT__)
INDEX_OP(is_i64_bf16, int64_t, bfloat)
#endif
INDEX_OP(is_u32_u8, uint32_t, uint8_t)
INDEX_OP(is_u32_u32, uint32_t, uint32_t)
INDEX_OP(is_u32_f32, uint32_t, float)
INDEX_OP(is_u32_f16, uint32_t, half)
#if defined(__HAVE_BFLOAT__)
INDEX_OP(is_u32_bf16, uint32_t, bfloat)
#endif
INDEX_OP(is_u8_u8, uint8_t, uint8_t)
INDEX_OP(is_u8_u32, uint8_t, uint32_t)
INDEX_OP(is_u8_f32, uint8_t, float)
INDEX_OP(is_u8_f16, uint8_t, half)
#if defined(__HAVE_BFLOAT__)
INDEX_OP(is_u8_bf16, uint8_t, bfloat)
#endif
GATHER_OP(gather_i64_f32, int64_t, float)
GATHER_OP(gather_i64_f16, int64_t, half)
GATHER_OP(gather_u32_f32, uint, float)
GATHER_OP(gather_u32_f16, uint, half)
#if defined(__HAVE_BFLOAT__)
GATHER_OP(gather_i64_bf16, int64_t, bfloat)
GATHER_OP(gather_u32_bf16, uint, bfloat)
#endif
GATHER_OP(gather_i64_u32, int64_t, uint)
GATHER_OP(gather_u32_u32, uint, uint)
GATHER_OP(gather_i64_i64, int64_t, int64_t)
GATHER_OP(gather_u32_i64, uint, int64_t)
SCATTER_ADD_OP(sa_u32_f32, uint32_t, float)
SCATTER_ADD_OP(sa_u8_f32, uint8_t, float)
SCATTER_ADD_OP(sa_i64_f32, int64_t, float)
SCATTER_ADD_OP(sa_u32_u32, uint32_t, uint32_t)
SCATTER_ADD_OP(sa_u32_f16, uint32_t, half)
SCATTER_ADD_OP(sa_u8_f16, uint8_t, half)
SCATTER_ADD_OP(sa_i64_f16, int64_t, half)
#if defined(__HAVE_BFLOAT__)
SCATTER_ADD_OP(sa_u32_bf16, uint32_t, bfloat)
SCATTER_ADD_OP(sa_u8_bf16, uint8_t, bfloat)
SCATTER_ADD_OP(sa_i64_bf16, int64_t, bfloat)
#endif
// i64
INDEX_ADD_OP(ia_i64_f16, int64_t, half)
INDEX_ADD_OP(ia_i64_f32, int64_t, float)
INDEX_ADD_OP(ia_i64_i64, int64_t, int64_t)
INDEX_ADD_OP(ia_i64_u32, int64_t, uint32_t)
INDEX_ADD_OP(ia_i64_u8, int64_t, uint8_t)
#if defined(__HAVE_BFLOAT__)
INDEX_ADD_OP(ia_i64_bf16, int64_t, bfloat)
#endif
// u32
INDEX_ADD_OP(ia_u32_f16, uint32_t, half)
INDEX_ADD_OP(ia_u32_f32, uint32_t, float)
INDEX_ADD_OP(ia_u32_i64, uint32_t, int64_t)
INDEX_ADD_OP(ia_u32_u32, uint32_t, uint32_t)
INDEX_ADD_OP(ia_u32_u8, uint32_t, uint8_t)
#if defined(__HAVE_BFLOAT__)
INDEX_ADD_OP(ia_u32_bf16, uint32_t, bfloat)
#endif
// u8
INDEX_ADD_OP(ia_u8_f16, uint8_t, half)
INDEX_ADD_OP(ia_u8_f32, uint8_t, float)
INDEX_ADD_OP(ia_u8_i64, uint8_t, int64_t)
INDEX_ADD_OP(ia_u8_u32, uint8_t, uint32_t)
INDEX_ADD_OP(ia_u8_u8, uint8_t, uint8_t)
#if defined(__HAVE_BFLOAT__)
INDEX_ADD_OP(ia_u8_bf16, uint8_t, bfloat)
#endif
| candle/candle-metal-kernels/src/indexing.metal/0 | {
"file_path": "candle/candle-metal-kernels/src/indexing.metal",
"repo_id": "candle",
"token_count": 4200
} |
use candle_metal_kernels::{call_affine, Kernels};
use metal::objc::rc::autoreleasepool;
use metal::{Device, MTLResourceOptions};
use rand;
use std::any::type_name;
use std::time::Instant;
fn main() {
let device = Device::system_default().unwrap();
let kernels = Kernels::new();
let f32_1k = (0..1000).map(|_| rand::random::<f32>()).collect::<Vec<_>>();
let f32_10k = (0..10000)
.map(|_| rand::random::<f32>())
.collect::<Vec<_>>();
let f32_100k = (0..100000)
.map(|_| rand::random::<f32>())
.collect::<Vec<_>>();
println!(
"{0: <5} | {1: <19} | {2: <6} | {3: <5} | {4: <11} | {5: <11}",
"dtype", "kernel", "size", "runs", "total time", "avg time"
);
// f32
run_affine_bench(&device, &kernels, &f32_1k);
run_affine_bench(&device, &kernels, &f32_10k);
run_affine_bench(&device, &kernels, &f32_100k);
}
fn run_affine_bench<T: Clone>(device: &Device, kernels: &Kernels, v: &[T]) {
let command_queue = device.new_command_queue();
let options = MTLResourceOptions::StorageModeManaged;
let iterations = 10000;
let input = device.new_buffer_with_data(
v.as_ptr() as *const core::ffi::c_void,
core::mem::size_of_val(v) as u64,
options,
);
let mut output = device.new_buffer(core::mem::size_of_val(v) as u64, options);
let mul: f32 = 1.2345;
let add: f32 = 2.3456;
let total_time = autoreleasepool(|| {
let command_buffer = command_queue.new_command_buffer();
let start = Instant::now();
for _ in 0..iterations {
call_affine(
&device,
command_buffer,
&kernels,
"affine_float",
v.len(),
&input,
&mut output,
mul,
add,
)
.unwrap();
}
command_buffer.commit();
command_buffer.wait_until_completed();
start.elapsed()
});
println!(
"{0: <5} | {1: <19} | {2: <6} | {3: <5} | {4: <11?} | {5: <11?}",
type_name::<T>().split("::").last().unwrap(),
"affine",
v.len(),
iterations,
total_time,
total_time / iterations
);
}
| candle/candle-metal-kernels/tmp/affine.rs/0 | {
"file_path": "candle/candle-metal-kernels/tmp/affine.rs",
"repo_id": "candle",
"token_count": 1154
} |
//! Embedding Layer.
use candle::{Result, Tensor};
#[derive(Clone, Debug)]
pub struct Embedding {
embeddings: Tensor,
hidden_size: usize,
}
impl Embedding {
pub fn new(embeddings: Tensor, hidden_size: usize) -> Self {
Self {
embeddings,
hidden_size,
}
}
pub fn embeddings(&self) -> &Tensor {
&self.embeddings
}
/// Get the hidden size of the embedding matrix
pub fn hidden_size(&self) -> usize {
self.hidden_size
}
}
impl crate::Module for Embedding {
fn forward(&self, indexes: &Tensor) -> Result<Tensor> {
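// Flatten the index tensor, gather the matching rows of the embedding
// matrix with index_select, then restore the original index shape with
// an extra trailing hidden_size dimension.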
let mut final_dims = indexes.dims().to_vec();
final_dims.push(self.hidden_size);
let indexes = indexes.flatten_all()?;
let values = self.embeddings.index_select(&indexes, 0)?;
let values = values.reshape(final_dims)?;
Ok(values)
}
}
pub fn embedding(in_size: usize, out_size: usize, vb: crate::VarBuilder) -> Result<Embedding> {
let embeddings = vb.get_with_hints(
(in_size, out_size),
"weight",
crate::Init::Randn {
mean: 0.,
stdev: 1.,
},
)?;
Ok(Embedding::new(embeddings, out_size))
}
| candle/candle-nn/src/embedding.rs/0 | {
"file_path": "candle/candle-nn/src/embedding.rs",
"repo_id": "candle",
"token_count": 571
} |
//! A `VarMap` is a store that holds named variables.
//!
use candle::{DType, Device, Result, Shape, Tensor, Var};
use std::collections::HashMap;
use std::sync::{Arc, Mutex};
/// A `VarMap` is a store that holds named variables. Variables can be retrieved from the stores
/// and new variables can be added by providing some initialization config in case they are
/// missing.
/// `VarMap` structures can be serialized in the safetensors format.
#[derive(Clone)]
pub struct VarMap {
data: Arc<Mutex<HashMap<String, Var>>>,
}
impl VarMap {
/// Create a new empty `VarMap`.
#[allow(clippy::new_without_default)]
pub fn new() -> Self {
let data = Arc::new(Mutex::new(HashMap::new()));
Self { data }
}
/// Retrieve all the variables currently stored in the map.
pub fn all_vars(&self) -> Vec<Var> {
let tensor_data = self.data.lock().unwrap();
#[allow(clippy::map_clone)]
tensor_data.values().map(|c| c.clone()).collect::<Vec<_>>()
}
/// Save the map in the safetensors format.
pub fn save<P: AsRef<std::path::Path>>(&self, path: P) -> Result<()> {
let tensor_data = self.data.lock().unwrap();
let data = tensor_data.iter().map(|(k, v)| (k, v.as_tensor()));
safetensors::tensor::serialize_to_file(data, &None, path.as_ref())?;
Ok(())
}
/// Load some values from a safetensors file and modify the existing variables to have these
/// values.
///
/// Note that values for variables that are currently not in the map are not kept.
pub fn load<P: AsRef<std::path::Path>>(&mut self, path: P) -> Result<()> {
let path = path.as_ref();
let data = unsafe { candle::safetensors::MmapedSafetensors::new(path)? };
let mut tensor_data = self.data.lock().unwrap();
for (name, var) in tensor_data.iter_mut() {
let data = data.load(name, var.device())?;
if let Err(err) = var.set(&data) {
candle::bail!("error setting {name} using data from {path:?}: {err}",)
}
}
Ok(())
}
/// Set a named variable to some value.
pub fn set_one<K: AsRef<str>, V: AsRef<Tensor>>(&mut self, name: K, value: V) -> Result<()> {
let tensor_data = self.data.lock().unwrap();
let name = name.as_ref();
match tensor_data.get(name) {
None => candle::bail!("cannot find {name} in VarMap"),
Some(var) => {
if let Err(err) = var.set(value.as_ref()) {
candle::bail!("error setting {name}: {err}",)
}
}
}
Ok(())
}
/// Set some named variables to some values.
///
/// If an error is returned, some of the variables might have already been set to their new
/// values.
pub fn set<I: Iterator<Item = (K, V)>, K: AsRef<str>, V: AsRef<Tensor>>(
&mut self,
iter: I,
) -> Result<()> {
let tensor_data = self.data.lock().unwrap();
for (name, value) in iter {
let name = name.as_ref();
match tensor_data.get(name) {
None => candle::bail!("cannot find {name} in VarMap"),
Some(var) => {
if let Err(err) = var.set(value.as_ref()) {
candle::bail!("error setting {name}: {err}",)
}
}
}
}
Ok(())
}
/// Retrieve or add a new variable.
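///
/// If a variable named `path` already exists, its shape is checked against `shape` and its
/// current value is returned; otherwise a new variable is created using `init` and inserted
/// into the map.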
pub fn get<S: Into<Shape>>(
&self,
shape: S,
path: &str,
init: crate::Init,
dtype: DType,
device: &Device,
) -> Result<Tensor> {
let shape = shape.into();
let mut tensor_data = self.data.lock().unwrap();
if let Some(tensor) = tensor_data.get(path) {
let tensor_shape = tensor.shape();
if &shape != tensor_shape {
candle::bail!("shape mismatch on {path}: {shape:?} <> {tensor_shape:?}")
}
return Ok(tensor.as_tensor().clone());
}
let var = init.var(shape, dtype, device)?;
let tensor = var.as_tensor().clone();
tensor_data.insert(path.to_string(), var);
Ok(tensor)
}
pub fn data(&self) -> &Mutex<HashMap<String, Var>> {
&self.data
}
}
| candle/candle-nn/src/var_map.rs/0 | {
"file_path": "candle/candle-nn/src/var_map.rs",
"repo_id": "candle",
"token_count": 1992
} |
//
// WARNING: This file is automatically generated! Please edit onnx.in.proto.
//
// SPDX-License-Identifier: Apache-2.0
syntax = "proto3";
package onnx;
// Overview
//
// ONNX is an open specification that is comprised of the following components:
//
// 1) A definition of an extensible computation graph model.
// 2) Definitions of standard data types.
// 3) Definitions of built-in operators.
//
// This document describes the syntax of models and their computation graphs,
// as well as the standard data types. Together, they are referred to as the ONNX
// Intermediate Representation, or 'IR' for short.
//
// The normative semantic specification of the ONNX IR is found in docs/IR.md.
// Definitions of the built-in neural network operators may be found in docs/Operators.md.
// Notes
//
// Protobuf compatibility
//
// To simplify framework compatibility, ONNX is defined using the subset of protobuf
// that is compatible with both protobuf v2 and v3. This means that we do not use any
// protobuf features that are only available in one of the two versions.
//
// Here are the most notable contortions we have to carry out to work around
// these limitations:
//
// - No 'map' (added protobuf 3.0). We instead represent mappings as lists
// of key-value pairs, where order does not matter and duplicates
// are not allowed.
// Versioning
//
// ONNX versioning is specified in docs/IR.md and elaborated on in docs/Versioning.md
//
// To be compatible with both proto2 and proto3, we will use a version number
// that is not defined by the default value but an explicit enum number.
enum Version {
// proto3 requires the first enum value to be zero.
// We add this just to appease the compiler.
_START_VERSION = 0;
// The version field is always serialized and we will use it to store the
// version that the graph is generated from. This helps us set up version
// control.
// For the IR, we are using simple numbers starting with 0x00000001,
// which was the version we published on Oct 10, 2017.
IR_VERSION_2017_10_10 = 0x0000000000000001;
// IR_VERSION 2 published on Oct 30, 2017
// - Added type discriminator to AttributeProto to support proto3 users
IR_VERSION_2017_10_30 = 0x0000000000000002;
// IR VERSION 3 published on Nov 3, 2017
// - For operator versioning:
// - Added new message OperatorSetIdProto
// - Added opset_import in ModelProto
// - For vendor extensions, added domain in NodeProto
IR_VERSION_2017_11_3 = 0x0000000000000003;
// IR VERSION 4 published on Jan 22, 2019
// - Relax constraint that initializers should be a subset of graph inputs
// - Add type BFLOAT16
IR_VERSION_2019_1_22 = 0x0000000000000004;
// IR VERSION 5 published on March 18, 2019
// - Add message TensorAnnotation.
// - Add quantization annotation in GraphProto to map tensor with its scale and zero point quantization parameters.
IR_VERSION_2019_3_18 = 0x0000000000000005;
// IR VERSION 6 published on Sep 19, 2019
// - Add support for sparse tensor constants stored in model.
// - Add message SparseTensorProto
// - Add sparse initializers
IR_VERSION_2019_9_19 = 0x0000000000000006;
// IR VERSION 7 published on May 8, 2020
// - Add support to allow function body graph to rely on multiple external operator sets.
// - Add a list to promote inference graph's initializers to global and
// mutable variables. Global variables are visible in all graphs of the
// stored models.
// - Add message TrainingInfoProto to store initialization
// method and training algorithm. The execution of TrainingInfoProto
// can modify the values of mutable variables.
// - Implicitly add inference graph into each TrainingInfoProto's algorithm.
IR_VERSION_2020_5_8 = 0x0000000000000007;
// IR VERSION 8 published on July 30, 2021
// Introduce TypeProto.SparseTensor
// Introduce TypeProto.Optional
// Added a list of FunctionProtos local to the model
// Deprecated since_version and operator status from FunctionProto
IR_VERSION_2021_7_30 = 0x0000000000000008;
// IR VERSION 9 published on May 5, 2023
// Added AttributeProto to FunctionProto so that default attribute values can be set.
// Added FLOAT8E4M3FN, FLOAT8E4M3FNUZ, FLOAT8E5M2, FLOAT8E5M2FNUZ.
IR_VERSION = 0x0000000000000009;
}
// Attributes
//
// A named attribute containing either singular float, integer, string, graph,
// and tensor values, or repeated float, integer, string, graph, and tensor values.
// An AttributeProto MUST contain the name field, and *only one* of the
// following content fields, effectively enforcing a C/C++ union equivalent.
message AttributeProto {
reserved 12, 16 to 19;
reserved "v";
// Note: this enum is structurally identical to the OpSchema::AttrType
// enum defined in schema.h. If you rev one, you likely need to rev the other.
enum AttributeType {
UNDEFINED = 0;
FLOAT = 1;
INT = 2;
STRING = 3;
TENSOR = 4;
GRAPH = 5;
SPARSE_TENSOR = 11;
TYPE_PROTO = 13;
FLOATS = 6;
INTS = 7;
STRINGS = 8;
TENSORS = 9;
GRAPHS = 10;
SPARSE_TENSORS = 12;
TYPE_PROTOS = 14;
}
// The name field MUST be present for this version of the IR.
string name = 1; // namespace Attribute
// if ref_attr_name is not empty, ref_attr_name is the attribute name in parent function.
// In this case, this AttributeProto does not contain data, and it is a reference to an attribute
// in the parent scope.
// NOTE: This should ONLY be used in function (sub-graph). It's invalid to be used in main graph.
string ref_attr_name = 21;
// A human-readable documentation for this attribute. Markdown is allowed.
string doc_string = 13;
// The type field MUST be present for this version of the IR.
// For 0.0.1 versions of the IR, this field was not defined, and
// implementations needed to use has_field heuristics to determine
// which value field was in use. For IR_VERSION 0.0.2 or later, this
// field MUST be set and match the f|i|s|t|... field in use. This
// change was made to accommodate proto3 implementations.
AttributeType type = 20; // discriminator that indicates which field below is in use
// Exactly ONE of the following fields must be present for this version of the IR
float f = 2; // float
int64 i = 3; // int
bytes s = 4; // UTF-8 string
TensorProto t = 5; // tensor value
GraphProto g = 6; // graph
SparseTensorProto sparse_tensor = 22; // sparse tensor value
// Do not use field below, it's deprecated.
// optional ValueProto v = 12; // value - subsumes everything but graph
TypeProto tp = 14; // type proto
repeated float floats = 7; // list of floats
repeated int64 ints = 8; // list of ints
repeated bytes strings = 9; // list of UTF-8 strings
repeated TensorProto tensors = 10; // list of tensors
repeated GraphProto graphs = 11; // list of graph
repeated SparseTensorProto sparse_tensors = 23; // list of sparse tensors
repeated TypeProto type_protos = 15;// list of type protos
}
// Defines information on value, including the name, the type, and
// the shape of the value.
message ValueInfoProto {
// This field MUST be present in this version of the IR.
string name = 1; // namespace Value
// This field MUST be present in this version of the IR for
// inputs and outputs of the top-level graph.
TypeProto type = 2;
// A human-readable documentation for this value. Markdown is allowed.
string doc_string = 3;
}
// Nodes
//
// Computation graphs are made up of a DAG of nodes, which represent what is
// commonly called a "layer" or "pipeline stage" in machine learning frameworks.
//
// For example, it can be a node of type "Conv" that takes in an image, a filter
// tensor and a bias tensor, and produces the convolved output.
message NodeProto {
repeated string input = 1; // namespace Value
repeated string output = 2; // namespace Value
// An optional identifier for this node in a graph.
// This field MAY be absent in this version of the IR.
string name = 3; // namespace Node
// The symbolic identifier of the Operator to execute.
string op_type = 4; // namespace Operator
// The domain of the OperatorSet that specifies the operator named by op_type.
string domain = 7; // namespace Domain
// Additional named attributes.
repeated AttributeProto attribute = 5;
// A human-readable documentation for this node. Markdown is allowed.
string doc_string = 6;
}
// Training information
// TrainingInfoProto stores information for training a model.
// In particular, this defines two functionalities: an initialization-step
// and a training-algorithm-step. Initialization resets the model
// back to its original state as if no training has been performed.
// Training algorithm improves the model based on input data.
//
// The semantics of the initialization-step is that the initializers
// in ModelProto.graph and in TrainingInfoProto.algorithm are first
// initialized as specified by the initializers in the graph, and then
// updated by the "initialization_binding" in every instance in
// ModelProto.training_info.
//
// The field "algorithm" defines a computation graph which represents a
// training algorithm's step. After the execution of a
// TrainingInfoProto.algorithm, the initializers specified by "update_binding"
// may be immediately updated. If the targeted training algorithm contains
// consecutive update steps (such as block coordinate descent methods),
// the user needs to create a TrainingInfoProto for each step.
message TrainingInfoProto {
// This field describes a graph to compute the initial tensors
// upon starting the training process. Initialization graph has no input
// and can have multiple outputs. Usually, trainable tensors in neural
// networks are randomly initialized. To achieve that, for each tensor,
// the user can put a random number operator such as RandomNormal or
// RandomUniform in TrainingInfoProto.initialization.node and assign its
// random output to the specific tensor using "initialization_binding".
// This graph can also set the initializers in "algorithm" in the same
// TrainingInfoProto; a use case is resetting the number of training
// iteration to zero.
//
// By default, this field is an empty graph and its evaluation does not
// produce any output. Thus, no initializer would be changed by default.
GraphProto initialization = 1;
// This field represents a training algorithm step. Given required inputs,
// it computes outputs to update initializers in its own or inference graph's
// initializer lists. In general, this field contains loss node, gradient node,
// optimizer node, increment of iteration count.
//
// An execution of the training algorithm step is performed by executing the
// graph obtained by combining the inference graph (namely "ModelProto.graph")
// and the "algorithm" graph. That is, the actual
// input/initializer/output/node/value_info/sparse_initializer list of
// the training graph is the concatenation of
// "ModelProto.graph.input/initializer/output/node/value_info/sparse_initializer"
// and "algorithm.input/initializer/output/node/value_info/sparse_initializer"
// in that order. This combined graph must satisfy the normal ONNX conditions.
// Now, let's provide a visualization of graph combination for clarity.
// Let the inference graph (i.e., "ModelProto.graph") be
// tensor_a, tensor_b -> MatMul -> tensor_c -> Sigmoid -> tensor_d
// and the "algorithm" graph be
// tensor_d -> Add -> tensor_e
// The combination process results
// tensor_a, tensor_b -> MatMul -> tensor_c -> Sigmoid -> tensor_d -> Add -> tensor_e
//
// Notice that an input of a node in the "algorithm" graph may reference the
// output of a node in the inference graph (but not the other way round). Also, an inference
// node cannot reference inputs of "algorithm". With these restrictions, inference graph
// can always be run independently without training information.
//
// By default, this field is an empty graph and its evaluation does not
// produce any output. Evaluating the default training step never
// updates any initializers.
GraphProto algorithm = 2;
// This field specifies the bindings from the outputs of "initialization" to
// some initializers in "ModelProto.graph.initializer" and
// the "algorithm.initializer" in the same TrainingInfoProto.
// See "update_binding" below for details.
//
// By default, this field is empty and no initializer would be changed
// by the execution of "initialization".
repeated StringStringEntryProto initialization_binding = 3;
// Gradient-based training is usually an iterative procedure. In one gradient
// descent iteration, we apply
//
// x = x - r * g
//
// where "x" is the optimized tensor, "r" stands for learning rate, and "g" is
// gradient of "x" with respect to a chosen loss. To avoid adding assignments
// into the training graph, we split the update equation into
//
// y = x - r * g
// x = y
//
// The user needs to save "y = x - r * g" into TrainingInfoProto.algorithm. To
// tell that "y" should be assigned to "x", the field "update_binding" may
// contain a key-value pair of strings, "x" (key of StringStringEntryProto)
// and "y" (value of StringStringEntryProto).
// For a neural network with multiple trainable (mutable) tensors, there can
// be multiple key-value pairs in "update_binding".
//
// The initializers appears as keys in "update_binding" are considered
// mutable variables. This implies some behaviors
// as described below.
//
// 1. We have only unique keys in all "update_binding"s so that two
// variables may not have the same name. This ensures that one
// variable is assigned up to once.
// 2. The keys must appear in names of "ModelProto.graph.initializer" or
// "TrainingInfoProto.algorithm.initializer".
// 3. The values must be output names of "algorithm" or "ModelProto.graph.output".
// 4. Mutable variables are initialized to the value specified by the
// corresponding initializer, and then potentially updated by
// "initializer_binding"s and "update_binding"s in "TrainingInfoProto"s.
//
// This field usually contains names of trainable tensors
// (in ModelProto.graph), optimizer states such as momentums in advanced
// stochastic gradient methods (in TrainingInfoProto.graph),
// and number of training iterations (in TrainingInfoProto.graph).
//
// By default, this field is empty and no initializer would be changed
// by the execution of "algorithm".
repeated StringStringEntryProto update_binding = 4;
}
// Models
//
// ModelProto is a top-level file/container format for bundling a ML model and
// associating its computation graph with metadata.
//
// The semantics of the model are described by the associated GraphProto's.
message ModelProto {
// The version of the IR this model targets. See Version enum above.
// This field MUST be present.
int64 ir_version = 1;
// The OperatorSets this model relies on.
// All ModelProtos MUST have at least one entry that
// specifies which version of the ONNX OperatorSet is
// being imported.
//
// All nodes in the ModelProto's graph will bind against the operator
// with the same-domain/same-op_type operator with the HIGHEST version
// in the referenced operator sets.
repeated OperatorSetIdProto opset_import = 8;
// The name of the framework or tool used to generate this model.
// This field SHOULD be present to indicate which implementation/tool/framework
// emitted the model.
string producer_name = 2;
// The version of the framework or tool used to generate this model.
// This field SHOULD be present to indicate which implementation/tool/framework
// emitted the model.
string producer_version = 3;
// Domain name of the model.
// We use reverse domain names as name space indicators. For example:
// `com.facebook.fair` or `com.microsoft.cognitiveservices`
//
// Together with `model_version` and GraphProto.name, this forms the unique identity of
// the graph.
string domain = 4;
// The version of the graph encoded. See Version enum below.
int64 model_version = 5;
// A human-readable documentation for this model. Markdown is allowed.
string doc_string = 6;
// The parameterized graph that is evaluated to execute the model.
GraphProto graph = 7;
// Named metadata values; keys should be distinct.
repeated StringStringEntryProto metadata_props = 14;
// Training-specific information. Sequentially executing all stored
// `TrainingInfoProto.algorithm`s and assigning their outputs following
// the corresponding `TrainingInfoProto.update_binding`s is one training
// iteration. Similarly, to initialize the model
// (as if training hasn't happened), the user should sequentially execute
// all stored `TrainingInfoProto.initialization`s and assigns their outputs
// using `TrainingInfoProto.initialization_binding`s.
//
// If this field is empty, the training behavior of the model is undefined.
repeated TrainingInfoProto training_info = 20;
// A list of function protos local to the model.
//
// Name of the function "FunctionProto.name" should be unique within the domain "FunctionProto.domain".
// In case of any conflicts the behavior (whether the model local functions are given higher priority,
// or standard operator sets are given higher priority, or this is treated as an error) is defined by
// the runtimes.
//
// The operator sets imported by FunctionProto should be compatible with the ones
// imported by ModelProto and other model local FunctionProtos.
// Example, if same operator set say 'A' is imported by a FunctionProto and ModelProto
// or by 2 FunctionProtos then versions for the operator set may be different but,
// the operator schema returned for op_type, domain, version combination
// for both the versions should be same for every node in the function body.
//
// One FunctionProto can reference other FunctionProto in the model, however, recursive reference
// is not allowed.
repeated FunctionProto functions = 25;
};
// StringStringEntryProto follows the pattern for cross-proto-version maps.
// See https://developers.google.com/protocol-buffers/docs/proto3#maps
message StringStringEntryProto {
string key = 1;
string value = 2;
};
message TensorAnnotation {
string tensor_name = 1;
// <key, value> pairs to annotate tensor specified by <tensor_name> above.
// The keys used in the mapping below must be pre-defined in ONNX spec.
// For example, for 8-bit linear quantization case, 'SCALE_TENSOR', 'ZERO_POINT_TENSOR' will be pre-defined as
// quantization parameter keys.
repeated StringStringEntryProto quant_parameter_tensor_names = 2;
}
// Graphs
//
// A graph defines the computational logic of a model and is comprised of a parameterized
// list of nodes that form a directed acyclic graph based on their inputs and outputs.
// This is the equivalent of the "network" or "graph" in many deep learning
// frameworks.
message GraphProto {
// The nodes in the graph, sorted topologically.
repeated NodeProto node = 1;
// The name of the graph.
string name = 2; // namespace Graph
// A list of named tensor values, used to specify constant inputs of the graph.
// Each initializer (both TensorProto as well SparseTensorProto) MUST have a name.
// The name MUST be unique across both initializer and sparse_initializer,
// but the name MAY also appear in the input list.
repeated TensorProto initializer = 5;
// Initializers (see above) stored in sparse format.
repeated SparseTensorProto sparse_initializer = 15;
// A human-readable documentation for this graph. Markdown is allowed.
string doc_string = 10;
// The inputs and outputs of the graph.
repeated ValueInfoProto input = 11;
repeated ValueInfoProto output = 12;
// Information for the values in the graph. The ValueInfoProto.name's
// must be distinct. It is optional for a value to appear in value_info list.
repeated ValueInfoProto value_info = 13;
// This field carries information to indicate the mapping among a tensor and its
// quantization parameter tensors. For example:
// For tensor 'a', it may have {'SCALE_TENSOR', 'a_scale'} and {'ZERO_POINT_TENSOR', 'a_zero_point'} annotated,
// which means, tensor 'a_scale' and tensor 'a_zero_point' are scale and zero point of tensor 'a' in the model.
repeated TensorAnnotation quantization_annotation = 14;
reserved 3, 4, 6 to 9;
reserved "ir_version", "producer_version", "producer_tag", "domain";
}
// Tensors
//
// A serialized tensor value.
message TensorProto {
enum DataType {
UNDEFINED = 0;
// Basic types.
FLOAT = 1; // float
UINT8 = 2; // uint8_t
INT8 = 3; // int8_t
UINT16 = 4; // uint16_t
INT16 = 5; // int16_t
INT32 = 6; // int32_t
INT64 = 7; // int64_t
STRING = 8; // string
BOOL = 9; // bool
// IEEE754 half-precision floating-point format (16 bits wide).
// This format has 1 sign bit, 5 exponent bits, and 10 mantissa bits.
FLOAT16 = 10;
DOUBLE = 11;
UINT32 = 12;
UINT64 = 13;
COMPLEX64 = 14; // complex with float32 real and imaginary components
COMPLEX128 = 15; // complex with float64 real and imaginary components
// Non-IEEE floating-point format based on IEEE754 single-precision
// floating-point number truncated to 16 bits.
// This format has 1 sign bit, 8 exponent bits, and 7 mantissa bits.
BFLOAT16 = 16;
// Non-IEEE floating-point format based on papers
// FP8 Formats for Deep Learning, https://arxiv.org/abs/2209.05433,
// 8-bit Numerical Formats For Deep Neural Networks, https://arxiv.org/pdf/2206.02915.pdf.
// Operators supported FP8 are Cast, CastLike, QuantizeLinear, DequantizeLinear.
// The computation usually happens inside a block quantize / dequantize
// fused by the runtime.
FLOAT8E4M3FN = 17; // float 8, mostly used for coefficients, supports nan, not inf
FLOAT8E4M3FNUZ = 18; // float 8, mostly used for coefficients, supports nan, not inf, no negative zero
FLOAT8E5M2 = 19; // follows IEEE 754, supports nan, inf, mostly used for gradients
FLOAT8E5M2FNUZ = 20; // follows IEEE 754, supports nan, inf, mostly used for gradients, no negative zero
// Future extensions go here.
}
// The shape of the tensor.
repeated int64 dims = 1;
// The data type of the tensor.
// This field MUST have a valid TensorProto.DataType value
int32 data_type = 2;
// For very large tensors, we may want to store them in chunks, in which
// case the following fields will specify the segment that is stored in
// the current TensorProto.
message Segment {
int64 begin = 1;
int64 end = 2;
}
Segment segment = 3;
// Tensor content must be organized in row-major order.
//
// Depending on the data_type field, exactly one of the fields below with
// name ending in _data is used to store the elements of the tensor.
// For float and complex64 values
// Complex64 tensors are encoded as a single array of floats,
// with the real components appearing in odd numbered positions,
// and the corresponding imaginary component appearing in the
// subsequent even numbered position. (e.g., [1.0 + 2.0i, 3.0 + 4.0i]
// is encoded as [1.0, 2.0, 3.0, 4.0]).
// When this field is present, the data_type field MUST be FLOAT or COMPLEX64.
repeated float float_data = 4 [packed = true];
// For int32, uint8, int8, uint16, int16, bool, float8, and float16 values
// float16 and float8 values must be bit-wise converted to an uint16_t prior
// to writing to the buffer.
// When this field is present, the data_type field MUST be
// INT32, INT16, INT8, UINT16, UINT8, BOOL, FLOAT16, BFLOAT16, FLOAT8E4M3FN, FLOAT8E4M3FNUZ, FLOAT8E5M2, FLOAT8E5M2FNUZ
repeated int32 int32_data = 5 [packed = true];
// For strings.
// Each element of string_data is a UTF-8 encoded Unicode
// string. No trailing null, no leading BOM. The protobuf "string"
// scalar type is not used to match ML community conventions.
// When this field is present, the data_type field MUST be STRING
repeated bytes string_data = 6;
// For int64.
// When this field is present, the data_type field MUST be INT64
repeated int64 int64_data = 7 [packed = true];
// Optionally, a name for the tensor.
string name = 8; // namespace Value
// A human-readable documentation for this tensor. Markdown is allowed.
string doc_string = 12;
// Serializations can either use one of the fields above, or use this
// raw bytes field. The only exception is the string case, where one is
// required to store the content in the repeated bytes string_data field.
//
// When this raw_data field is used to store tensor value, elements MUST
// be stored in as fixed-width, little-endian order.
// Floating-point data types MUST be stored in IEEE 754 format.
// Complex64 elements must be written as two consecutive FLOAT values, real component first.
// Complex128 elements must be written as two consecutive DOUBLE values, real component first.
// Boolean type MUST be written one byte per tensor element (00000001 for true, 00000000 for false).
//
// Note: the advantage of specific field rather than the raw_data field is
// that in some cases (e.g. int data), protobuf does a better packing via
// variable length storage, and may lead to smaller binary footprint.
// When this field is present, the data_type field MUST NOT be STRING or UNDEFINED
bytes raw_data = 9;
// Data can be stored inside the protobuf file using type-specific fields or raw_data.
// Alternatively, raw bytes data can be stored in an external file, using the external_data field.
// external_data stores key-value pairs describing data location. Recognized keys are:
// - "location" (required) - POSIX filesystem path relative to the directory where the ONNX
// protobuf model was stored
// - "offset" (optional) - position of byte at which stored data begins. Integer stored as string.
// Offset values SHOULD be multiples of 4096 (page size) to enable mmap support.
// - "length" (optional) - number of bytes containing data. Integer stored as string.
// - "checksum" (optional) - SHA1 digest of file specified in under 'location' key.
repeated StringStringEntryProto external_data = 13;
// Location of the data for this tensor. MUST be one of:
// - DEFAULT - data stored inside the protobuf message. Data is stored in raw_data (if set) otherwise in type-specified field.
// - EXTERNAL - data stored in an external location as described by external_data field.
enum DataLocation {
DEFAULT = 0;
EXTERNAL = 1;
}
// If value not set, data is stored in raw_data (if set) otherwise in type-specified field.
DataLocation data_location = 14;
// For double
// Complex128 tensors are encoded as a single array of doubles,
// with the real components appearing in odd numbered positions,
// and the corresponding imaginary component appearing in the
// subsequent even numbered position. (e.g., [1.0 + 2.0i, 3.0 + 4.0i]
// is encoded as [1.0, 2.0, 3.0, 4.0]).
// When this field is present, the data_type field MUST be DOUBLE or COMPLEX128
repeated double double_data = 10 [packed = true];
// For uint64 and uint32 values
// When this field is present, the data_type field MUST be
// UINT32 or UINT64
repeated uint64 uint64_data = 11 [packed = true];
}
// A serialized sparse-tensor value
message SparseTensorProto {
// The sequence of non-default values is encoded as a tensor of shape [NNZ].
// The default value is zero for numeric tensors, and the empty string for string tensors.
// values must have a non-empty name, which serves as the name of the SparseTensorProto
// when it is used in a sparse_initializer list.
TensorProto values = 1;
// The indices of the non-default values, which may be stored in one of two formats.
// (a) Indices can be a tensor of shape [NNZ, rank] with the [i,j]-th value
// corresponding to the j-th index of the i-th value (in the values tensor).
// (b) Indices can be a tensor of shape [NNZ], in which case the i-th value
// must be the linearized-index of the i-th value (in the values tensor).
// The linearized-index can be converted into an index tuple (k_1,...,k_rank)
// using the shape provided below.
// The indices must appear in ascending order without duplication.
// In the first format, the ordering is lexicographic-ordering:
// e.g., index-value [1,4] must appear before [2,1]
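// Illustrative example (hypothetical values): for a dense tensor of shape [3, 4] with
// non-default entries at [0,1] = 5 and [2,3] = 7:
//   values  = [5, 7]
//   format (a): indices is a [2, 2] tensor [[0, 1], [2, 3]]
//   format (b): indices is a [2] tensor [1, 11]   (since 0*4+1 = 1 and 2*4+3 = 11)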
TensorProto indices = 2;
// The shape of the underlying dense-tensor: [dim_1, dim_2, ... dim_rank]
repeated int64 dims = 3;
}
// Defines a tensor shape. A dimension can be either an integer value
// or a symbolic variable. A symbolic variable represents an unknown
// dimension.
message TensorShapeProto {
message Dimension {
oneof value {
int64 dim_value = 1;
string dim_param = 2; // namespace Shape
};
// Standard denotation can optionally be used to denote tensor
// dimensions with standard semantic descriptions to ensure
// that operations are applied to the correct axis of a tensor.
// Refer to https://github.com/onnx/onnx/blob/main/docs/DimensionDenotation.md#denotation-definition
// for pre-defined dimension denotations.
string denotation = 3;
};
repeated Dimension dim = 1;
}
// Types
//
// The standard ONNX data types.
message TypeProto {
message Tensor {
// This field MUST NOT have the value of UNDEFINED
// This field MUST have a valid TensorProto.DataType value
// This field MUST be present for this version of the IR.
int32 elem_type = 1;
TensorShapeProto shape = 2;
}
// repeated T
message Sequence {
// The type and optional shape of each element of the sequence.
// This field MUST be present for this version of the IR.
TypeProto elem_type = 1;
};
// map<K,V>
message Map {
// This field MUST have a valid TensorProto.DataType value
// This field MUST be present for this version of the IR.
// This field MUST refer to an integral type ([U]INT{8|16|32|64}) or STRING
int32 key_type = 1;
// This field MUST be present for this version of the IR.
TypeProto value_type = 2;
};
// wrapper for Tensor, Sequence, or Map
message Optional {
// The type and optional shape of the element wrapped.
// This field MUST be present for this version of the IR.
// Possible values correspond to OptionalProto.DataType enum
TypeProto elem_type = 1;
};
message SparseTensor {
// This field MUST NOT have the value of UNDEFINED
// This field MUST have a valid TensorProto.DataType value
// This field MUST be present for this version of the IR.
int32 elem_type = 1;
TensorShapeProto shape = 2;
}
oneof value {
// The type of a tensor.
Tensor tensor_type = 1;
// NOTE: DNN-only implementations of ONNX MAY elect to not support non-tensor values
// as input and output to graphs and nodes. These types are needed to naturally
// support classical ML operators. DNN operators SHOULD restrict their input
// and output types to tensors.
// The type of a sequence.
Sequence sequence_type = 4;
// The type of a map.
Map map_type = 5;
// The type of an optional.
Optional optional_type = 9;
// Type of the sparse tensor
SparseTensor sparse_tensor_type = 8;
}
// An optional denotation can be used to denote the whole
// type with a standard semantic description as to what is
// stored inside. Refer to https://github.com/onnx/onnx/blob/main/docs/TypeDenotation.md#type-denotation-definition
// for pre-defined type denotations.
string denotation = 6;
}
// Operator Sets
//
// OperatorSets are uniquely identified by a (domain, opset_version) pair.
message OperatorSetIdProto {
// The domain of the operator set being identified.
// The empty string ("") or absence of this field implies the operator
// set that is defined as part of the ONNX specification.
// This field MUST be present in this version of the IR when referring to any other operator set.
string domain = 1;
// The version of the operator set being identified.
// This field MUST be present in this version of the IR.
int64 version = 2;
}
// Operator/function status.
enum OperatorStatus {
EXPERIMENTAL = 0;
STABLE = 1;
}
message FunctionProto {
// The name of the function, similar usage of op_type in OperatorProto.
// Combined with FunctionProto.domain, this forms the unique identity of
// the FunctionProto.
string name = 1;
// Deprecated since IR Version 8
// optional int64 since_version = 2;
reserved 2;
reserved "since_version";
// Deprecated since IR Version 8
// optional OperatorStatus status = 3;
reserved 3;
reserved "status";
// The inputs and outputs of the function.
repeated string input = 4;
repeated string output = 5;
// The attribute parameters of the function.
// It is for function parameters without default values.
repeated string attribute = 6;
// The attribute protos of the function.
// It is for function attributes with default values.
// A function attribute shall be represented either as
// a string attribute or an AttributeProto, not both.
repeated AttributeProto attribute_proto = 11;
// The nodes in the function.
repeated NodeProto node = 7;
// A human-readable documentation for this function. Markdown is allowed.
string doc_string = 8;
// The OperatorSets this function body (graph) relies on.
//
// All nodes in the function body (graph) will bind against the operator
// with the same domain and op_type that has the HIGHEST version
// in the referenced operator sets. This means at most one version can be relied
// upon per domain.
//
// The operator sets imported by FunctionProto should be compatible with the ones
// imported by ModelProto. For example, if the same operator set, say 'A', is imported by both
// FunctionProto and ModelProto, the versions of that operator set may differ, but
// the operator schema returned for a given (op_type, domain, version) combination
// should be the same for both versions.
repeated OperatorSetIdProto opset_import = 9;
// The domain which this function belongs to. Combined with FunctionProto.name, this forms the unique identity of
// the FunctionProto.
string domain = 10;
}
// For using protobuf-lite
option optimize_for = LITE_RUNTIME;
| candle/candle-onnx/src/onnx.proto3/0 | {
"file_path": "candle/candle-onnx/src/onnx.proto3",
"repo_id": "candle",
"token_count": 10183
} |
# Generated content DO NOT EDIT
from typing import Any, Callable, Dict, List, Optional, Tuple, Union, Sequence
from os import PathLike
from candle.typing import _ArrayLike, Device, Scalar, Index, Shape
from candle import Tensor, DType, QTensor
@staticmethod
def silu(tensor: Tensor) -> Tensor:
"""
Applies the Sigmoid Linear Unit (SiLU) function to a given tensor.
"""
pass
@staticmethod
def softmax(tensor: Tensor, dim: int) -> Tensor:
"""
Applies the Softmax function to a given tensor.
"""
pass
| candle/candle-pyo3/py_src/candle/nn/__init__.pyi/0 | {
"file_path": "candle/candle-pyo3/py_src/candle/nn/__init__.pyi",
"repo_id": "candle",
"token_count": 181
} |
use ::candle::Tensor;
use pyo3::prelude::*;
#[derive(Clone, Debug)]
/// Represents an absolute shape e.g. (1, 2, 3)
pub struct PyShape(Vec<usize>);
impl<'source> pyo3::FromPyObject<'source> for PyShape {
fn extract_bound(ob: &Bound<'source, PyAny>) -> PyResult<Self> {
if ob.is_none() {
return Err(PyErr::new::<pyo3::exceptions::PyValueError, _>(
"Shape cannot be None",
));
}
let tuple = ob.downcast::<pyo3::types::PyTuple>()?;
if tuple.len() == 1 {
let first_element = tuple.get_item(0)?;
let dims: Vec<usize> = pyo3::FromPyObject::extract_bound(&first_element)?;
Ok(PyShape(dims))
} else {
let dims: Vec<usize> = pyo3::FromPyObject::extract_bound(tuple)?;
Ok(PyShape(dims))
}
}
}
impl From<PyShape> for ::candle::Shape {
fn from(val: PyShape) -> Self {
val.0.into()
}
}
#[derive(Clone, Debug)]
/// Represents a shape with a hole in it e.g. (1, -1, 3)
pub struct PyShapeWithHole(Vec<isize>);
impl<'source> pyo3::FromPyObject<'source> for PyShapeWithHole {
fn extract_bound(ob: &Bound<'source, PyAny>) -> PyResult<Self> {
if ob.is_none() {
return Err(PyErr::new::<pyo3::exceptions::PyValueError, _>(
"Shape cannot be None",
));
}
let tuple = ob.downcast::<pyo3::types::PyTuple>()?;
let dims: Vec<isize> = if tuple.len() == 1 {
let first_element = tuple.get_item(0)?;
pyo3::FromPyObject::extract_bound(&first_element)?
} else {
pyo3::FromPyObject::extract_bound(tuple)?
};
// Ensure we have only positive numbers and at most one "hole" (-1)
let negative_ones = dims.iter().filter(|&&x| x == -1).count();
let any_invalid_dimensions = dims.iter().any(|&x| x < -1 || x == 0);
if negative_ones > 1 || any_invalid_dimensions {
return Err(PyErr::new::<pyo3::exceptions::PyValueError, _>(format!(
"Invalid dimension in shape: {:?}",
dims
)));
}
Ok(PyShapeWithHole(dims))
}
}
impl PyShapeWithHole {
/// Returns `true` if the shape is absolute e.g. (1, 2, 3)
pub fn is_absolute(&self) -> bool {
self.0.iter().all(|x| *x > 0)
}
/// Convert a relative shape to an absolute shape e.g. (1, -1) -> (1, 12)
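/// For example (illustrative), with a tensor holding 24 elements, `(2, -1, 3)` resolves to
/// `(2, 4, 3)` since 24 / (2 * 3) = 4.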
pub fn to_absolute(&self, t: &Tensor) -> PyResult<PyShape> {
if self.is_absolute() {
return Ok(PyShape(
self.0.iter().map(|x| *x as usize).collect::<Vec<usize>>(),
));
}
let mut elements = t.elem_count();
let mut new_dims: Vec<usize> = vec![];
for dim in self.0.iter() {
if *dim > 0 {
new_dims.push(*dim as usize);
elements /= *dim as usize;
} else if *dim == -1 {
new_dims.push(elements);
} else {
return Err(PyErr::new::<pyo3::exceptions::PyValueError, _>(format!(
"Invalid dimension in shape: {}",
dim
)));
}
}
Ok(PyShape(new_dims))
}
}
| candle/candle-pyo3/src/shape.rs/0 | {
"file_path": "candle/candle-pyo3/src/shape.rs",
"repo_id": "candle",
"token_count": 1664
} |
//! Based, a linear attention language model from the Stanford Hazy Research group.
//!
//! See "Simple linear attention language models balance the recall-throughput tradeoff", Arora et al. 2024
//! - [Arxiv](https://arxiv.org/abs/2402.18668)
//! - [GitHub Repo](https://github.com/HazyResearch/based)
//! - [Blogpost](https://hazyresearch.stanford.edu/blog/2024-03-03-based)
use candle::{DType, Device, IndexOp, Module, Result, Tensor, D};
use candle_nn::{
conv1d_no_bias, linear, linear_no_bias, ops::softmax_last_dim, rms_norm, Conv1d, Conv1dConfig,
Func, Linear, RmsNorm, VarBuilder,
};
use std::sync::Arc;
#[derive(Debug, Clone, serde::Deserialize)]
pub struct LinearAttentionFeatureMapConfig {
input_dim: usize,
}
#[derive(Debug, Clone, serde::Deserialize)]
pub struct LinearAttentionConfig {
num_heads: usize,
feature_dim: usize,
feature_map: LinearAttentionFeatureMapConfig,
}
#[derive(Debug, Clone, serde::Deserialize)]
pub struct SlidingWindowAttentionConfig {
num_heads: usize,
window_size: usize,
}
#[derive(Debug, Clone, serde::Deserialize)]
pub struct Config {
vocab_size: usize,
#[serde(rename = "n_embd")]
hidden_size: usize,
#[serde(rename = "n_inner")]
intermediate_size: usize,
#[serde(rename = "n_layer")]
num_hidden_layers: usize,
#[serde(rename = "n_head")]
num_attention_heads: usize,
layer_norm_epsilon: f64,
#[serde(default = "default_rope", rename = "rotary_emb_base")]
rope_theta: f64,
alt_mixer_layers: Vec<usize>,
alt_mixer_2_layers: Vec<usize>,
#[serde(rename = "alt_mixer")]
la: LinearAttentionConfig,
#[serde(rename = "alt_mixer_2")]
swa: SlidingWindowAttentionConfig,
}
fn default_rope() -> f64 {
10_000.0
}
#[derive(Debug, Clone)]
#[allow(clippy::upper_case_acronyms)]
struct MLP {
fc1: Linear,
fc2: Linear,
}
impl MLP {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let fc1 = linear_no_bias(cfg.hidden_size, cfg.hidden_size * 4, vb.pp("fc1"))?;
let fc2 = linear_no_bias(cfg.intermediate_size, cfg.hidden_size, vb.pp("fc2"))?;
Ok(Self { fc1, fc2 })
}
}
// Swiglu implementation.
// Not using Activation::Swiglu because this has the gate and y arguments switched compared to the version in candle-nn/src/ops.rs
fn swiglu(xs: &Tensor) -> Result<Tensor> {
let xs = xs.chunk(2, D::Minus1)?;
&xs[1].silu()? * &xs[0]
}
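// A minimal sketch (not part of the original file) illustrating the argument order noted
// above: the first chunk is the value branch and the second chunk is the gate, so for
// xs = concat([x, g], -1) this computes silu(g) * x.
#[cfg(test)]
mod swiglu_order_sketch {
    use super::*;

    #[test]
    fn swiglu_uses_second_chunk_as_gate() -> Result<()> {
        let dev = Device::Cpu;
        // x = [1, 2], gate = [3, 4]
        let xs = Tensor::new(&[[1f32, 2., 3., 4.]], &dev)?;
        let ys = swiglu(&xs)?.to_vec2::<f32>()?;
        // Expected: [silu(3) * 1, silu(4) * 2] ~= [2.8577, 7.8561]
        assert!((ys[0][0] - 2.8577).abs() < 1e-3);
        assert!((ys[0][1] - 7.8561).abs() < 1e-3);
        Ok(())
    }
}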
impl Module for MLP {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let xs = xs.apply(&self.fc1)?;
let xs = swiglu(&xs)?;
let xs = xs.apply(&self.fc2)?;
Ok(xs)
}
}
// A gated convolutional block.
#[derive(Debug, Clone)]
struct BasedConv {
in_proj: Linear,
out_proj: Linear,
conv: Conv1d,
state: Tensor,
}
impl BasedConv {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let dim = cfg.hidden_size * 2;
let conv1d_cfg = Conv1dConfig {
groups: dim,
padding: 2,
..Default::default()
};
let in_proj = linear(cfg.hidden_size, cfg.hidden_size * 4, vb.pp("in_proj"))?;
let out_proj = linear(dim, cfg.hidden_size, vb.pp("out_proj"))?;
let conv = conv1d_no_bias(dim, dim, 3, conv1d_cfg, vb.pp("conv.conv"))?;
let state = Tensor::zeros((1, dim, 3), vb.dtype(), vb.device())?;
Ok(Self {
in_proj,
out_proj,
conv,
state,
})
}
fn step(&mut self, xs: &Tensor) -> Result<Tensor> {
self.state = self.state.roll(-1, D::Minus1)?;
let (_, _, l) = self.state.dims3()?;
self.state = self.state.narrow(D::Minus1, 0, l - 1)?;
self.state = Tensor::cat(&[&self.state, &xs.transpose(1, 2)?], 2)?;
let xs = (&self.state * self.conv.weight().permute((1, 0, 2))?)?
.sum_keepdim(0)?
.sum(D::Minus1)?;
let xs = xs.unsqueeze(1)?;
Ok(xs)
}
fn forward(&mut self, xs: &Tensor, seqlen_offset: usize) -> Result<Tensor> {
let xs = xs.apply(&self.in_proj)?;
let us = xs.chunk(2, D::Minus1)?;
let (_b, l, _d) = us[0].dims3()?;
let u_conv = if seqlen_offset > 0 {
self.step(&us[0])?
} else {
let k = std::cmp::min(3, l);
self.state = self.state.narrow(D::Minus1, 0, 3 - k)?;
let xs = us[0].narrow(1, l - k, k)?.transpose(1, 2)?;
self.state = Tensor::cat(&[&self.state, &xs], 2)?;
us[0]
.transpose(1, 2)?
.apply(&self.conv)?
.narrow(D::Minus1, 0, l)?
.transpose(1, 2)?
};
let u_conv = u_conv.silu()?;
let v = u_conv.broadcast_mul(&us[1])?;
let xs = v.apply(&self.out_proj)?;
Ok(xs)
}
}
// Linear attention approximating softmax using second order Taylor polynomials.
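// The feature map below is phi(x) = [1, x / d^(1/4), vec(x x^T) / (sqrt(2) * sqrt(d))] with
// d = input_dim, so that phi(q) . phi(k) = 1 + (q.k)/sqrt(d) + (q.k)^2 / (2d), i.e. the
// second-order Taylor expansion of exp(q.k / sqrt(d)).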
#[derive(Debug, Clone)]
struct LinearAttention {
proj_q: Linear,
proj_k: Linear,
proj_v: Linear,
out_proj: Linear,
feature_dim: usize,
num_heads: usize,
input_dim: usize,
k_state: Tensor,
kv_state: Tensor,
}
impl LinearAttention {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let input_dim = cfg.la.feature_map.input_dim;
let out_proj = linear_no_bias(cfg.hidden_size, cfg.hidden_size, vb.pp("out_proj"))?;
let proj_k = linear_no_bias(
cfg.hidden_size,
cfg.la.num_heads * cfg.la.feature_dim,
vb.pp("proj_k"),
)?;
let proj_q = linear_no_bias(
cfg.hidden_size,
cfg.la.num_heads * cfg.la.feature_dim,
vb.pp("proj_q"),
)?;
let proj_v = linear_no_bias(cfg.hidden_size, cfg.hidden_size, vb.pp("proj_v"))?;
let expanded_size = cfg.la.feature_dim.pow(2) + cfg.la.feature_dim + 1;
let k_state = Tensor::zeros(
(1, cfg.la.num_heads, 1, 1, expanded_size),
vb.dtype(),
vb.device(),
)?;
let kv_state = Tensor::zeros(
(1, cfg.la.num_heads, cfg.la.feature_dim, expanded_size),
vb.dtype(),
vb.device(),
)?;
Ok(Self {
proj_q,
proj_k,
proj_v,
out_proj,
feature_dim: cfg.la.feature_dim,
num_heads: cfg.la.num_heads,
input_dim,
k_state,
kv_state,
})
}
fn taylor_expansion(&self) -> Result<Func<'static>> {
let r2 = std::f64::consts::SQRT_2;
let rd = (self.input_dim as f64).sqrt();
let rrd = rd.sqrt();
Ok(Func::new(move |xs| {
let dims = xs.dims();
let mut d = dims.to_vec();
if let Some(last) = d.last_mut() {
*last = 1;
};
let x = xs
.unsqueeze(D::Minus1)?
.broadcast_mul(&xs.unsqueeze(D::Minus2)?)?;
let x = (x.flatten_from(D::Minus2)? / r2)?;
let o = Tensor::ones(d, xs.dtype(), xs.device())?;
let x = Tensor::cat(&[o, (xs / rrd)?, (&x / rd)?], D::Minus1)?;
Ok(x)
}))
}
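    // When decoding with a sequence offset, the attention is evaluated recurrently:
    //   kv_state_t = kv_state_{t-1} + phi(k_t) v_t^T,   k_state_t = k_state_{t-1} + phi(k_t)
    //   y_t = (phi(q_t) . kv_state_t) / (phi(q_t) . k_state_t + eps)
    // which is the causal linear-attention update used by the Based architecture.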
fn forward(&mut self, xs: &Tensor, seqlen_offset: usize) -> Result<Tensor> {
let eps = 1e-12;
let feature_map = self.taylor_expansion()?;
let (b, l, d) = xs.dims3()?;
let q = xs.apply(&self.proj_q)?;
let k = xs.apply(&self.proj_k)?;
let v = xs.apply(&self.proj_v)?;
let q = q
.reshape((b, l, self.num_heads, self.feature_dim))?
.transpose(1, 2)?
.contiguous()?;
let k = k
.reshape((b, l, self.num_heads, self.feature_dim))?
.transpose(1, 2)?
.contiguous()?;
let v = v
.reshape((b, l, self.num_heads, d / self.num_heads))?
.transpose(1, 2)?
.contiguous()?;
let q = feature_map.forward(&q)?;
let k = feature_map.forward(&k)?;
let y = if seqlen_offset > 0 {
let (_b, _h, l, _d) = k.dims4()?;
let q = q.unsqueeze(D::Minus2)?;
let k = k.unsqueeze(D::Minus2)?;
let v = v.unsqueeze(D::Minus1)?;
let kn = k.narrow(D::Minus1, l - 1, 1)?;
let vn = v.narrow(D::Minus1, l - 1, 1)?;
self.k_state = self.k_state.broadcast_add(&kn)?;
self.kv_state = self.kv_state.broadcast_add(&kn.broadcast_mul(&vn)?)?;
let num = q.broadcast_mul(&self.kv_state)?.sum(D::Minus1)?;
let den = (q.broadcast_mul(&self.k_state)?.sum(D::Minus1)? + eps)?;
num.broadcast_div(&den)?
} else {
self.k_state = k.sum(2)?.unsqueeze(2)?.unsqueeze(3)?;
self.kv_state = k
.transpose(2, 3)?
.matmul(&v)?
.transpose(2, 3)?
.unsqueeze(2)?;
let aqk = q.matmul(&k.transpose(D::Minus1, D::Minus2)?)?;
let tril = Tensor::tril2(l, aqk.dtype(), aqk.device())?;
let aqk = aqk.broadcast_mul(&tril)?.matmul(&v)?;
let z = (1f64 / (q.mul(&k.cumsum(2)?)?.sum(D::Minus1)? + eps)?)?;
aqk.broadcast_mul(&z.unsqueeze(D::Minus1)?)?
};
let (b, h, l, d) = y.dims4()?;
let y = y.permute((0, 2, 1, 3))?.reshape((b, l, h * d))?;
let y = self.out_proj.forward(&y)?;
Ok(y)
}
}
// Rotary embeddings used in local attention.
#[derive(Debug, Clone)]
struct RotaryEmbedding {
sin: Tensor,
cos: Tensor,
}
impl RotaryEmbedding {
fn new(dtype: DType, cfg: &Config, dev: &Device) -> Result<Self> {
let dim = cfg.hidden_size / cfg.num_attention_heads;
let max_seq_len = 2048; // Hardcoded, missing from config.
let inv_freq: Vec<_> = (0..dim)
.step_by(2)
.map(|i| 1f32 / cfg.rope_theta.powf(i as f64 / dim as f64) as f32)
.collect();
let inv_freq_len = inv_freq.len();
let inv_freq = Tensor::from_vec(inv_freq, (1, inv_freq_len), dev)?.to_dtype(dtype)?;
let t = Tensor::arange(0u32, max_seq_len as u32, dev)?
.to_dtype(dtype)?
.reshape((max_seq_len, 1))?;
let freqs = t.matmul(&inv_freq)?;
Ok(Self {
sin: freqs.sin()?,
cos: freqs.cos()?,
})
}
fn apply_rotary_emb_qkv(
&self,
q: &Tensor,
k: &Tensor,
seqlen_offset: usize,
) -> Result<(Tensor, Tensor)> {
let (_b_sz, _h, seq_len, _n_embd) = q.dims4()?;
let cos = self.cos.narrow(0, seqlen_offset, seq_len)?;
let sin = self.sin.narrow(0, seqlen_offset, seq_len)?;
let q_embed = candle_nn::rotary_emb::rope(&q.contiguous()?, &cos, &sin)?;
let k_embed = candle_nn::rotary_emb::rope(&k.contiguous()?, &cos, &sin)?;
Ok((q_embed, k_embed))
}
}
// Local attention using a small sliding window.
#[derive(Debug, Clone)]
struct SlidingWindowAttention {
wqkv: Linear,
out_proj: Linear,
num_heads: usize,
head_dim: usize,
hidden_size: usize,
rotary_emb: Arc<RotaryEmbedding>,
kv_cache: Option<(Tensor, Tensor)>,
}
impl SlidingWindowAttention {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let hidden_size = cfg.hidden_size;
let num_heads = cfg.swa.num_heads;
let head_dim = hidden_size / num_heads;
let out_proj = linear_no_bias(hidden_size, hidden_size, vb.pp("out_proj"))?;
let wqkv = linear_no_bias(hidden_size, hidden_size * 3, vb.pp("Wqkv"))?;
let rotary_emb = Arc::new(RotaryEmbedding::new(vb.dtype(), cfg, vb.device())?);
Ok(Self {
wqkv,
out_proj,
hidden_size,
num_heads,
head_dim,
rotary_emb,
kv_cache: None,
})
}
fn forward(
&mut self,
xs: &Tensor,
attention_mask: Option<&Tensor>,
seqlen_offset: usize,
) -> Result<Tensor> {
let (b_sz, q_len, _) = xs.dims3()?;
let qkv = xs.apply(&self.wqkv)?;
let qkv = qkv.reshape((b_sz, q_len, 3, (), self.head_dim))?;
let q = qkv.i((.., .., 0))?;
let k = qkv.i((.., .., 1))?;
let v = qkv.i((.., .., 2))?;
let q = q
.reshape((b_sz, q_len, self.num_heads, self.head_dim))?
.transpose(1, 2)?;
let k = k
.reshape((b_sz, q_len, self.num_heads, self.head_dim))?
.transpose(1, 2)?;
let v = v
.reshape((b_sz, q_len, self.num_heads, self.head_dim))?
.transpose(1, 2)?;
let (q, k) = self
.rotary_emb
.apply_rotary_emb_qkv(&q, &k, seqlen_offset)?;
let (k, v) = match &self.kv_cache {
None => (k, v),
Some((prev_k, prev_v)) => {
let k = Tensor::cat(&[prev_k, &k], 2)?;
let v = Tensor::cat(&[prev_v, &v], 2)?;
(k, v)
}
};
self.kv_cache = Some((k.clone(), v.clone()));
let scale = 1f64 / f64::sqrt(self.head_dim as f64);
let attn_weights = (q.matmul(&k.transpose(2, 3)?)? * scale)?;
let attn_weights = match attention_mask {
None => attn_weights,
Some(mask) => attn_weights.broadcast_add(mask)?,
};
let attn_weights = softmax_last_dim(&attn_weights)?;
let attn_output = attn_weights.matmul(&v)?;
let out = attn_output
.transpose(1, 2)?
.reshape((b_sz, q_len, self.hidden_size))?
.apply(&self.out_proj)?;
Ok(out)
}
}
// The model layers use three types of mixers.
#[derive(Debug, Clone)]
enum SequenceMixer {
Based(BasedConv),
Linear(LinearAttention),
Sliding(SlidingWindowAttention),
}
impl SequenceMixer {
fn forward(
&mut self,
xs: &Tensor,
attention_mask: Option<&Tensor>,
pos: usize,
) -> Result<Tensor> {
match self {
Self::Based(b) => b.forward(xs, pos),
Self::Linear(b) => b.forward(xs, pos),
Self::Sliding(b) => b.forward(xs, attention_mask, pos),
}
}
}
#[derive(Debug, Clone)]
struct DecoderLayer {
mlp: MLP,
norm1: RmsNorm,
norm2: RmsNorm,
mixer: SequenceMixer,
}
impl DecoderLayer {
fn new(layer_idx: usize, cfg: &Config, vb: VarBuilder) -> Result<Self> {
let mlp = MLP::new(cfg, vb.pp("mlp"))?;
let norm1 = rms_norm(cfg.hidden_size, cfg.layer_norm_epsilon, vb.pp("norm1"))?;
let norm2 = rms_norm(cfg.hidden_size, cfg.layer_norm_epsilon, vb.pp("norm2"))?;
let l_attn = cfg.alt_mixer_layers.contains(&layer_idx);
let sw_attn = cfg.alt_mixer_2_layers.contains(&layer_idx);
let mixer = if l_attn {
SequenceMixer::Linear(LinearAttention::new(cfg, vb.pp("mixer"))?)
} else if sw_attn {
SequenceMixer::Sliding(SlidingWindowAttention::new(cfg, vb.pp("mixer"))?)
} else {
SequenceMixer::Based(BasedConv::new(cfg, vb.pp("mixer"))?)
};
Ok(Self {
mlp,
norm1,
norm2,
mixer,
})
}
fn forward(
&mut self,
xs: &Tensor,
attention_mask: Option<&Tensor>,
seqlen_offset: usize,
) -> Result<Tensor> {
let residual = xs;
let xs = self.norm1.forward(xs)?;
let xs = self.mixer.forward(&xs, attention_mask, seqlen_offset)?;
let xs = (xs + residual)?;
let residual = &xs;
let xs = xs.apply(&self.norm2)?.apply(&self.mlp)?;
residual + xs
}
}
#[derive(Debug, Clone)]
pub struct Model {
embed_tokens: super::with_tracing::Embedding,
layers: Vec<DecoderLayer>,
norm: RmsNorm,
lm_head: Linear,
sliding_window: usize,
device: Device,
dtype: DType,
}
impl Model {
pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let vocab_size = cfg.vocab_size + (8 - cfg.vocab_size % 8) % 8;
let lm_head = linear_no_bias(cfg.hidden_size, vocab_size, vb.pp("lm_head"))?;
let embed_tokens = super::with_tracing::Embedding::from_weights(lm_head.weight().clone())?;
let vb_m = vb.pp("transformer");
let mut layers = Vec::with_capacity(cfg.num_hidden_layers);
let vb_l = vb_m.pp("layers");
for layer_idx in 0..cfg.num_hidden_layers {
let layer = DecoderLayer::new(layer_idx, cfg, vb_l.pp(layer_idx))?;
layers.push(layer)
}
let norm = rms_norm(cfg.hidden_size, cfg.layer_norm_epsilon, vb_m.pp("ln_f"))?;
Ok(Self {
embed_tokens,
layers,
norm,
lm_head,
sliding_window: cfg.swa.window_size,
device: vb.device().clone(),
dtype: vb.dtype(),
})
}
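    // Builds a banded causal mask: position i can attend to positions j with i - w <= j <= i,
    // where w = self.sliding_window / 2. Illustrative 4x4 pattern with w = 1
    // (0 = attend, -inf = masked):
    //   [   0, -inf, -inf, -inf]
    //   [   0,    0, -inf, -inf]
    //   [-inf,    0,    0, -inf]
    //   [-inf, -inf,    0,    0]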
fn prepare_decoder_attention_mask(
&self,
b_size: usize,
tgt_len: usize,
seqlen_offset: usize,
) -> Result<Tensor> {
let sliding_window = self.sliding_window / 2;
let mask: Vec<_> = (0..tgt_len)
.flat_map(|i| {
(0..tgt_len).map(move |j| {
if i < j || j + sliding_window < i {
f32::NEG_INFINITY
} else {
0.
}
})
})
.collect();
let mask = Tensor::from_slice(&mask, (tgt_len, tgt_len), &self.device)?;
let mask = if seqlen_offset > 0 {
let mask0 = Tensor::zeros((tgt_len, seqlen_offset), self.dtype, &self.device)?;
Tensor::cat(&[&mask0, &mask], D::Minus1)?
} else {
mask
};
mask.expand((b_size, 1, tgt_len, tgt_len + seqlen_offset))?
.to_dtype(self.dtype)
}
pub fn forward(&mut self, input_ids: &Tensor, seqlen_offset: usize) -> Result<Tensor> {
let (b_size, seq_len) = input_ids.dims2()?;
let attention_mask = if seq_len <= 1 {
None
} else {
let mask = self.prepare_decoder_attention_mask(b_size, seq_len, seqlen_offset)?;
Some(mask)
};
let mut xs = self.embed_tokens.forward(input_ids)?;
for layer in self.layers.iter_mut() {
xs = layer.forward(&xs, attention_mask.as_ref(), seqlen_offset)?
}
xs.narrow(1, seq_len - 1, 1)?
.apply(&self.norm)?
.apply(&self.lm_head)
}
}
| candle/candle-transformers/src/models/based.rs/0 | {
"file_path": "candle/candle-transformers/src/models/based.rs",
"repo_id": "candle",
"token_count": 9967
} |
//! ConvNeXt implementation.
//!
//! This candle implementation uses a pre-trained ConvNeXt network for inference. The
//! classification head has been trained on the ImageNet dataset and returns the
//! probabilities for the top-5 classes.
//!
//! Original code:
//! - 💻 [ConvNeXt](https://github.com/facebookresearch/ConvNeXt/)
//! - 💻 [ConvNeXt-V2](https://github.com/facebookresearch/ConvNeXt-V2/)
//! - 💻 [timm](https://github.com/huggingface/pytorch-image-models/blob/main/timm/models/convnext.py)
//! - 📝 [Paper](https://arxiv.org/abs/2201.03545) A ConvNet for the 2020s
//! - 📝 [Paper](https://arxiv.org/abs/2301.00808) ConvNeXt V2: Co-designing and Scaling ConvNets with Masked Autoencoders
//!
use candle::shape::ShapeWithOneHole;
use candle::{Result, D};
use candle_nn::{conv2d, layer_norm, linear, Conv2dConfig, Func, VarBuilder};
#[derive(Clone)]
pub struct Config {
blocks: [usize; 4],
channels: [usize; 4],
use_conv_mlp: bool,
}
impl Config {
pub fn atto() -> Self {
Self {
blocks: [2, 2, 6, 2],
channels: [40, 80, 160, 320],
use_conv_mlp: true,
}
}
pub fn femto() -> Self {
Self {
blocks: [2, 2, 6, 2],
channels: [48, 96, 192, 384],
use_conv_mlp: true,
}
}
pub fn pico() -> Self {
Self {
blocks: [2, 2, 6, 2],
channels: [64, 128, 256, 512],
use_conv_mlp: true,
}
}
pub fn nano() -> Self {
Self {
blocks: [2, 2, 8, 2],
channels: [80, 160, 320, 640],
use_conv_mlp: true,
}
}
pub fn tiny() -> Self {
Self {
blocks: [3, 3, 9, 3],
channels: [96, 192, 384, 768],
use_conv_mlp: false,
}
}
pub fn small() -> Self {
Self {
blocks: [3, 3, 27, 3],
channels: [96, 192, 384, 768],
use_conv_mlp: false,
}
}
pub fn base() -> Self {
Self {
blocks: [3, 3, 27, 3],
channels: [128, 256, 512, 1024],
use_conv_mlp: false,
}
}
pub fn large() -> Self {
Self {
blocks: [3, 3, 27, 3],
channels: [192, 384, 768, 1536],
use_conv_mlp: false,
}
}
pub fn xlarge() -> Self {
Self {
blocks: [3, 3, 27, 3],
channels: [256, 512, 1024, 2048],
use_conv_mlp: false,
}
}
pub fn huge() -> Self {
Self {
blocks: [3, 3, 27, 3],
channels: [352, 704, 1408, 2816],
use_conv_mlp: false,
}
}
}
// Layer norm for data in channels-last format.
fn layer_norm_cl(dim: usize, vb: VarBuilder) -> Result<Func<'static>> {
let norm = layer_norm(dim, 1e-6, vb)?;
Ok(Func::new(move |xs| xs.apply(&norm)))
}
// Layer norm for data in channels-first format.
fn layer_norm_cf(dim: usize, vb: VarBuilder) -> Result<Func<'static>> {
let norm = layer_norm(dim, 1e-6, vb)?;
Ok(Func::new(move |xs| {
let xs = xs
.permute((0, 2, 3, 1))?
.apply(&norm)?
.permute((0, 3, 1, 2))?;
Ok(xs)
}))
}
// Global response normalization layer
// Based on https://github.com/huggingface/pytorch-image-models/blob/main/timm/layers/grn.py
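// GRN(x) = gamma * (x * n(x)) + beta + x, where g(x) is the per-channel L2 norm of x over the
// spatial dimensions and n(x) = g(x) / (mean_over_channels(g(x)) + 1e-6).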
fn convnext2_grn(dim: usize, channels_last: bool, vb: VarBuilder) -> Result<Func<'static>> {
let (shape, spatial_dim, channel_dim) = if channels_last {
((1, 1, 1, ()).into_shape(dim)?, [1, 2], 3)
} else {
((1, (), 1, 1).into_shape(dim)?, [2, 3], 1)
};
let gamma = vb.get(dim, "weight")?.reshape(&shape)?;
let beta = vb.get(dim, "bias")?.reshape(&shape)?;
Ok(Func::new(move |xs| {
let residual = xs;
let gx = xs
.sqr()?
.sum_keepdim(spatial_dim)?
.mean_keepdim(spatial_dim)?
.sqrt()?;
let gxmean = gx.mean_keepdim(channel_dim)?;
let nx = gx.broadcast_div(&(gxmean + 1e-6)?)?;
let xs = xs
.broadcast_mul(&nx)?
.broadcast_mul(&gamma)?
.broadcast_add(&beta)?;
xs + residual
}))
}
// Initial downsampling via a patchify layer.
fn convnext_stem(out_channels: usize, vb: VarBuilder) -> Result<Func<'static>> {
let conv2d_cfg = Conv2dConfig {
stride: 4,
..Default::default()
};
let patchify = conv2d(3, out_channels, 4, conv2d_cfg, vb.pp(0))?;
let norm = layer_norm_cf(out_channels, vb.pp(1))?;
Ok(Func::new(move |xs| xs.apply(&patchify)?.apply(&norm)))
}
// Downsampling applied after the stages.
fn convnext_downsample(dim: usize, vb: VarBuilder) -> Result<Func<'static>> {
let conv2d_cfg = Conv2dConfig {
stride: 2,
..Default::default()
};
let norm = layer_norm_cf(dim / 2, vb.pp(0))?;
let conv = conv2d(dim / 2, dim, 2, conv2d_cfg, vb.pp(1))?;
Ok(Func::new(move |xs| xs.apply(&norm)?.apply(&conv)))
}
// MLP block from the original paper with optional GRN layer (v2 models).
fn convnext_mlp(dim: usize, vb: VarBuilder) -> Result<Func<'static>> {
let fc1 = linear(dim, 4 * dim, vb.pp("fc1"))?;
let fc2 = linear(4 * dim, dim, vb.pp("fc2"))?;
let grn = convnext2_grn(4 * dim, true, vb.pp("grn"));
Ok(Func::new(move |xs| {
let mut xs = xs.apply(&fc1)?.gelu_erf()?;
if let Ok(g) = &grn {
xs = xs.apply(g)?;
}
xs = xs.apply(&fc2)?;
Ok(xs)
}))
}
// MLP block using pointwise convolutions, with optional GRN layer (v2 models).
fn convnext_conv_mlp(dim: usize, vb: VarBuilder) -> Result<Func<'static>> {
let conv2d_cfg = Conv2dConfig {
..Default::default()
};
let fc1 = conv2d(dim, 4 * dim, 1, conv2d_cfg, vb.pp("fc1"))?;
let fc2 = conv2d(4 * dim, dim, 1, conv2d_cfg, vb.pp("fc2"))?;
let grn = convnext2_grn(4 * dim, false, vb.pp("grn"));
Ok(Func::new(move |xs| {
let mut xs = xs.apply(&fc1)?.gelu_erf()?;
if let Ok(g) = &grn {
xs = xs.apply(g)?;
}
xs = xs.apply(&fc2)?;
Ok(xs)
}))
}
// A block consisting of a depthwise convolution, an MLP, and layer scaling (the latter only in v1 models).
fn convnext_block(dim: usize, use_conv_mlp: bool, vb: VarBuilder) -> Result<Func<'static>> {
let conv2d_cfg = Conv2dConfig {
groups: dim,
padding: 3,
..Default::default()
};
let conv_dw = conv2d(dim, dim, 7, conv2d_cfg, vb.pp("conv_dw"))?;
let gamma = vb.get(dim, "gamma");
let (mlp, norm) = if use_conv_mlp {
(
convnext_conv_mlp(dim, vb.pp("mlp"))?,
layer_norm_cf(dim, vb.pp("norm"))?,
)
} else {
(
convnext_mlp(dim, vb.pp("mlp"))?,
layer_norm_cl(dim, vb.pp("norm"))?,
)
};
Ok(Func::new(move |xs| {
let residual = xs;
let mut xs = xs.apply(&conv_dw)?;
xs = if use_conv_mlp {
xs.apply(&norm)?.apply(&mlp)?
} else {
xs.permute((0, 2, 3, 1))?
.apply(&norm)?
.apply(&mlp)?
.permute((0, 3, 1, 2))?
};
if let Ok(g) = &gamma {
xs = xs.broadcast_mul(&g.reshape((1, (), 1, 1))?)?;
};
xs + residual
}))
}
// Each stage contains blocks and a downsampling layer for the previous stage.
fn convnext_stage(cfg: &Config, stage_idx: usize, vb: VarBuilder) -> Result<Func<'static>> {
let nblocks = cfg.blocks[stage_idx];
let mut blocks = Vec::with_capacity(nblocks);
let dim = cfg.channels[stage_idx];
if stage_idx > 0 {
blocks.push(convnext_downsample(dim, vb.pp("downsample"))?);
}
for block_idx in 0..nblocks {
blocks.push(convnext_block(
dim,
cfg.use_conv_mlp,
vb.pp(format!("blocks.{block_idx}")),
)?);
}
Ok(Func::new(move |xs| {
let mut xs = xs.clone();
for block in blocks.iter() {
xs = xs.apply(block)?
}
Ok(xs)
}))
}
// Classification head.
fn convnext_head(outputs: usize, nclasses: usize, vb: VarBuilder) -> Result<Func<'static>> {
let norm = layer_norm_cl(outputs, vb.pp("norm"))?;
let linear = linear(outputs, nclasses, vb.pp("fc"))?;
Ok(Func::new(move |xs| xs.apply(&norm)?.apply(&linear)))
}
// Build a convnext model for a given configuration.
fn convnext_model(
config: &Config,
nclasses: Option<usize>,
vb: VarBuilder,
) -> Result<Func<'static>> {
let head = match nclasses {
None => None,
Some(nclasses) => {
let head = convnext_head(config.channels[3], nclasses, vb.pp("head"))?;
Some(head)
}
};
let stem = convnext_stem(config.channels[0], vb.pp("stem"))?;
let vb = vb.pp("stages");
let stage1 = convnext_stage(config, 0, vb.pp(0))?;
let stage2 = convnext_stage(config, 1, vb.pp(1))?;
let stage3 = convnext_stage(config, 2, vb.pp(2))?;
let stage4 = convnext_stage(config, 3, vb.pp(3))?;
Ok(Func::new(move |xs| {
let xs = xs
.apply(&stem)?
.apply(&stage1)?
.apply(&stage2)?
.apply(&stage3)?
.apply(&stage4)?
.mean(D::Minus2)?
.mean(D::Minus1)?;
match &head {
None => Ok(xs),
Some(head) => xs.apply(head),
}
}))
}
pub fn convnext(cfg: &Config, nclasses: usize, vb: VarBuilder) -> Result<Func<'static>> {
convnext_model(cfg, Some(nclasses), vb)
}
pub fn convnext_no_final_layer(cfg: &Config, vb: VarBuilder) -> Result<Func<'static>> {
convnext_model(cfg, None, vb)
}
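// Minimal usage sketch (not part of the original file; `vb`, `img` and the class count are
// assumptions for illustration):
//   let cfg = Config::tiny();
//   let model = convnext(&cfg, 1000, vb)?;  // 1000-way classification head
//   let logits = img.apply(&model)?;        // img: (batch, 3, H, W) -> logits: (batch, 1000)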
| candle/candle-transformers/src/models/convnext.rs/0 | {
"file_path": "candle/candle-transformers/src/models/convnext.rs",
"repo_id": "candle",
"token_count": 4949
} |
use super::model::{attention, timestep_embedding, Config, EmbedNd};
use crate::quantized_nn::{linear, linear_b, Linear};
use crate::quantized_var_builder::VarBuilder;
use candle::{DType, IndexOp, Result, Tensor, D};
use candle_nn::{LayerNorm, RmsNorm};
fn layer_norm(dim: usize, vb: VarBuilder) -> Result<LayerNorm> {
let ws = Tensor::ones(dim, DType::F32, vb.device())?;
Ok(LayerNorm::new_no_bias(ws, 1e-6))
}
#[derive(Debug, Clone)]
pub struct MlpEmbedder {
in_layer: Linear,
out_layer: Linear,
}
impl MlpEmbedder {
fn new(in_sz: usize, h_sz: usize, vb: VarBuilder) -> Result<Self> {
let in_layer = linear(in_sz, h_sz, vb.pp("in_layer"))?;
let out_layer = linear(h_sz, h_sz, vb.pp("out_layer"))?;
Ok(Self {
in_layer,
out_layer,
})
}
}
impl candle::Module for MlpEmbedder {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
xs.apply(&self.in_layer)?.silu()?.apply(&self.out_layer)
}
}
#[derive(Debug, Clone)]
pub struct QkNorm {
query_norm: RmsNorm,
key_norm: RmsNorm,
}
impl QkNorm {
fn new(dim: usize, vb: VarBuilder) -> Result<Self> {
let query_norm = vb.get(dim, "query_norm.scale")?.dequantize(vb.device())?;
let query_norm = RmsNorm::new(query_norm, 1e-6);
let key_norm = vb.get(dim, "key_norm.scale")?.dequantize(vb.device())?;
let key_norm = RmsNorm::new(key_norm, 1e-6);
Ok(Self {
query_norm,
key_norm,
})
}
}
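// adaLN-style modulation: from the conditioning vector the network predicts (shift, scale, gate)
// triples; features are transformed as x * (1 + scale) + shift and the branch output is
// multiplied by gate before being added back to the residual stream.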
struct ModulationOut {
shift: Tensor,
scale: Tensor,
gate: Tensor,
}
impl ModulationOut {
fn scale_shift(&self, xs: &Tensor) -> Result<Tensor> {
xs.broadcast_mul(&(&self.scale + 1.)?)?
.broadcast_add(&self.shift)
}
fn gate(&self, xs: &Tensor) -> Result<Tensor> {
self.gate.broadcast_mul(xs)
}
}
#[derive(Debug, Clone)]
struct Modulation1 {
lin: Linear,
}
impl Modulation1 {
fn new(dim: usize, vb: VarBuilder) -> Result<Self> {
let lin = linear(dim, 3 * dim, vb.pp("lin"))?;
Ok(Self { lin })
}
fn forward(&self, vec_: &Tensor) -> Result<ModulationOut> {
let ys = vec_
.silu()?
.apply(&self.lin)?
.unsqueeze(1)?
.chunk(3, D::Minus1)?;
if ys.len() != 3 {
candle::bail!("unexpected len from chunk {ys:?}")
}
Ok(ModulationOut {
shift: ys[0].clone(),
scale: ys[1].clone(),
gate: ys[2].clone(),
})
}
}
#[derive(Debug, Clone)]
struct Modulation2 {
lin: Linear,
}
impl Modulation2 {
fn new(dim: usize, vb: VarBuilder) -> Result<Self> {
let lin = linear(dim, 6 * dim, vb.pp("lin"))?;
Ok(Self { lin })
}
fn forward(&self, vec_: &Tensor) -> Result<(ModulationOut, ModulationOut)> {
let ys = vec_
.silu()?
.apply(&self.lin)?
.unsqueeze(1)?
.chunk(6, D::Minus1)?;
if ys.len() != 6 {
candle::bail!("unexpected len from chunk {ys:?}")
}
let mod1 = ModulationOut {
shift: ys[0].clone(),
scale: ys[1].clone(),
gate: ys[2].clone(),
};
let mod2 = ModulationOut {
shift: ys[3].clone(),
scale: ys[4].clone(),
gate: ys[5].clone(),
};
Ok((mod1, mod2))
}
}
#[derive(Debug, Clone)]
pub struct SelfAttention {
qkv: Linear,
norm: QkNorm,
proj: Linear,
num_heads: usize,
}
impl SelfAttention {
fn new(dim: usize, num_heads: usize, qkv_bias: bool, vb: VarBuilder) -> Result<Self> {
let head_dim = dim / num_heads;
let qkv = linear_b(dim, dim * 3, qkv_bias, vb.pp("qkv"))?;
let norm = QkNorm::new(head_dim, vb.pp("norm"))?;
let proj = linear(dim, dim, vb.pp("proj"))?;
Ok(Self {
qkv,
norm,
proj,
num_heads,
})
}
fn qkv(&self, xs: &Tensor) -> Result<(Tensor, Tensor, Tensor)> {
let qkv = xs.apply(&self.qkv)?;
let (b, l, _khd) = qkv.dims3()?;
let qkv = qkv.reshape((b, l, 3, self.num_heads, ()))?;
let q = qkv.i((.., .., 0))?.transpose(1, 2)?;
let k = qkv.i((.., .., 1))?.transpose(1, 2)?;
let v = qkv.i((.., .., 2))?.transpose(1, 2)?;
let q = q.apply(&self.norm.query_norm)?;
let k = k.apply(&self.norm.key_norm)?;
Ok((q, k, v))
}
#[allow(unused)]
fn forward(&self, xs: &Tensor, pe: &Tensor) -> Result<Tensor> {
let (q, k, v) = self.qkv(xs)?;
attention(&q, &k, &v, pe)?.apply(&self.proj)
}
}
#[derive(Debug, Clone)]
struct Mlp {
lin1: Linear,
lin2: Linear,
}
impl Mlp {
fn new(in_sz: usize, mlp_sz: usize, vb: VarBuilder) -> Result<Self> {
let lin1 = linear(in_sz, mlp_sz, vb.pp("0"))?;
let lin2 = linear(mlp_sz, in_sz, vb.pp("2"))?;
Ok(Self { lin1, lin2 })
}
}
impl candle::Module for Mlp {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
xs.apply(&self.lin1)?.gelu()?.apply(&self.lin2)
}
}
#[derive(Debug, Clone)]
pub struct DoubleStreamBlock {
img_mod: Modulation2,
img_norm1: LayerNorm,
img_attn: SelfAttention,
img_norm2: LayerNorm,
img_mlp: Mlp,
txt_mod: Modulation2,
txt_norm1: LayerNorm,
txt_attn: SelfAttention,
txt_norm2: LayerNorm,
txt_mlp: Mlp,
}
impl DoubleStreamBlock {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let h_sz = cfg.hidden_size;
let mlp_sz = (h_sz as f64 * cfg.mlp_ratio) as usize;
let img_mod = Modulation2::new(h_sz, vb.pp("img_mod"))?;
let img_norm1 = layer_norm(h_sz, vb.pp("img_norm1"))?;
let img_attn = SelfAttention::new(h_sz, cfg.num_heads, cfg.qkv_bias, vb.pp("img_attn"))?;
let img_norm2 = layer_norm(h_sz, vb.pp("img_norm2"))?;
let img_mlp = Mlp::new(h_sz, mlp_sz, vb.pp("img_mlp"))?;
let txt_mod = Modulation2::new(h_sz, vb.pp("txt_mod"))?;
let txt_norm1 = layer_norm(h_sz, vb.pp("txt_norm1"))?;
let txt_attn = SelfAttention::new(h_sz, cfg.num_heads, cfg.qkv_bias, vb.pp("txt_attn"))?;
let txt_norm2 = layer_norm(h_sz, vb.pp("txt_norm2"))?;
let txt_mlp = Mlp::new(h_sz, mlp_sz, vb.pp("txt_mlp"))?;
Ok(Self {
img_mod,
img_norm1,
img_attn,
img_norm2,
img_mlp,
txt_mod,
txt_norm1,
txt_attn,
txt_norm2,
txt_mlp,
})
}
fn forward(
&self,
img: &Tensor,
txt: &Tensor,
vec_: &Tensor,
pe: &Tensor,
) -> Result<(Tensor, Tensor)> {
let (img_mod1, img_mod2) = self.img_mod.forward(vec_)?; // shift, scale, gate
let (txt_mod1, txt_mod2) = self.txt_mod.forward(vec_)?; // shift, scale, gate
let img_modulated = img.apply(&self.img_norm1)?;
let img_modulated = img_mod1.scale_shift(&img_modulated)?;
let (img_q, img_k, img_v) = self.img_attn.qkv(&img_modulated)?;
let txt_modulated = txt.apply(&self.txt_norm1)?;
let txt_modulated = txt_mod1.scale_shift(&txt_modulated)?;
let (txt_q, txt_k, txt_v) = self.txt_attn.qkv(&txt_modulated)?;
let q = Tensor::cat(&[txt_q, img_q], 2)?;
let k = Tensor::cat(&[txt_k, img_k], 2)?;
let v = Tensor::cat(&[txt_v, img_v], 2)?;
let attn = attention(&q, &k, &v, pe)?;
let txt_attn = attn.narrow(1, 0, txt.dim(1)?)?;
let img_attn = attn.narrow(1, txt.dim(1)?, attn.dim(1)? - txt.dim(1)?)?;
let img = (img + img_mod1.gate(&img_attn.apply(&self.img_attn.proj)?))?;
let img = (&img
+ img_mod2.gate(
&img_mod2
.scale_shift(&img.apply(&self.img_norm2)?)?
.apply(&self.img_mlp)?,
)?)?;
let txt = (txt + txt_mod1.gate(&txt_attn.apply(&self.txt_attn.proj)?))?;
let txt = (&txt
+ txt_mod2.gate(
&txt_mod2
.scale_shift(&txt.apply(&self.txt_norm2)?)?
.apply(&self.txt_mlp)?,
)?)?;
Ok((img, txt))
}
}
#[derive(Debug, Clone)]
pub struct SingleStreamBlock {
linear1: Linear,
linear2: Linear,
norm: QkNorm,
pre_norm: LayerNorm,
modulation: Modulation1,
h_sz: usize,
mlp_sz: usize,
num_heads: usize,
}
impl SingleStreamBlock {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let h_sz = cfg.hidden_size;
let mlp_sz = (h_sz as f64 * cfg.mlp_ratio) as usize;
let head_dim = h_sz / cfg.num_heads;
let linear1 = linear(h_sz, h_sz * 3 + mlp_sz, vb.pp("linear1"))?;
let linear2 = linear(h_sz + mlp_sz, h_sz, vb.pp("linear2"))?;
let norm = QkNorm::new(head_dim, vb.pp("norm"))?;
let pre_norm = layer_norm(h_sz, vb.pp("pre_norm"))?;
let modulation = Modulation1::new(h_sz, vb.pp("modulation"))?;
Ok(Self {
linear1,
linear2,
norm,
pre_norm,
modulation,
h_sz,
mlp_sz,
num_heads: cfg.num_heads,
})
}
fn forward(&self, xs: &Tensor, vec_: &Tensor, pe: &Tensor) -> Result<Tensor> {
let mod_ = self.modulation.forward(vec_)?;
let x_mod = mod_.scale_shift(&xs.apply(&self.pre_norm)?)?;
let x_mod = x_mod.apply(&self.linear1)?;
let qkv = x_mod.narrow(D::Minus1, 0, 3 * self.h_sz)?;
let (b, l, _khd) = qkv.dims3()?;
let qkv = qkv.reshape((b, l, 3, self.num_heads, ()))?;
let q = qkv.i((.., .., 0))?.transpose(1, 2)?;
let k = qkv.i((.., .., 1))?.transpose(1, 2)?;
let v = qkv.i((.., .., 2))?.transpose(1, 2)?;
let mlp = x_mod.narrow(D::Minus1, 3 * self.h_sz, self.mlp_sz)?;
let q = q.apply(&self.norm.query_norm)?;
let k = k.apply(&self.norm.key_norm)?;
let attn = attention(&q, &k, &v, pe)?;
let output = Tensor::cat(&[attn, mlp.gelu()?], 2)?.apply(&self.linear2)?;
xs + mod_.gate(&output)
}
}
#[derive(Debug, Clone)]
pub struct LastLayer {
norm_final: LayerNorm,
linear: Linear,
ada_ln_modulation: Linear,
}
impl LastLayer {
fn new(h_sz: usize, p_sz: usize, out_c: usize, vb: VarBuilder) -> Result<Self> {
let norm_final = layer_norm(h_sz, vb.pp("norm_final"))?;
let linear_ = linear(h_sz, p_sz * p_sz * out_c, vb.pp("linear"))?;
let ada_ln_modulation = linear(h_sz, 2 * h_sz, vb.pp("adaLN_modulation.1"))?;
Ok(Self {
norm_final,
linear: linear_,
ada_ln_modulation,
})
}
fn forward(&self, xs: &Tensor, vec: &Tensor) -> Result<Tensor> {
let chunks = vec.silu()?.apply(&self.ada_ln_modulation)?.chunk(2, 1)?;
let (shift, scale) = (&chunks[0], &chunks[1]);
let xs = xs
.apply(&self.norm_final)?
.broadcast_mul(&(scale.unsqueeze(1)? + 1.0)?)?
.broadcast_add(&shift.unsqueeze(1)?)?;
xs.apply(&self.linear)
}
}
#[derive(Debug, Clone)]
pub struct Flux {
img_in: Linear,
txt_in: Linear,
time_in: MlpEmbedder,
vector_in: MlpEmbedder,
guidance_in: Option<MlpEmbedder>,
pe_embedder: EmbedNd,
double_blocks: Vec<DoubleStreamBlock>,
single_blocks: Vec<SingleStreamBlock>,
final_layer: LastLayer,
}
impl Flux {
pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let img_in = linear(cfg.in_channels, cfg.hidden_size, vb.pp("img_in"))?;
let txt_in = linear(cfg.context_in_dim, cfg.hidden_size, vb.pp("txt_in"))?;
let mut double_blocks = Vec::with_capacity(cfg.depth);
let vb_d = vb.pp("double_blocks");
for idx in 0..cfg.depth {
let db = DoubleStreamBlock::new(cfg, vb_d.pp(idx))?;
double_blocks.push(db)
}
let mut single_blocks = Vec::with_capacity(cfg.depth_single_blocks);
let vb_s = vb.pp("single_blocks");
for idx in 0..cfg.depth_single_blocks {
let sb = SingleStreamBlock::new(cfg, vb_s.pp(idx))?;
single_blocks.push(sb)
}
let time_in = MlpEmbedder::new(256, cfg.hidden_size, vb.pp("time_in"))?;
let vector_in = MlpEmbedder::new(cfg.vec_in_dim, cfg.hidden_size, vb.pp("vector_in"))?;
let guidance_in = if cfg.guidance_embed {
let mlp = MlpEmbedder::new(256, cfg.hidden_size, vb.pp("guidance_in"))?;
Some(mlp)
} else {
None
};
let final_layer =
LastLayer::new(cfg.hidden_size, 1, cfg.in_channels, vb.pp("final_layer"))?;
let pe_dim = cfg.hidden_size / cfg.num_heads;
let pe_embedder = EmbedNd::new(pe_dim, cfg.theta, cfg.axes_dim.to_vec());
Ok(Self {
img_in,
txt_in,
time_in,
vector_in,
guidance_in,
pe_embedder,
double_blocks,
single_blocks,
final_layer,
})
}
}
impl super::WithForward for Flux {
#[allow(clippy::too_many_arguments)]
fn forward(
&self,
img: &Tensor,
img_ids: &Tensor,
txt: &Tensor,
txt_ids: &Tensor,
timesteps: &Tensor,
y: &Tensor,
guidance: Option<&Tensor>,
) -> Result<Tensor> {
if txt.rank() != 3 {
candle::bail!("unexpected shape for txt {:?}", txt.shape())
}
if img.rank() != 3 {
candle::bail!("unexpected shape for img {:?}", img.shape())
}
let dtype = img.dtype();
let pe = {
let ids = Tensor::cat(&[txt_ids, img_ids], 1)?;
ids.apply(&self.pe_embedder)?
};
let mut txt = txt.apply(&self.txt_in)?;
let mut img = img.apply(&self.img_in)?;
let vec_ = timestep_embedding(timesteps, 256, dtype)?.apply(&self.time_in)?;
let vec_ = match (self.guidance_in.as_ref(), guidance) {
(Some(g_in), Some(guidance)) => {
(vec_ + timestep_embedding(guidance, 256, dtype)?.apply(g_in))?
}
_ => vec_,
};
let vec_ = (vec_ + y.apply(&self.vector_in))?;
// Double blocks
for block in self.double_blocks.iter() {
(img, txt) = block.forward(&img, &txt, &vec_, &pe)?
}
// Single blocks
let mut img = Tensor::cat(&[&txt, &img], 1)?;
for block in self.single_blocks.iter() {
img = block.forward(&img, &vec_, &pe)?;
}
let img = img.i((.., txt.dim(1)?..))?;
self.final_layer.forward(&img, &vec_)
}
}
| candle/candle-transformers/src/models/flux/quantized_model.rs/0 | {
"file_path": "candle/candle-transformers/src/models/flux/quantized_model.rs",
"repo_id": "candle",
"token_count": 7943
} |
//! Marian Neural Machine Translation
//!
//! See "Marian: Fast Neural Machine Translation in C++" Junczys-Dowmunt et al. 2018
//! - [ACL Anthology](https://aclanthology.org/P18-4020/)
//! - [Github](https://github.com/marian-nmt/marian)
//!
use super::with_tracing::{linear, Embedding, Linear};
use candle::{Result, Tensor};
use candle_nn::{layer_norm, LayerNorm, VarBuilder};
#[derive(Debug, Clone, serde::Deserialize)]
pub struct Config {
pub vocab_size: usize,
pub decoder_vocab_size: Option<usize>,
pub max_position_embeddings: usize,
pub encoder_layers: usize,
pub encoder_ffn_dim: usize,
pub encoder_attention_heads: usize,
pub decoder_layers: usize,
pub decoder_ffn_dim: usize,
pub decoder_attention_heads: usize,
pub use_cache: bool,
pub is_encoder_decoder: bool,
pub activation_function: candle_nn::Activation,
pub d_model: usize,
pub decoder_start_token_id: u32,
pub scale_embedding: bool,
pub pad_token_id: u32,
pub eos_token_id: u32,
pub forced_eos_token_id: u32,
pub share_encoder_decoder_embeddings: bool,
}
impl Config {
// https://huggingface.co/Helsinki-NLP/opus-mt-tc-big-fr-en/blob/main/config.json
pub fn opus_mt_tc_big_fr_en() -> Self {
Self {
activation_function: candle_nn::Activation::Relu,
d_model: 1024,
decoder_attention_heads: 16,
decoder_ffn_dim: 4096,
decoder_layers: 6,
decoder_start_token_id: 53016,
decoder_vocab_size: Some(53017),
encoder_attention_heads: 16,
encoder_ffn_dim: 4096,
encoder_layers: 6,
eos_token_id: 43311,
forced_eos_token_id: 43311,
is_encoder_decoder: true,
max_position_embeddings: 1024,
pad_token_id: 53016,
scale_embedding: true,
share_encoder_decoder_embeddings: true,
use_cache: true,
vocab_size: 53017,
}
}
// https://huggingface.co/Helsinki-NLP/opus-mt-fr-en/blob/main/config.json
pub fn opus_mt_fr_en() -> Self {
Self {
activation_function: candle_nn::Activation::Swish,
d_model: 512,
decoder_attention_heads: 8,
decoder_ffn_dim: 2048,
decoder_layers: 6,
decoder_start_token_id: 59513,
decoder_vocab_size: Some(59514),
encoder_attention_heads: 8,
encoder_ffn_dim: 2048,
encoder_layers: 6,
eos_token_id: 0,
forced_eos_token_id: 0,
is_encoder_decoder: true,
max_position_embeddings: 512,
pad_token_id: 59513,
scale_embedding: true,
share_encoder_decoder_embeddings: true,
use_cache: true,
vocab_size: 59514,
}
}
}
#[derive(Debug, Clone)]
struct SinusoidalPositionalEmbedding {
emb: Embedding,
}
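// Fixed sinusoidal position table. Following the Marian/Bart convention, sin and cos are
// concatenated along the feature dimension (the first d/2 channels are sines, the last d/2
// cosines) rather than interleaved:
//   PE[pos, j]       = sin(pos * 10000^(-2j/d))   for j < d/2
//   PE[pos, d/2 + j] = cos(pos * 10000^(-2j/d))   for j < d/2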
impl SinusoidalPositionalEmbedding {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let dev = vb.device();
let dtype = vb.dtype();
let num_positions = cfg.max_position_embeddings;
let dim = cfg.d_model;
let inv_freq: Vec<_> = (0..dim)
.step_by(2)
.map(|i| 1f32 / 10000f32.powf(i as f32 / dim as f32))
.collect();
let inv_freq_len = inv_freq.len();
let inv_freq = Tensor::from_vec(inv_freq, (1, inv_freq_len), dev)?.to_dtype(dtype)?;
let t = Tensor::arange(0u32, num_positions as u32, dev)?
.to_dtype(dtype)?
.reshape((num_positions, 1))?;
let freqs = t.matmul(&inv_freq)?;
let sin = freqs.sin()?;
let cos = freqs.cos()?;
let weights = Tensor::cat(&[&sin, &cos], 1)?.contiguous()?;
let emb = Embedding::from_weights(weights)?;
Ok(Self { emb })
}
fn forward(&self, input_ids: &Tensor, past_kv_len: usize) -> Result<Tensor> {
let seq_len = input_ids.dim(1)?;
Tensor::arange(
past_kv_len as u32,
(past_kv_len + seq_len) as u32,
input_ids.device(),
)?
.apply(&self.emb)
}
}
#[derive(Debug, Clone)]
struct Attention {
q_proj: Linear,
k_proj: Linear,
v_proj: Linear,
out_proj: Linear,
scaling: f64,
num_heads: usize,
head_dim: usize,
kv_cache: Option<(Tensor, Tensor)>,
is_decoder: bool,
}
impl Attention {
fn new(cfg: &Config, is_decoder: bool, vb: VarBuilder) -> Result<Self> {
let num_heads = if is_decoder {
cfg.decoder_attention_heads
} else {
cfg.encoder_attention_heads
};
let embed_dim = cfg.d_model;
let head_dim = embed_dim / num_heads;
let scaling = (head_dim as f64).powf(-0.5);
let q_proj = linear(embed_dim, embed_dim, vb.pp("q_proj"))?;
let k_proj = linear(embed_dim, embed_dim, vb.pp("k_proj"))?;
let v_proj = linear(embed_dim, embed_dim, vb.pp("v_proj"))?;
let out_proj = linear(embed_dim, embed_dim, vb.pp("out_proj"))?;
Ok(Self {
q_proj,
k_proj,
v_proj,
out_proj,
scaling,
num_heads,
head_dim,
kv_cache: None,
is_decoder,
})
}
fn _shape(&self, tensor: &Tensor, bsz: usize) -> Result<Tensor> {
tensor
.reshape((bsz, (), self.num_heads, self.head_dim))?
.transpose(1, 2)?
.contiguous()
}
fn forward(
&mut self,
xs: &Tensor,
kv_states: Option<&Tensor>,
attn_mask: Option<&Tensor>,
) -> Result<Tensor> {
let (b_sz, tgt_len, _) = xs.dims3()?;
let query_states = (xs.apply(&self.q_proj)? * self.scaling)?;
let (key_states, value_states) = match kv_states {
None => {
let key_states = self._shape(&xs.apply(&self.k_proj)?, b_sz)?;
let value_states = self._shape(&xs.apply(&self.v_proj)?, b_sz)?;
if self.is_decoder {
let kv_states = match &self.kv_cache {
None => (key_states, value_states),
Some((p_key_states, p_value_states)) => {
let key_states = Tensor::cat(&[p_key_states, &key_states], 2)?;
let value_states = Tensor::cat(&[p_value_states, &value_states], 2)?;
(key_states, value_states)
}
};
self.kv_cache = Some(kv_states.clone());
kv_states
} else {
(key_states, value_states)
}
}
Some(kv_states) => {
let key_states = self._shape(&kv_states.apply(&self.k_proj)?, b_sz)?;
let value_states = self._shape(&kv_states.apply(&self.v_proj)?, b_sz)?;
(key_states, value_states)
}
};
let proj_shape = (b_sz * self.num_heads, (), self.head_dim);
let query_states = self._shape(&query_states, b_sz)?.reshape(proj_shape)?;
let key_states = key_states.reshape(proj_shape)?;
let value_states = value_states.reshape(proj_shape)?;
let attn_weights = query_states.matmul(&key_states.transpose(1, 2)?)?;
let attn_weights = match attn_mask {
None => attn_weights,
Some(attn_mask) => attn_weights.broadcast_add(attn_mask)?,
};
let attn_probs = candle_nn::ops::softmax_last_dim(&attn_weights)?;
let attn_output = attn_probs.matmul(&value_states)?;
attn_output
.reshape((b_sz, self.num_heads, tgt_len, self.head_dim))?
.transpose(1, 2)?
.reshape((b_sz, tgt_len, self.head_dim * self.num_heads))?
.apply(&self.out_proj)
}
fn reset_kv_cache(&mut self) {
self.kv_cache = None
}
}
#[derive(Debug, Clone)]
struct EncoderLayer {
self_attn: Attention,
self_attn_layer_norm: LayerNorm,
activation_fn: candle_nn::Activation,
fc1: Linear,
fc2: Linear,
final_layer_norm: LayerNorm,
}
impl EncoderLayer {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let self_attn = Attention::new(cfg, true, vb.pp("self_attn"))?;
let self_attn_layer_norm = layer_norm(cfg.d_model, 1e-5, vb.pp("self_attn_layer_norm"))?;
let fc1 = linear(cfg.d_model, cfg.encoder_ffn_dim, vb.pp("fc1"))?;
let fc2 = linear(cfg.encoder_ffn_dim, cfg.d_model, vb.pp("fc2"))?;
let final_layer_norm = layer_norm(cfg.d_model, 1e-5, vb.pp("final_layer_norm"))?;
Ok(Self {
self_attn,
self_attn_layer_norm,
activation_fn: cfg.activation_function,
fc1,
fc2,
final_layer_norm,
})
}
fn forward(&mut self, xs: &Tensor) -> Result<Tensor> {
let residual = xs;
let xs = (self.self_attn.forward(xs, None, None)? + residual)?
.apply(&self.self_attn_layer_norm)?;
let residual = &xs;
let xs = xs
.apply(&self.fc1)?
.apply(&self.activation_fn)?
.apply(&self.fc2)?;
(xs + residual)?.apply(&self.final_layer_norm)
}
fn reset_kv_cache(&mut self) {
self.self_attn.reset_kv_cache()
}
}
#[derive(Debug, Clone)]
struct DecoderLayer {
self_attn: Attention,
self_attn_layer_norm: LayerNorm,
activation_fn: candle_nn::Activation,
encoder_attn: Attention,
encoder_attn_layer_norm: LayerNorm,
fc1: Linear,
fc2: Linear,
final_layer_norm: LayerNorm,
}
impl DecoderLayer {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let self_attn = Attention::new(cfg, true, vb.pp("self_attn"))?;
let self_attn_layer_norm = layer_norm(cfg.d_model, 1e-5, vb.pp("self_attn_layer_norm"))?;
let encoder_attn = Attention::new(cfg, true, vb.pp("encoder_attn"))?;
let encoder_attn_layer_norm =
layer_norm(cfg.d_model, 1e-5, vb.pp("encoder_attn_layer_norm"))?;
let fc1 = linear(cfg.d_model, cfg.decoder_ffn_dim, vb.pp("fc1"))?;
let fc2 = linear(cfg.decoder_ffn_dim, cfg.d_model, vb.pp("fc2"))?;
let final_layer_norm = layer_norm(cfg.d_model, 1e-5, vb.pp("final_layer_norm"))?;
Ok(Self {
self_attn,
self_attn_layer_norm,
activation_fn: cfg.activation_function,
encoder_attn,
encoder_attn_layer_norm,
fc1,
fc2,
final_layer_norm,
})
}
fn forward(
&mut self,
xs: &Tensor,
encoder_xs: Option<&Tensor>,
attn_mask: &Tensor,
) -> Result<Tensor> {
let residual = xs;
let xs = (self.self_attn.forward(xs, None, Some(attn_mask))? + residual)?
.apply(&self.self_attn_layer_norm)?;
let xs = match encoder_xs {
None => xs,
Some(encoder_xs) => {
let residual = &xs;
let xs = self.encoder_attn.forward(&xs, Some(encoder_xs), None)?;
(residual + xs)?.apply(&self.encoder_attn_layer_norm)?
}
};
let residual = &xs;
let xs = xs
.apply(&self.fc1)?
.apply(&self.activation_fn)?
.apply(&self.fc2)?;
let xs = (xs + residual)?.apply(&self.final_layer_norm)?;
Ok(xs)
}
fn reset_kv_cache(&mut self) {
self.self_attn.reset_kv_cache();
self.encoder_attn.reset_kv_cache()
}
}
#[derive(Debug, Clone)]
pub struct Encoder {
embed_tokens: Embedding,
embed_positions: SinusoidalPositionalEmbedding,
layers: Vec<EncoderLayer>,
embed_scale: Option<f64>,
}
impl Encoder {
fn new(cfg: &Config, embed_tokens: &Embedding, vb: VarBuilder) -> Result<Self> {
let embed_positions = SinusoidalPositionalEmbedding::new(cfg, vb.pp("embed_positions"))?;
let mut layers = Vec::with_capacity(cfg.encoder_layers);
let vb_l = vb.pp("layers");
for idx in 0..cfg.encoder_layers {
let layer = EncoderLayer::new(cfg, vb_l.pp(idx))?;
layers.push(layer)
}
let embed_scale = if cfg.scale_embedding {
Some((cfg.d_model as f64).sqrt())
} else {
None
};
Ok(Self {
embed_tokens: embed_tokens.clone(),
embed_positions,
layers,
embed_scale,
})
}
pub fn forward(&mut self, xs: &Tensor, past_kv_len: usize) -> Result<Tensor> {
let xs = xs.apply(&self.embed_tokens)?;
let xs = match self.embed_scale {
None => xs,
Some(scale) => (xs * scale)?,
};
let embed_pos = self
.embed_positions
.forward(&xs, past_kv_len)?
.unsqueeze(0)?;
let mut xs = xs.broadcast_add(&embed_pos)?;
for layer in self.layers.iter_mut() {
xs = layer.forward(&xs)?
}
Ok(xs)
}
pub fn reset_kv_cache(&mut self) {
for layer in self.layers.iter_mut() {
layer.reset_kv_cache()
}
}
}
#[derive(Debug, Clone)]
pub struct Decoder {
embed_tokens: Embedding,
embed_positions: SinusoidalPositionalEmbedding,
layers: Vec<DecoderLayer>,
embed_scale: Option<f64>,
}
impl Decoder {
fn new(cfg: &Config, embed_tokens: &Embedding, vb: VarBuilder) -> Result<Self> {
let embed_positions = SinusoidalPositionalEmbedding::new(cfg, vb.pp("embed_positions"))?;
let mut layers = Vec::with_capacity(cfg.decoder_layers);
let vb_l = vb.pp("layers");
for idx in 0..cfg.decoder_layers {
let layer = DecoderLayer::new(cfg, vb_l.pp(idx))?;
layers.push(layer)
}
let embed_scale = if cfg.scale_embedding {
Some((cfg.d_model as f64).sqrt())
} else {
None
};
Ok(Self {
embed_tokens: embed_tokens.clone(),
embed_positions,
layers,
embed_scale,
})
}
pub fn forward(
&mut self,
xs: &Tensor,
encoder_xs: Option<&Tensor>,
past_kv_len: usize,
attn_mask: &Tensor,
) -> Result<Tensor> {
let xs = xs.apply(&self.embed_tokens)?;
let xs = match self.embed_scale {
None => xs,
Some(scale) => (xs * scale)?,
};
let embed_pos = self
.embed_positions
.forward(&xs, past_kv_len)?
.unsqueeze(0)?;
let mut xs = xs.broadcast_add(&embed_pos)?;
for layer in self.layers.iter_mut() {
xs = layer.forward(&xs, encoder_xs, attn_mask)?;
}
Ok(xs)
}
pub fn reset_kv_cache(&mut self) {
for layer in self.layers.iter_mut() {
layer.reset_kv_cache()
}
}
}
#[derive(Debug, Clone)]
struct Model {
shared: Embedding,
encoder: Encoder,
decoder: Decoder,
}
impl Model {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let shared = Embedding::new(cfg.vocab_size, cfg.d_model, vb.pp("shared"))?;
let encoder = Encoder::new(cfg, &shared, vb.pp("encoder"))?;
let decoder = Decoder::new(cfg, &shared, vb.pp("decoder"))?;
Ok(Self {
shared,
encoder,
decoder,
})
}
fn reset_kv_cache(&mut self) {
self.encoder.reset_kv_cache();
self.decoder.reset_kv_cache();
}
}
#[derive(Debug, Clone)]
pub struct MTModel {
model: Model,
lm_head: Linear,
final_logits_bias: Tensor,
}
impl MTModel {
pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let target_vocab_size = cfg.decoder_vocab_size.unwrap_or(cfg.vocab_size);
let final_logits_bias = vb.get((1, target_vocab_size), "final_logits_bias")?;
let model = Model::new(cfg, vb.pp("model"))?;
let lm_head = Linear::from_weights(model.shared.embeddings().clone(), None);
Ok(Self {
model,
lm_head,
final_logits_bias,
})
}
pub fn encoder(&mut self) -> &mut Encoder {
&mut self.model.encoder
}
pub fn decoder(&mut self) -> &mut Decoder {
&mut self.model.decoder
}
pub fn decode(
&mut self,
xs: &Tensor,
encoder_xs: &Tensor,
past_kv_len: usize,
) -> Result<Tensor> {
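        // Build a causal mask so that each position only attends to itself and earlier positions.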
let seq_len = xs.dim(1)?;
let mask: Vec<_> = (0..seq_len)
.flat_map(|i| (0..seq_len).map(move |j| if j > i { f32::NEG_INFINITY } else { 0f32 }))
.collect();
let mask = Tensor::from_vec(mask, (seq_len, seq_len), xs.device())?;
self.model
.decoder
.forward(xs, Some(encoder_xs), past_kv_len, &mask)?
.apply(&self.lm_head)?
.broadcast_add(&self.final_logits_bias)
}
pub fn reset_kv_cache(&mut self) {
self.model.reset_kv_cache();
}
}
| candle/candle-transformers/src/models/marian.rs/0 | {
"file_path": "candle/candle-transformers/src/models/marian.rs",
"repo_id": "candle",
"token_count": 9013
} |
//! Mobile CLIP model, combining a lightweight vision encoder with a text encoder
//!
//! A mobile-optimized CLIP implementation that uses:
//! - FastViT as the vision encoder
//! - OpenCLIP text encoder
//! - Projection layers to align the feature spaces
//!
//! See model details at:
//! - [FastViT](https://arxiv.org/abs/2303.14189)
//! - [OpenCLIP](https://github.com/mlfoundations/open_clip)
//!
//! References:
//! - [MobileVLM](https://huggingface.co/mobileVLM)
//! - [MetaCLIP](https://arxiv.org/abs/2309.16671)
//!
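//! ## Example (sketch)
//!
//! A minimal, hedged usage sketch. The weight file name is a placeholder and the dummy
//! tensors only illustrate the expected shapes (a 256x256 RGB image and a tokenized
//! caption, here assumed to use the 77-token OpenCLIP context length):
//!
//! ```ignore
//! use candle::{DType, Device, Tensor};
//! use candle_nn::VarBuilder;
//! use candle_transformers::models::mobileclip::{MobileClipConfig, MobileClipModel};
//!
//! fn run() -> candle::Result<()> {
//!     let device = Device::Cpu;
//!     let cfg = MobileClipConfig::s1();
//!     // Assumes the safetensors weights have already been downloaded locally.
//!     let vb = unsafe {
//!         VarBuilder::from_mmaped_safetensors(&["mobileclip_s1.safetensors"], DType::F32, &device)?
//!     };
//!     let model = MobileClipModel::new(vb, &cfg)?;
//!     let pixel_values = Tensor::zeros((1, 3, cfg.image_size, cfg.image_size), DType::F32, &device)?;
//!     let input_ids = Tensor::zeros((1, 77), DType::U32, &device)?;
//!     let (logits_per_text, logits_per_image) = model.forward(&pixel_values, &input_ids)?;
//!     println!("{logits_per_text}\n{logits_per_image}");
//!     Ok(())
//! }
//! ```
//!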
use super::fastvit;
use super::openclip::text_model;
use candle::{Result, Tensor, D};
use candle_nn::{Func, VarBuilder};
#[derive(Clone, Debug)]
pub struct MobileClipModel {
text_model: text_model::OpenClipTextTransformer,
vision_model: Func<'static>,
text_projection: Tensor,
logit_scale: Tensor,
}
#[derive(Clone, Debug)]
pub struct MobileClipConfig {
pub text_config: text_model::Config,
pub vision_config: fastvit::Config,
pub image_size: usize,
}
impl MobileClipConfig {
pub fn s1() -> Self {
let text_config = text_model::Config::vit_base_patch32();
let vision_config = fastvit::Config::mci1();
Self {
text_config,
vision_config,
image_size: 256,
}
}
pub fn s2() -> Self {
let text_config = text_model::Config::vit_base_patch32();
let vision_config = fastvit::Config::mci2();
Self {
text_config,
vision_config,
image_size: 256,
}
}
}
impl MobileClipModel {
pub fn new(vs: VarBuilder, c: &MobileClipConfig) -> Result<Self> {
let vision_model = fastvit::fastvit(&c.vision_config, 512, vs.pp("visual.trunk"))?;
let text_model = text_model::OpenClipTextTransformer::new(vs.pp("text"), &c.text_config)?;
let text_projection = vs.get(
(c.text_config.embed_dim, c.text_config.projection_dim),
"text.text_projection",
)?;
let logit_scale = vs.get(&[], "logit_scale")?;
Ok(Self {
text_model,
vision_model,
text_projection,
logit_scale,
})
}
pub fn get_text_features(&self, input_ids: &Tensor) -> Result<Tensor> {
input_ids
.apply(&self.text_model)?
.matmul(&self.text_projection)
}
pub fn get_image_features(&self, pixel_values: &Tensor) -> Result<Tensor> {
pixel_values.apply(&self.vision_model)
}
pub fn forward(&self, pixel_values: &Tensor, input_ids: &Tensor) -> Result<(Tensor, Tensor)> {
let image_features = self.get_image_features(pixel_values)?;
let text_features = self.get_text_features(input_ids)?;
let image_features_normalized = div_l2_norm(&image_features)?;
let text_features_normalized = div_l2_norm(&text_features)?;
let logits_per_text = text_features_normalized.matmul(&image_features_normalized.t()?)?;
let logit_scale = self.logit_scale.exp()?;
let logits_per_text = logits_per_text.broadcast_mul(&logit_scale)?;
let logits_per_image = logits_per_text.t()?;
Ok((logits_per_text, logits_per_image))
}
}
pub fn div_l2_norm(v: &Tensor) -> Result<Tensor> {
let l2_norm = v.sqr()?.sum_keepdim(D::Minus1)?.sqrt()?;
v.broadcast_div(&l2_norm)
}
| candle/candle-transformers/src/models/mobileclip.rs/0 | {
"file_path": "candle/candle-transformers/src/models/mobileclip.rs",
"repo_id": "candle",
"token_count": 1499
} |
//! Microsoft Phi model implementation
//!
//! The Phi series are decoder-only transformers designed for code and language tasks.
//!
//! Key characteristics:
//! - Decoder-only transformer architecture
//! - RoPE embeddings
//! - Layer normalization
//! - QK normalization
//!
//! - ⚡ [Interactive Wasm Example](https://huggingface.co/spaces/radames/Candle-phi1-phi2-wasm-demo)
//! - 🤗 [HF Link](https://huggingface.co/microsoft/phi-2)
//!
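//! ## Example (sketch)
//!
//! A minimal, hedged sketch of a single forward pass. The file names are placeholders
//! and the token ids would normally come from a tokenizer:
//!
//! ```ignore
//! use candle::{DType, Device, Tensor};
//! use candle_nn::VarBuilder;
//! use candle_transformers::models::phi::{Config, Model};
//!
//! fn run() -> Result<(), Box<dyn std::error::Error>> {
//!     let device = Device::Cpu;
//!     let config: Config = serde_json::from_str(&std::fs::read_to_string("config.json")?)?;
//!     let vb = unsafe {
//!         VarBuilder::from_mmaped_safetensors(&["model.safetensors"], DType::F32, &device)?
//!     };
//!     let mut model = Model::new(&config, vb)?;
//!     let input_ids = Tensor::zeros((1, 8), DType::U32, &device)?;
//!     // Returns the logits for the last position only, shape (batch, vocab_size).
//!     let logits = model.forward(&input_ids)?;
//!     println!("{:?}", logits.shape());
//!     Ok(())
//! }
//! ```
//!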
use crate::models::with_tracing::{layer_norm, linear, Embedding, LayerNorm, Linear};
/// Phi model.
/// https://huggingface.co/microsoft/phi-2
/// There is an alternative implementation of the phi model in mixformers.rs.
/// This corresponds to the model update made with the following commit:
/// https://huggingface.co/microsoft/phi-2/commit/cb2f4533604d8b67de604e7df03bfe6f3ca22869
use candle::{DType, Device, IndexOp, Module, Result, Tensor, D};
use candle_nn::{Activation, VarBuilder};
use serde::Deserialize;
// https://huggingface.co/microsoft/phi-2/blob/main/configuration_phi.py
#[derive(Debug, Clone, PartialEq, Deserialize)]
pub struct Config {
pub(crate) vocab_size: usize,
pub(crate) hidden_size: usize,
pub(crate) intermediate_size: usize,
pub(crate) num_hidden_layers: usize,
pub(crate) num_attention_heads: usize,
pub(crate) num_key_value_heads: Option<usize>,
pub(crate) hidden_act: Activation,
pub(crate) max_position_embeddings: usize,
pub(crate) layer_norm_eps: f64,
pub(crate) tie_word_embeddings: bool,
pub(crate) rope_theta: f32,
pub(crate) partial_rotary_factor: f64,
pub(crate) qk_layernorm: bool,
}
impl Config {
fn num_key_value_heads(&self) -> usize {
self.num_key_value_heads.unwrap_or(self.num_attention_heads)
}
fn head_dim(&self) -> usize {
self.hidden_size / self.num_attention_heads
}
}
#[derive(Debug, Clone)]
struct RotaryEmbedding {
dim: usize,
sin: Tensor,
cos: Tensor,
}
impl RotaryEmbedding {
fn new(cfg: &Config, dev: &Device) -> Result<Self> {
let dim = (cfg.partial_rotary_factor * cfg.head_dim() as f64) as usize;
let inv_freq: Vec<_> = (0..dim)
.step_by(2)
.map(|i| 1f32 / cfg.rope_theta.powf(i as f32 / dim as f32))
.collect();
let inv_freq_len = inv_freq.len();
let inv_freq = Tensor::from_vec(inv_freq, (1, inv_freq_len), dev)?;
let t = Tensor::arange(0u32, cfg.max_position_embeddings as u32, dev)?
.to_dtype(DType::F32)?
.reshape((cfg.max_position_embeddings, 1))?;
let freqs = t.matmul(&inv_freq)?;
Ok(Self {
dim,
sin: freqs.sin()?,
cos: freqs.cos()?,
})
}
fn apply_rotary_emb(&self, xs: &Tensor, seqlen_offset: usize) -> Result<Tensor> {
let (_b_size, _num_heads, seq_len, _headdim) = xs.dims4()?;
let xs_rot = xs.i((.., .., .., ..self.dim))?.contiguous()?;
let xs_pass = xs.i((.., .., .., self.dim..))?;
let c = self.cos.narrow(0, seqlen_offset, seq_len)?;
let s = self.sin.narrow(0, seqlen_offset, seq_len)?;
let xs_rot = candle_nn::rotary_emb::rope(&xs_rot, &c, &s)?;
Tensor::cat(&[&xs_rot, &xs_pass], D::Minus1)
}
}
#[derive(Debug, Clone)]
#[allow(clippy::upper_case_acronyms)]
struct MLP {
fc1: Linear,
fc2: Linear,
act: Activation,
}
impl MLP {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let fc1 = linear(cfg.hidden_size, cfg.intermediate_size, vb.pp("fc1"))?;
let fc2 = linear(cfg.intermediate_size, cfg.hidden_size, vb.pp("fc2"))?;
Ok(Self {
fc1,
fc2,
// This does not match the mixformers implementation where Gelu is used rather than
// GeluNew.
act: cfg.hidden_act,
})
}
}
impl Module for MLP {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
xs.apply(&self.fc1)?.apply(&self.act)?.apply(&self.fc2)
}
}
#[derive(Clone)]
struct Attention {
q_proj: Linear,
k_proj: Linear,
v_proj: Linear,
dense: Linear,
kv_cache: Option<(Tensor, Tensor)>,
q_layernorm: Option<LayerNorm>,
k_layernorm: Option<LayerNorm>,
rotary_emb: RotaryEmbedding,
softmax_scale: f64,
num_heads: usize,
num_kv_heads: usize,
head_dim: usize,
span: tracing::Span,
}
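// Upper-triangular mask (1 strictly above the diagonal) used to block attention to future positions.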
fn get_mask(size: usize, device: &Device) -> Result<Tensor> {
let mask: Vec<_> = (0..size)
.flat_map(|i| (0..size).map(move |j| u8::from(j > i)))
.collect();
Tensor::from_slice(&mask, (size, size), device)
}
fn masked_fill(on_false: &Tensor, mask: &Tensor, on_true: f32) -> Result<Tensor> {
let shape = mask.shape();
let on_true = Tensor::new(on_true, on_false.device())?.broadcast_as(shape.dims())?;
let m = mask.where_cond(&on_true, on_false)?;
Ok(m)
}
impl Attention {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let num_heads = cfg.num_attention_heads;
let num_kv_heads = cfg.num_key_value_heads();
let head_dim = cfg.head_dim();
let q_proj = linear(cfg.hidden_size, num_heads * head_dim, vb.pp("q_proj"))?;
let k_proj = linear(cfg.hidden_size, num_kv_heads * head_dim, vb.pp("k_proj"))?;
let v_proj = linear(cfg.hidden_size, num_kv_heads * head_dim, vb.pp("v_proj"))?;
let dense = linear(num_heads * head_dim, cfg.hidden_size, vb.pp("dense"))?;
// Alternative rope scalings are not supported.
let rotary_emb = RotaryEmbedding::new(cfg, vb.device())?;
let (q_layernorm, k_layernorm) = if cfg.qk_layernorm {
let q_layernorm = layer_norm(head_dim, cfg.layer_norm_eps, vb.pp("q_layernorm"))?;
let k_layernorm = layer_norm(head_dim, cfg.layer_norm_eps, vb.pp("k_layernorm"))?;
(Some(q_layernorm), Some(k_layernorm))
} else {
(None, None)
};
let softmax_scale = 1f64 / (head_dim as f64).sqrt();
Ok(Self {
q_proj,
k_proj,
v_proj,
dense,
kv_cache: None,
q_layernorm,
k_layernorm,
rotary_emb,
softmax_scale,
num_heads,
num_kv_heads,
head_dim,
span: tracing::span!(tracing::Level::TRACE, "attention"),
})
}
fn repeat_kv(&self, xs: Tensor) -> Result<Tensor> {
crate::utils::repeat_kv(xs, self.num_heads / self.num_kv_heads)
}
fn forward(&mut self, xs: &Tensor, mask: Option<&Tensor>) -> Result<Tensor> {
let _enter = self.span.enter();
let (b_size, seq_len, _n_embd) = xs.dims3()?;
let query_states = self.q_proj.forward(xs)?;
let key_states = self.k_proj.forward(xs)?;
let value_states = self.v_proj.forward(xs)?;
let query_states = match &self.q_layernorm {
None => query_states,
Some(ln) => query_states.apply(ln)?,
};
let key_states = match &self.k_layernorm {
None => key_states,
Some(ln) => key_states.apply(ln)?,
};
let query_states = query_states
.reshape((b_size, seq_len, self.num_heads, self.head_dim))?
.transpose(1, 2)?;
let key_states = key_states
.reshape((b_size, seq_len, self.num_kv_heads, self.head_dim))?
.transpose(1, 2)?;
let value_states = value_states
.reshape((b_size, seq_len, self.num_kv_heads, self.head_dim))?
.transpose(1, 2)?;
// Rotary embeddings.
let seqlen_offset = match &self.kv_cache {
None => 0,
Some((prev_k, _)) => prev_k.dim(2)?,
};
let query_states = self
.rotary_emb
.apply_rotary_emb(&query_states, seqlen_offset)?;
let key_states = self
.rotary_emb
.apply_rotary_emb(&key_states, seqlen_offset)?;
// KV cache.
let (key_states, value_states) = match &self.kv_cache {
None => (key_states, value_states),
Some((prev_k, prev_v)) => {
let k = Tensor::cat(&[prev_k, &key_states], 2)?;
let v = Tensor::cat(&[prev_v, &value_states], 2)?;
(k, v)
}
};
self.kv_cache = Some((key_states.clone(), value_states.clone()));
// Repeat kv.
let key_states = self.repeat_kv(key_states)?.contiguous()?;
let value_states = self.repeat_kv(value_states)?.contiguous()?;
let attn_weights = (query_states
.to_dtype(DType::F32)?
.contiguous()?
.matmul(&key_states.to_dtype(DType::F32)?.t()?)?
* self.softmax_scale)?;
let attn_weights = match mask {
None => attn_weights,
Some(mask) => masked_fill(
&attn_weights,
&mask.broadcast_left((b_size, self.num_heads))?,
f32::NEG_INFINITY,
)?,
};
let attn_weights =
candle_nn::ops::softmax_last_dim(&attn_weights)?.to_dtype(value_states.dtype())?;
let attn_output = attn_weights.matmul(&value_states)?;
let attn_output = attn_output
.transpose(1, 2)?
.reshape((b_size, seq_len, ()))?;
attn_output.apply(&self.dense)
}
fn clear_kv_cache(&mut self) {
self.kv_cache = None
}
}
#[derive(Clone)]
struct DecoderLayer {
self_attn: Attention,
mlp: MLP,
input_layernorm: LayerNorm,
span: tracing::Span,
}
impl DecoderLayer {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let self_attn = Attention::new(cfg, vb.pp("self_attn"))?;
let mlp = MLP::new(cfg, vb.pp("mlp"))?;
let input_layernorm = layer_norm(
cfg.hidden_size,
cfg.layer_norm_eps,
vb.pp("input_layernorm"),
)?;
Ok(Self {
self_attn,
mlp,
input_layernorm,
span: tracing::span!(tracing::Level::TRACE, "block"),
})
}
fn forward(&mut self, xs: &Tensor, mask: Option<&Tensor>) -> Result<Tensor> {
let _enter = self.span.enter();
let residual = xs;
let xs = xs.apply(&self.input_layernorm)?;
let attn_outputs = self.self_attn.forward(&xs, mask)?;
let feed_forward_hidden_states = self.mlp.forward(&xs)?;
attn_outputs + feed_forward_hidden_states + residual
}
fn clear_kv_cache(&mut self) {
self.self_attn.clear_kv_cache()
}
}
#[derive(Clone)]
pub struct Model {
embed_tokens: Embedding,
layers: Vec<DecoderLayer>,
final_layernorm: LayerNorm,
lm_head: Linear,
span: tracing::Span,
}
impl Model {
pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let vb_m = vb.pp("model");
let embed_tokens =
Embedding::new(cfg.vocab_size, cfg.hidden_size, vb_m.pp("embed_tokens"))?;
let final_layernorm = layer_norm(
cfg.hidden_size,
cfg.layer_norm_eps,
vb_m.pp("final_layernorm"),
)?;
let mut layers = Vec::with_capacity(cfg.num_hidden_layers);
let vb_m = vb_m.pp("layers");
for layer_idx in 0..cfg.num_hidden_layers {
let layer = DecoderLayer::new(cfg, vb_m.pp(layer_idx))?;
layers.push(layer)
}
let lm_head = linear(cfg.hidden_size, cfg.vocab_size, vb.pp("lm_head"))?;
Ok(Self {
embed_tokens,
layers,
final_layernorm,
lm_head,
span: tracing::span!(tracing::Level::TRACE, "model"),
})
}
pub fn forward(&mut self, xs: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
let (_b_size, seq_len) = xs.dims2()?;
let mut xs = xs.apply(&self.embed_tokens)?;
let mask = if seq_len <= 1 {
None
} else {
Some(get_mask(seq_len, xs.device())?)
};
for layer in self.layers.iter_mut() {
xs = layer.forward(&xs, mask.as_ref())?;
}
xs.apply(&self.final_layernorm)?
.narrow(1, seq_len - 1, 1)?
.apply(&self.lm_head)?
.squeeze(1)
}
pub fn clear_kv_cache(&mut self) {
self.layers.iter_mut().for_each(|b| b.clear_kv_cache())
}
}
| candle/candle-transformers/src/models/phi.rs/0 | {
"file_path": "candle/candle-transformers/src/models/phi.rs",
"repo_id": "candle",
"token_count": 6213
} |
//! Qwen2 model implementation with quantization support.
//!
//! Qwen2 is a chat-optimized language model that supports 8-bit quantization
//! for reduced memory usage and faster inference.
//!
//! Key characteristics:
//! - Group Query Attention (GQA)
//! - RMSNorm for layer normalization
//! - Rotary positional embeddings (RoPE)
//! - Support for 8-bit quantization
//!
//! References:
//! - [Model Card](https://huggingface.co/Qwen/Qwen2)
//!
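//! ## Example (sketch)
//!
//! A rough, hedged sketch of loading a GGUF checkpoint and computing the logits for a
//! prompt; the file name is a placeholder and the token ids are stand-ins:
//!
//! ```ignore
//! use candle::quantized::gguf_file;
//! use candle::{DType, Device, Tensor};
//! use candle_transformers::models::quantized_qwen2::ModelWeights;
//!
//! fn run() -> Result<(), Box<dyn std::error::Error>> {
//!     let device = Device::Cpu;
//!     let mut file = std::fs::File::open("qwen2-instruct-q4_0.gguf")?;
//!     let content = gguf_file::Content::read(&mut file)?;
//!     let mut model = ModelWeights::from_gguf(content, &mut file, &device)?;
//!     let prompt = Tensor::zeros((1, 8), DType::U32, &device)?;
//!     // `index_pos` is 0 for the prompt; later single-token calls pass the running offset.
//!     let logits = model.forward(&prompt, 0)?;
//!     println!("{:?}", logits.shape());
//!     Ok(())
//! }
//! ```
//!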
use crate::{quantized_nn::RmsNorm, utils::repeat_kv};
use candle::{
quantized::{gguf_file, QMatMul},
DType, Device, IndexOp, Result, Tensor,
};
use candle_nn::{Embedding, Module};
use std::collections::HashMap;
#[derive(Debug, Clone)]
struct Mlp {
feed_forward_w1: QMatMul,
feed_forward_w2: QMatMul,
feed_forward_w3: QMatMul,
}
impl Module for Mlp {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let w1 = self.feed_forward_w1.forward(xs)?;
let w3 = self.feed_forward_w3.forward(xs)?;
self.feed_forward_w2
.forward(&(candle_nn::ops::silu(&w1)? * w3)?)
}
}
#[derive(Debug, Clone)]
struct LayerWeights {
attention_wq: QMatMul,
attention_wk: QMatMul,
attention_wv: QMatMul,
attention_bq: Tensor,
attention_bk: Tensor,
attention_bv: Tensor,
attention_wo: QMatMul,
attention_norm: RmsNorm,
mlp: Mlp,
ffn_norm: RmsNorm,
n_head: usize,
n_kv_head: usize,
head_dim: usize,
cos: Tensor,
sin: Tensor,
neg_inf: Tensor,
kv_cache: Option<(Tensor, Tensor)>,
span_attn: tracing::Span,
span_rot: tracing::Span,
span_mlp: tracing::Span,
}
fn masked_fill(on_false: &Tensor, mask: &Tensor, on_true: &Tensor) -> Result<Tensor> {
let shape = mask.shape();
let m = mask.where_cond(&on_true.broadcast_as(shape.dims())?, on_false)?;
Ok(m)
}
impl LayerWeights {
fn apply_rotary_emb(&self, x: &Tensor, index_pos: usize) -> Result<Tensor> {
let _enter = self.span_rot.enter();
let (_b_sz, _n_head, seq_len, _n_embd) = x.dims4()?;
let cos = self.cos.narrow(0, index_pos, seq_len)?;
let sin = self.sin.narrow(0, index_pos, seq_len)?;
candle_nn::rotary_emb::rope(&x.contiguous()?, &cos, &sin)
}
fn forward_attn(
&mut self,
x: &Tensor,
mask: Option<&Tensor>,
index_pos: usize,
) -> Result<Tensor> {
let _enter = self.span_attn.enter();
let (b_sz, seq_len, n_embd) = x.dims3()?;
let q = self.attention_wq.forward(x)?;
let k = self.attention_wk.forward(x)?;
let v = self.attention_wv.forward(x)?;
let q = q.broadcast_add(&self.attention_bq)?;
let k = k.broadcast_add(&self.attention_bk)?;
let v = v.broadcast_add(&self.attention_bv)?;
let q = q
.reshape((b_sz, seq_len, self.n_head, self.head_dim))?
.transpose(1, 2)?
.contiguous()?;
let k = k
.reshape((b_sz, seq_len, self.n_kv_head, self.head_dim))?
.transpose(1, 2)?
.contiguous()?;
let v = v
.reshape((b_sz, seq_len, self.n_kv_head, self.head_dim))?
.transpose(1, 2)?
.contiguous()?;
// let (q, k) = self
// .rotary_embedding
// .apply_rotary_emb_qkv(&q, &k, index_pos)?;
let q = self.apply_rotary_emb(&q, index_pos)?;
let k = self.apply_rotary_emb(&k, index_pos)?;
let (k, v) = match &self.kv_cache {
None => (k, v),
Some((k_cache, v_cache)) => {
if index_pos == 0 {
(k, v)
} else {
let k = Tensor::cat(&[k_cache, &k], 2)?;
let v = Tensor::cat(&[v_cache, &v], 2)?;
(k, v)
}
}
};
self.kv_cache = Some((k.clone(), v.clone()));
// Support for MQA, useful for 70B models and mistral.
let k = repeat_kv(k, self.n_head / self.n_kv_head)?;
let v = repeat_kv(v, self.n_head / self.n_kv_head)?;
let att = (q.matmul(&k.t()?)? / (self.head_dim as f64).sqrt())?;
let att = match mask {
None => att,
Some(mask) => {
let mask = mask.broadcast_as(att.shape())?;
masked_fill(&att, &mask, &self.neg_inf)?
}
};
let att = candle_nn::ops::softmax_last_dim(&att)?;
// Convert to contiguous as matmul doesn't support strided vs for now.
let y = att.matmul(&v.contiguous()?)?;
let y = y.transpose(1, 2)?.reshape(&[b_sz, seq_len, n_embd])?;
let y = self.attention_wo.forward(&y)?;
Ok(y)
}
}
pub struct ModelWeights {
tok_embeddings: Embedding,
layers: Vec<LayerWeights>,
norm: RmsNorm,
output: QMatMul,
masks: HashMap<usize, Tensor>,
span: tracing::Span,
span_output: tracing::Span,
}
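// Precompute the RoPE cos/sin tables for every position up to the model context length.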
fn precomput_freqs_cis(
head_dim: usize,
freq_base: f32,
context_length: usize,
device: &Device,
) -> Result<(Tensor, Tensor)> {
let theta: Vec<_> = (0..head_dim)
.step_by(2)
.map(|i| 1f32 / freq_base.powf(i as f32 / head_dim as f32))
.collect();
let theta = Tensor::new(theta.as_slice(), device)?;
let idx_theta = Tensor::arange(0, context_length as u32, device)?
.to_dtype(DType::F32)?
.reshape((context_length, 1))?
.matmul(&theta.reshape((1, theta.elem_count()))?)?;
let cos = idx_theta.cos()?;
let sin = idx_theta.sin()?;
Ok((cos, sin))
}
impl ModelWeights {
pub fn from_gguf<R: std::io::Seek + std::io::Read>(
ct: gguf_file::Content,
reader: &mut R,
device: &Device,
) -> Result<Self> {
let md_get = |s: &str| match ct.metadata.get(s) {
None => candle::bail!("cannot find {s} in metadata"),
Some(v) => Ok(v),
};
let head_count = md_get("qwen2.attention.head_count")?.to_u32()? as usize;
let head_count_kv = md_get("qwen2.attention.head_count_kv")?.to_u32()? as usize;
let embedding_length = md_get("qwen2.embedding_length")?.to_u32()? as usize;
let context_length = md_get("qwen2.context_length")?.to_u32()? as usize;
let block_count = md_get("qwen2.block_count")?.to_u32()? as usize;
let rms_norm_eps = md_get("qwen2.attention.layer_norm_rms_epsilon")?.to_f32()? as f64;
let rope_freq_base = md_get("qwen2.rope.freq_base")
.and_then(|m| m.to_f32())
.unwrap_or(10000f32);
let head_dim = embedding_length / head_count;
let neg_inf = Tensor::new(f32::NEG_INFINITY, device)?;
let tok_embeddings = ct.tensor(reader, "token_embd.weight", device)?;
let tok_embeddings = tok_embeddings.dequantize(device)?;
let norm = RmsNorm::from_qtensor(
ct.tensor(reader, "output_norm.weight", device)?,
rms_norm_eps,
)?;
let output = match ct.tensor(reader, "output.weight", device) {
Ok(v) => QMatMul::from_qtensor(v)?,
_ => {
// use tie_word_embeddings
QMatMul::from_qtensor(ct.tensor(reader, "token_embd.weight", device)?)?
}
};
let (cos, sin) = precomput_freqs_cis(head_dim, rope_freq_base, context_length, device)?;
let mut layers = Vec::with_capacity(block_count);
for layer_idx in 0..block_count {
let prefix = format!("blk.{layer_idx}");
let attention_wq = ct.tensor(reader, &format!("{prefix}.attn_q.weight"), device)?;
let attention_wk = ct.tensor(reader, &format!("{prefix}.attn_k.weight"), device)?;
let attention_wv = ct.tensor(reader, &format!("{prefix}.attn_v.weight"), device)?;
let attention_bq = ct.tensor(reader, &format!("{prefix}.attn_q.bias"), device)?;
let attention_bk = ct.tensor(reader, &format!("{prefix}.attn_k.bias"), device)?;
let attention_bv = ct.tensor(reader, &format!("{prefix}.attn_v.bias"), device)?;
let attention_wo =
ct.tensor(reader, &format!("{prefix}.attn_output.weight"), device)?;
let mlp = {
let feed_forward_w1 =
ct.tensor(reader, &format!("{prefix}.ffn_gate.weight"), device)?;
let feed_forward_w2 =
ct.tensor(reader, &format!("{prefix}.ffn_down.weight"), device)?;
let feed_forward_w3 =
ct.tensor(reader, &format!("{prefix}.ffn_up.weight"), device)?;
Mlp {
feed_forward_w1: QMatMul::from_qtensor(feed_forward_w1)?,
feed_forward_w2: QMatMul::from_qtensor(feed_forward_w2)?,
feed_forward_w3: QMatMul::from_qtensor(feed_forward_w3)?,
}
};
let attention_norm =
ct.tensor(reader, &format!("{prefix}.attn_norm.weight"), device)?;
let ffn_norm = ct.tensor(reader, &format!("{prefix}.ffn_norm.weight"), device)?;
let span_attn = tracing::span!(tracing::Level::TRACE, "attn");
let span_rot = tracing::span!(tracing::Level::TRACE, "attn-rot");
let span_mlp = tracing::span!(tracing::Level::TRACE, "attn-mlp");
layers.push(LayerWeights {
attention_wq: QMatMul::from_qtensor(attention_wq)?,
attention_wk: QMatMul::from_qtensor(attention_wk)?,
attention_wv: QMatMul::from_qtensor(attention_wv)?,
attention_bq: attention_bq.dequantize(device)?,
attention_bk: attention_bk.dequantize(device)?,
attention_bv: attention_bv.dequantize(device)?,
attention_wo: QMatMul::from_qtensor(attention_wo)?,
attention_norm: RmsNorm::from_qtensor(attention_norm, rms_norm_eps)?,
cos: cos.clone(),
sin: sin.clone(),
mlp,
ffn_norm: RmsNorm::from_qtensor(ffn_norm, rms_norm_eps)?,
n_head: head_count,
n_kv_head: head_count_kv,
head_dim,
neg_inf: neg_inf.clone(),
kv_cache: None,
span_attn,
span_rot,
span_mlp,
});
}
let span = tracing::span!(tracing::Level::TRACE, "model");
let span_output = tracing::span!(tracing::Level::TRACE, "output");
Ok(Self {
tok_embeddings: Embedding::new(tok_embeddings, embedding_length),
layers,
norm,
output,
masks: HashMap::new(),
span,
span_output,
})
}
fn mask(&mut self, t: usize, device: &Device) -> Result<Tensor> {
if let Some(mask) = self.masks.get(&t) {
Ok(mask.clone())
} else {
let mask: Vec<_> = (0..t)
.flat_map(|i| (0..t).map(move |j| u8::from(j > i)))
.collect();
let mask = Tensor::from_slice(&mask, (t, t), device)?;
self.masks.insert(t, mask.clone());
Ok(mask)
}
}
pub fn forward(&mut self, x: &Tensor, index_pos: usize) -> Result<Tensor> {
let (_b_sz, seq_len) = x.dims2()?;
let mask = if seq_len == 1 {
None
} else {
Some(self.mask(seq_len, x.device())?)
};
let _enter = self.span.enter();
let mut layer_in = self.tok_embeddings.forward(x)?;
for layer in self.layers.iter_mut() {
let x = layer_in;
let residual = &x;
let x = layer.attention_norm.forward(&x)?;
let attn = layer.forward_attn(&x, mask.as_ref(), index_pos)?;
let x = (attn + residual)?;
// MLP
let _enter = layer.span_mlp.enter();
let residual = &x;
let x = layer.ffn_norm.forward(&x)?;
let x = layer.mlp.forward(&x)?;
let x = (x + residual)?;
layer_in = x
}
let x = self.norm.forward(&layer_in)?;
let x = x.i((.., seq_len - 1, ..))?;
let _enter = self.span_output.enter();
self.output.forward(&x)
}
}
| candle/candle-transformers/src/models/quantized_qwen2.rs/0 | {
"file_path": "candle/candle-transformers/src/models/quantized_qwen2.rs",
"repo_id": "candle",
"token_count": 6400
} |
//! Segment Anything Model (SAM)
//!
//! SAM is an architecture for image segmentation, capable of segmenting any object
//! in an image based on prompts like points or boxes.
//!
//! This model provides a robust and fast image segmentation pipeline that can be tweaked via
//! some prompting (requesting some points to be in the target mask, requesting some
//! points to be part of the background so _not_ in the target mask, specifying some
//! bounding box).
//!
//! - ⚡ [Interactive Wasm Example](https://huggingface.co/spaces/radames/candle-segment-anything-wasm)
//! - 💻 [GH Link](https://github.com/facebookresearch/segment-anything)
//! - 📝 [Paper](https://arxiv.org/abs/2304.02643)
//! - 💡 The default backbone can be replaced by the smaller and faster TinyViT model based on [MobileSAM](https://github.com/ChaoningZhang/MobileSAM).
//!
//!
//! ## Example
//!
//! ```bash
//! cargo run --example segment-anything --release -- \
//!   --image candle-examples/examples/yolo-v8/assets/bike.jpg \
//! --use-tiny --point 0.6,0.6 --point 0.6,0.55
//! ```
//!
//! <div align=center style="display: flex; justify-content: center; gap: 10px;">
//! <img src="https://github.com/huggingface/candle/raw/main/candle-examples/examples/yolo-v8/assets/bike.jpg" alt="" width="30%">
//! <img src="https://github.com/huggingface/candle/raw/main/candle-examples/examples/segment-anything/assets/single_pt_prompt.jpg" alt="" width="30%">
//! <img src="https://github.com/huggingface/candle/raw/main/candle-examples/examples/segment-anything/assets/two_pt_prompt.jpg" alt="" width="30%">
//! </div>
//!
//!
//! > Original; Prompt with `--point 0.6,0.55`; Prompt with `--point 0.6,0.6 --point 0.6,0.55`
//!
pub use crate::models::with_tracing::Linear;
use candle::{Result, Tensor};
use candle_nn::{Module, VarBuilder};
pub mod image_encoder;
pub mod mask_decoder;
pub mod prompt_encoder;
pub mod sam;
pub mod tiny_vit;
pub mod transformer;
pub fn linear(vb: VarBuilder, in_dim: usize, out_dim: usize, bias: bool) -> Result<Linear> {
if bias {
crate::models::with_tracing::linear(in_dim, out_dim, vb)
} else {
crate::models::with_tracing::linear_no_bias(in_dim, out_dim, vb)
}
}
#[derive(Debug)]
pub struct LayerNorm2d {
weight: Tensor,
bias: Tensor,
num_channels: usize,
eps: f64,
}
impl LayerNorm2d {
pub fn new(num_channels: usize, eps: f64, vb: VarBuilder) -> Result<Self> {
let weight = vb.get(num_channels, "weight")?;
let bias = vb.get(num_channels, "bias")?;
Ok(Self {
weight,
bias,
num_channels,
eps,
})
}
}
impl Module for LayerNorm2d {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let u = xs.mean_keepdim(1)?;
let xs = xs.broadcast_sub(&u)?;
let s = xs.sqr()?.mean_keepdim(1)?;
let xs = xs.broadcast_div(&(s + self.eps)?.sqrt()?)?;
xs.broadcast_mul(&self.weight.reshape((1, self.num_channels, 1, 1))?)?
.broadcast_add(&self.bias.reshape((1, self.num_channels, 1, 1))?)
}
}
#[derive(Debug)]
pub struct MlpBlock {
lin1: Linear,
lin2: Linear,
activation: candle_nn::Activation,
span: tracing::Span,
}
impl MlpBlock {
pub fn new(
embedding_dim: usize,
mlp_dim: usize,
activation: candle_nn::Activation,
vb: VarBuilder,
) -> Result<Self> {
let lin1 = linear(vb.pp("lin1"), embedding_dim, mlp_dim, true)?;
let lin2 = linear(vb.pp("lin2"), mlp_dim, embedding_dim, true)?;
let span = tracing::span!(tracing::Level::TRACE, "mlp-block");
Ok(Self {
lin1,
lin2,
activation,
span,
})
}
}
impl Module for MlpBlock {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
xs.apply(&self.lin1)?
.apply(&self.activation)?
.apply(&self.lin2)
}
}
| candle/candle-transformers/src/models/segment_anything/mod.rs/0 | {
"file_path": "candle/candle-transformers/src/models/segment_anything/mod.rs",
"repo_id": "candle",
"token_count": 1721
} |
//! 2D UNet Building Blocks
//!
use super::attention::{
AttentionBlock, AttentionBlockConfig, SpatialTransformer, SpatialTransformerConfig,
};
use super::resnet::{ResnetBlock2D, ResnetBlock2DConfig};
use crate::models::with_tracing::{conv2d, Conv2d};
use candle::{Module, Result, Tensor, D};
use candle_nn as nn;
#[derive(Debug)]
struct Downsample2D {
conv: Option<Conv2d>,
padding: usize,
span: tracing::Span,
}
impl Downsample2D {
fn new(
vs: nn::VarBuilder,
in_channels: usize,
use_conv: bool,
out_channels: usize,
padding: usize,
) -> Result<Self> {
let conv = if use_conv {
let config = nn::Conv2dConfig {
stride: 2,
padding,
..Default::default()
};
let conv = conv2d(in_channels, out_channels, 3, config, vs.pp("conv"))?;
Some(conv)
} else {
None
};
let span = tracing::span!(tracing::Level::TRACE, "downsample2d");
Ok(Self {
conv,
padding,
span,
})
}
}
impl Module for Downsample2D {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
match &self.conv {
None => xs.avg_pool2d(2),
Some(conv) => {
if self.padding == 0 {
let xs = xs
.pad_with_zeros(D::Minus1, 0, 1)?
.pad_with_zeros(D::Minus2, 0, 1)?;
conv.forward(&xs)
} else {
conv.forward(xs)
}
}
}
}
}
// This does not support the conv-transpose mode.
#[derive(Debug)]
struct Upsample2D {
conv: Conv2d,
span: tracing::Span,
}
impl Upsample2D {
fn new(vs: nn::VarBuilder, in_channels: usize, out_channels: usize) -> Result<Self> {
let config = nn::Conv2dConfig {
padding: 1,
..Default::default()
};
let conv = conv2d(in_channels, out_channels, 3, config, vs.pp("conv"))?;
let span = tracing::span!(tracing::Level::TRACE, "upsample2d");
Ok(Self { conv, span })
}
}
impl Upsample2D {
fn forward(&self, xs: &Tensor, size: Option<(usize, usize)>) -> Result<Tensor> {
let _enter = self.span.enter();
let xs = match size {
None => {
let (_bsize, _channels, h, w) = xs.dims4()?;
xs.upsample_nearest2d(2 * h, 2 * w)?
}
Some((h, w)) => xs.upsample_nearest2d(h, w)?,
};
self.conv.forward(&xs)
}
}
#[derive(Debug, Clone, Copy)]
pub struct DownEncoderBlock2DConfig {
pub num_layers: usize,
pub resnet_eps: f64,
pub resnet_groups: usize,
pub output_scale_factor: f64,
pub add_downsample: bool,
pub downsample_padding: usize,
}
impl Default for DownEncoderBlock2DConfig {
fn default() -> Self {
Self {
num_layers: 1,
resnet_eps: 1e-6,
resnet_groups: 32,
output_scale_factor: 1.,
add_downsample: true,
downsample_padding: 1,
}
}
}
#[derive(Debug)]
pub struct DownEncoderBlock2D {
resnets: Vec<ResnetBlock2D>,
downsampler: Option<Downsample2D>,
span: tracing::Span,
pub config: DownEncoderBlock2DConfig,
}
impl DownEncoderBlock2D {
pub fn new(
vs: nn::VarBuilder,
in_channels: usize,
out_channels: usize,
config: DownEncoderBlock2DConfig,
) -> Result<Self> {
let resnets: Vec<_> = {
let vs = vs.pp("resnets");
let conv_cfg = ResnetBlock2DConfig {
eps: config.resnet_eps,
out_channels: Some(out_channels),
groups: config.resnet_groups,
output_scale_factor: config.output_scale_factor,
temb_channels: None,
..Default::default()
};
(0..(config.num_layers))
.map(|i| {
let in_channels = if i == 0 { in_channels } else { out_channels };
ResnetBlock2D::new(vs.pp(i.to_string()), in_channels, conv_cfg)
})
.collect::<Result<Vec<_>>>()?
};
let downsampler = if config.add_downsample {
let downsample = Downsample2D::new(
vs.pp("downsamplers").pp("0"),
out_channels,
true,
out_channels,
config.downsample_padding,
)?;
Some(downsample)
} else {
None
};
let span = tracing::span!(tracing::Level::TRACE, "down-enc2d");
Ok(Self {
resnets,
downsampler,
span,
config,
})
}
}
impl Module for DownEncoderBlock2D {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
let mut xs = xs.clone();
for resnet in self.resnets.iter() {
xs = resnet.forward(&xs, None)?
}
match &self.downsampler {
Some(downsampler) => downsampler.forward(&xs),
None => Ok(xs),
}
}
}
#[derive(Debug, Clone, Copy)]
pub struct UpDecoderBlock2DConfig {
pub num_layers: usize,
pub resnet_eps: f64,
pub resnet_groups: usize,
pub output_scale_factor: f64,
pub add_upsample: bool,
}
impl Default for UpDecoderBlock2DConfig {
fn default() -> Self {
Self {
num_layers: 1,
resnet_eps: 1e-6,
resnet_groups: 32,
output_scale_factor: 1.,
add_upsample: true,
}
}
}
#[derive(Debug)]
pub struct UpDecoderBlock2D {
resnets: Vec<ResnetBlock2D>,
upsampler: Option<Upsample2D>,
span: tracing::Span,
pub config: UpDecoderBlock2DConfig,
}
impl UpDecoderBlock2D {
pub fn new(
vs: nn::VarBuilder,
in_channels: usize,
out_channels: usize,
config: UpDecoderBlock2DConfig,
) -> Result<Self> {
let resnets: Vec<_> = {
let vs = vs.pp("resnets");
let conv_cfg = ResnetBlock2DConfig {
out_channels: Some(out_channels),
eps: config.resnet_eps,
groups: config.resnet_groups,
output_scale_factor: config.output_scale_factor,
temb_channels: None,
..Default::default()
};
(0..(config.num_layers))
.map(|i| {
let in_channels = if i == 0 { in_channels } else { out_channels };
ResnetBlock2D::new(vs.pp(i.to_string()), in_channels, conv_cfg)
})
.collect::<Result<Vec<_>>>()?
};
let upsampler = if config.add_upsample {
let upsample =
Upsample2D::new(vs.pp("upsamplers").pp("0"), out_channels, out_channels)?;
Some(upsample)
} else {
None
};
let span = tracing::span!(tracing::Level::TRACE, "up-dec2d");
Ok(Self {
resnets,
upsampler,
span,
config,
})
}
}
impl Module for UpDecoderBlock2D {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
let mut xs = xs.clone();
for resnet in self.resnets.iter() {
xs = resnet.forward(&xs, None)?
}
match &self.upsampler {
Some(upsampler) => upsampler.forward(&xs, None),
None => Ok(xs),
}
}
}
#[derive(Debug, Clone, Copy)]
pub struct UNetMidBlock2DConfig {
pub num_layers: usize,
pub resnet_eps: f64,
pub resnet_groups: Option<usize>,
pub attn_num_head_channels: Option<usize>,
// attention_type "default"
pub output_scale_factor: f64,
}
impl Default for UNetMidBlock2DConfig {
fn default() -> Self {
Self {
num_layers: 1,
resnet_eps: 1e-6,
resnet_groups: Some(32),
attn_num_head_channels: Some(1),
output_scale_factor: 1.,
}
}
}
#[derive(Debug)]
pub struct UNetMidBlock2D {
resnet: ResnetBlock2D,
attn_resnets: Vec<(AttentionBlock, ResnetBlock2D)>,
span: tracing::Span,
pub config: UNetMidBlock2DConfig,
}
impl UNetMidBlock2D {
pub fn new(
vs: nn::VarBuilder,
in_channels: usize,
temb_channels: Option<usize>,
config: UNetMidBlock2DConfig,
) -> Result<Self> {
let vs_resnets = vs.pp("resnets");
let vs_attns = vs.pp("attentions");
let resnet_groups = config
.resnet_groups
.unwrap_or_else(|| usize::min(in_channels / 4, 32));
let resnet_cfg = ResnetBlock2DConfig {
eps: config.resnet_eps,
groups: resnet_groups,
output_scale_factor: config.output_scale_factor,
temb_channels,
..Default::default()
};
let resnet = ResnetBlock2D::new(vs_resnets.pp("0"), in_channels, resnet_cfg)?;
let attn_cfg = AttentionBlockConfig {
num_head_channels: config.attn_num_head_channels,
num_groups: resnet_groups,
rescale_output_factor: config.output_scale_factor,
eps: config.resnet_eps,
};
let mut attn_resnets = vec![];
for index in 0..config.num_layers {
let attn = AttentionBlock::new(vs_attns.pp(index.to_string()), in_channels, attn_cfg)?;
let resnet = ResnetBlock2D::new(
vs_resnets.pp((index + 1).to_string()),
in_channels,
resnet_cfg,
)?;
attn_resnets.push((attn, resnet))
}
let span = tracing::span!(tracing::Level::TRACE, "mid2d");
Ok(Self {
resnet,
attn_resnets,
span,
config,
})
}
pub fn forward(&self, xs: &Tensor, temb: Option<&Tensor>) -> Result<Tensor> {
let _enter = self.span.enter();
let mut xs = self.resnet.forward(xs, temb)?;
for (attn, resnet) in self.attn_resnets.iter() {
xs = resnet.forward(&attn.forward(&xs)?, temb)?
}
Ok(xs)
}
}
#[derive(Debug, Clone, Copy)]
pub struct UNetMidBlock2DCrossAttnConfig {
pub num_layers: usize,
pub resnet_eps: f64,
pub resnet_groups: Option<usize>,
pub attn_num_head_channels: usize,
// attention_type "default"
pub output_scale_factor: f64,
pub cross_attn_dim: usize,
pub sliced_attention_size: Option<usize>,
pub use_linear_projection: bool,
pub transformer_layers_per_block: usize,
}
impl Default for UNetMidBlock2DCrossAttnConfig {
fn default() -> Self {
Self {
num_layers: 1,
resnet_eps: 1e-6,
resnet_groups: Some(32),
attn_num_head_channels: 1,
output_scale_factor: 1.,
cross_attn_dim: 1280,
sliced_attention_size: None, // Sliced attention disabled
use_linear_projection: false,
transformer_layers_per_block: 1,
}
}
}
#[derive(Debug)]
pub struct UNetMidBlock2DCrossAttn {
resnet: ResnetBlock2D,
attn_resnets: Vec<(SpatialTransformer, ResnetBlock2D)>,
span: tracing::Span,
pub config: UNetMidBlock2DCrossAttnConfig,
}
impl UNetMidBlock2DCrossAttn {
pub fn new(
vs: nn::VarBuilder,
in_channels: usize,
temb_channels: Option<usize>,
use_flash_attn: bool,
config: UNetMidBlock2DCrossAttnConfig,
) -> Result<Self> {
let vs_resnets = vs.pp("resnets");
let vs_attns = vs.pp("attentions");
let resnet_groups = config
.resnet_groups
.unwrap_or_else(|| usize::min(in_channels / 4, 32));
let resnet_cfg = ResnetBlock2DConfig {
eps: config.resnet_eps,
groups: resnet_groups,
output_scale_factor: config.output_scale_factor,
temb_channels,
..Default::default()
};
let resnet = ResnetBlock2D::new(vs_resnets.pp("0"), in_channels, resnet_cfg)?;
let n_heads = config.attn_num_head_channels;
let attn_cfg = SpatialTransformerConfig {
depth: config.transformer_layers_per_block,
num_groups: resnet_groups,
context_dim: Some(config.cross_attn_dim),
sliced_attention_size: config.sliced_attention_size,
use_linear_projection: config.use_linear_projection,
};
let mut attn_resnets = vec![];
for index in 0..config.num_layers {
let attn = SpatialTransformer::new(
vs_attns.pp(index.to_string()),
in_channels,
n_heads,
in_channels / n_heads,
use_flash_attn,
attn_cfg,
)?;
let resnet = ResnetBlock2D::new(
vs_resnets.pp((index + 1).to_string()),
in_channels,
resnet_cfg,
)?;
attn_resnets.push((attn, resnet))
}
let span = tracing::span!(tracing::Level::TRACE, "xa-mid2d");
Ok(Self {
resnet,
attn_resnets,
span,
config,
})
}
pub fn forward(
&self,
xs: &Tensor,
temb: Option<&Tensor>,
encoder_hidden_states: Option<&Tensor>,
) -> Result<Tensor> {
let _enter = self.span.enter();
let mut xs = self.resnet.forward(xs, temb)?;
for (attn, resnet) in self.attn_resnets.iter() {
xs = resnet.forward(&attn.forward(&xs, encoder_hidden_states)?, temb)?
}
Ok(xs)
}
}
#[derive(Debug, Clone, Copy)]
pub struct DownBlock2DConfig {
pub num_layers: usize,
pub resnet_eps: f64,
// resnet_time_scale_shift: "default"
// resnet_act_fn: "swish"
pub resnet_groups: usize,
pub output_scale_factor: f64,
pub add_downsample: bool,
pub downsample_padding: usize,
}
impl Default for DownBlock2DConfig {
fn default() -> Self {
Self {
num_layers: 1,
resnet_eps: 1e-6,
resnet_groups: 32,
output_scale_factor: 1.,
add_downsample: true,
downsample_padding: 1,
}
}
}
#[derive(Debug)]
pub struct DownBlock2D {
resnets: Vec<ResnetBlock2D>,
downsampler: Option<Downsample2D>,
span: tracing::Span,
pub config: DownBlock2DConfig,
}
impl DownBlock2D {
pub fn new(
vs: nn::VarBuilder,
in_channels: usize,
out_channels: usize,
temb_channels: Option<usize>,
config: DownBlock2DConfig,
) -> Result<Self> {
let vs_resnets = vs.pp("resnets");
let resnet_cfg = ResnetBlock2DConfig {
out_channels: Some(out_channels),
eps: config.resnet_eps,
output_scale_factor: config.output_scale_factor,
temb_channels,
..Default::default()
};
let resnets = (0..config.num_layers)
.map(|i| {
let in_channels = if i == 0 { in_channels } else { out_channels };
ResnetBlock2D::new(vs_resnets.pp(i.to_string()), in_channels, resnet_cfg)
})
.collect::<Result<Vec<_>>>()?;
let downsampler = if config.add_downsample {
let downsampler = Downsample2D::new(
vs.pp("downsamplers").pp("0"),
out_channels,
true,
out_channels,
config.downsample_padding,
)?;
Some(downsampler)
} else {
None
};
let span = tracing::span!(tracing::Level::TRACE, "down2d");
Ok(Self {
resnets,
downsampler,
span,
config,
})
}
pub fn forward(&self, xs: &Tensor, temb: Option<&Tensor>) -> Result<(Tensor, Vec<Tensor>)> {
let _enter = self.span.enter();
let mut xs = xs.clone();
let mut output_states = vec![];
for resnet in self.resnets.iter() {
xs = resnet.forward(&xs, temb)?;
output_states.push(xs.clone());
}
let xs = match &self.downsampler {
Some(downsampler) => {
let xs = downsampler.forward(&xs)?;
output_states.push(xs.clone());
xs
}
None => xs,
};
Ok((xs, output_states))
}
}
#[derive(Debug, Clone, Copy)]
pub struct CrossAttnDownBlock2DConfig {
pub downblock: DownBlock2DConfig,
pub attn_num_head_channels: usize,
pub cross_attention_dim: usize,
// attention_type: "default"
pub sliced_attention_size: Option<usize>,
pub use_linear_projection: bool,
pub transformer_layers_per_block: usize,
}
impl Default for CrossAttnDownBlock2DConfig {
fn default() -> Self {
Self {
downblock: Default::default(),
attn_num_head_channels: 1,
cross_attention_dim: 1280,
sliced_attention_size: None,
use_linear_projection: false,
transformer_layers_per_block: 1,
}
}
}
#[derive(Debug)]
pub struct CrossAttnDownBlock2D {
downblock: DownBlock2D,
attentions: Vec<SpatialTransformer>,
span: tracing::Span,
pub config: CrossAttnDownBlock2DConfig,
}
impl CrossAttnDownBlock2D {
pub fn new(
vs: nn::VarBuilder,
in_channels: usize,
out_channels: usize,
temb_channels: Option<usize>,
use_flash_attn: bool,
config: CrossAttnDownBlock2DConfig,
) -> Result<Self> {
let downblock = DownBlock2D::new(
vs.clone(),
in_channels,
out_channels,
temb_channels,
config.downblock,
)?;
let n_heads = config.attn_num_head_channels;
let cfg = SpatialTransformerConfig {
depth: config.transformer_layers_per_block,
context_dim: Some(config.cross_attention_dim),
num_groups: config.downblock.resnet_groups,
sliced_attention_size: config.sliced_attention_size,
use_linear_projection: config.use_linear_projection,
};
let vs_attn = vs.pp("attentions");
let attentions = (0..config.downblock.num_layers)
.map(|i| {
SpatialTransformer::new(
vs_attn.pp(i.to_string()),
out_channels,
n_heads,
out_channels / n_heads,
use_flash_attn,
cfg,
)
})
.collect::<Result<Vec<_>>>()?;
let span = tracing::span!(tracing::Level::TRACE, "xa-down2d");
Ok(Self {
downblock,
attentions,
span,
config,
})
}
pub fn forward(
&self,
xs: &Tensor,
temb: Option<&Tensor>,
encoder_hidden_states: Option<&Tensor>,
) -> Result<(Tensor, Vec<Tensor>)> {
let _enter = self.span.enter();
let mut output_states = vec![];
let mut xs = xs.clone();
for (resnet, attn) in self.downblock.resnets.iter().zip(self.attentions.iter()) {
xs = resnet.forward(&xs, temb)?;
xs = attn.forward(&xs, encoder_hidden_states)?;
output_states.push(xs.clone());
}
let xs = match &self.downblock.downsampler {
Some(downsampler) => {
let xs = downsampler.forward(&xs)?;
output_states.push(xs.clone());
xs
}
None => xs,
};
Ok((xs, output_states))
}
}
#[derive(Debug, Clone, Copy)]
pub struct UpBlock2DConfig {
pub num_layers: usize,
pub resnet_eps: f64,
// resnet_time_scale_shift: "default"
// resnet_act_fn: "swish"
pub resnet_groups: usize,
pub output_scale_factor: f64,
pub add_upsample: bool,
}
impl Default for UpBlock2DConfig {
fn default() -> Self {
Self {
num_layers: 1,
resnet_eps: 1e-6,
resnet_groups: 32,
output_scale_factor: 1.,
add_upsample: true,
}
}
}
#[derive(Debug)]
pub struct UpBlock2D {
pub resnets: Vec<ResnetBlock2D>,
upsampler: Option<Upsample2D>,
span: tracing::Span,
pub config: UpBlock2DConfig,
}
impl UpBlock2D {
pub fn new(
vs: nn::VarBuilder,
in_channels: usize,
prev_output_channels: usize,
out_channels: usize,
temb_channels: Option<usize>,
config: UpBlock2DConfig,
) -> Result<Self> {
let vs_resnets = vs.pp("resnets");
let resnet_cfg = ResnetBlock2DConfig {
out_channels: Some(out_channels),
temb_channels,
eps: config.resnet_eps,
output_scale_factor: config.output_scale_factor,
..Default::default()
};
let resnets = (0..config.num_layers)
.map(|i| {
let res_skip_channels = if i == config.num_layers - 1 {
in_channels
} else {
out_channels
};
let resnet_in_channels = if i == 0 {
prev_output_channels
} else {
out_channels
};
let in_channels = resnet_in_channels + res_skip_channels;
ResnetBlock2D::new(vs_resnets.pp(i.to_string()), in_channels, resnet_cfg)
})
.collect::<Result<Vec<_>>>()?;
let upsampler = if config.add_upsample {
let upsampler =
Upsample2D::new(vs.pp("upsamplers").pp("0"), out_channels, out_channels)?;
Some(upsampler)
} else {
None
};
let span = tracing::span!(tracing::Level::TRACE, "up2d");
Ok(Self {
resnets,
upsampler,
span,
config,
})
}
pub fn forward(
&self,
xs: &Tensor,
res_xs: &[Tensor],
temb: Option<&Tensor>,
upsample_size: Option<(usize, usize)>,
) -> Result<Tensor> {
let _enter = self.span.enter();
let mut xs = xs.clone();
for (index, resnet) in self.resnets.iter().enumerate() {
xs = Tensor::cat(&[&xs, &res_xs[res_xs.len() - index - 1]], 1)?;
xs = xs.contiguous()?;
xs = resnet.forward(&xs, temb)?;
}
match &self.upsampler {
Some(upsampler) => upsampler.forward(&xs, upsample_size),
None => Ok(xs),
}
}
}
#[derive(Debug, Clone, Copy)]
pub struct CrossAttnUpBlock2DConfig {
pub upblock: UpBlock2DConfig,
pub attn_num_head_channels: usize,
pub cross_attention_dim: usize,
// attention_type: "default"
pub sliced_attention_size: Option<usize>,
pub use_linear_projection: bool,
pub transformer_layers_per_block: usize,
}
impl Default for CrossAttnUpBlock2DConfig {
fn default() -> Self {
Self {
upblock: Default::default(),
attn_num_head_channels: 1,
cross_attention_dim: 1280,
sliced_attention_size: None,
use_linear_projection: false,
transformer_layers_per_block: 1,
}
}
}
#[derive(Debug)]
pub struct CrossAttnUpBlock2D {
pub upblock: UpBlock2D,
pub attentions: Vec<SpatialTransformer>,
span: tracing::Span,
pub config: CrossAttnUpBlock2DConfig,
}
impl CrossAttnUpBlock2D {
pub fn new(
vs: nn::VarBuilder,
in_channels: usize,
prev_output_channels: usize,
out_channels: usize,
temb_channels: Option<usize>,
use_flash_attn: bool,
config: CrossAttnUpBlock2DConfig,
) -> Result<Self> {
let upblock = UpBlock2D::new(
vs.clone(),
in_channels,
prev_output_channels,
out_channels,
temb_channels,
config.upblock,
)?;
let n_heads = config.attn_num_head_channels;
let cfg = SpatialTransformerConfig {
depth: config.transformer_layers_per_block,
context_dim: Some(config.cross_attention_dim),
num_groups: config.upblock.resnet_groups,
sliced_attention_size: config.sliced_attention_size,
use_linear_projection: config.use_linear_projection,
};
let vs_attn = vs.pp("attentions");
let attentions = (0..config.upblock.num_layers)
.map(|i| {
SpatialTransformer::new(
vs_attn.pp(i.to_string()),
out_channels,
n_heads,
out_channels / n_heads,
use_flash_attn,
cfg,
)
})
.collect::<Result<Vec<_>>>()?;
let span = tracing::span!(tracing::Level::TRACE, "xa-up2d");
Ok(Self {
upblock,
attentions,
span,
config,
})
}
pub fn forward(
&self,
xs: &Tensor,
res_xs: &[Tensor],
temb: Option<&Tensor>,
upsample_size: Option<(usize, usize)>,
encoder_hidden_states: Option<&Tensor>,
) -> Result<Tensor> {
let _enter = self.span.enter();
let mut xs = xs.clone();
for (index, resnet) in self.upblock.resnets.iter().enumerate() {
xs = Tensor::cat(&[&xs, &res_xs[res_xs.len() - index - 1]], 1)?;
xs = xs.contiguous()?;
xs = resnet.forward(&xs, temb)?;
xs = self.attentions[index].forward(&xs, encoder_hidden_states)?;
}
match &self.upblock.upsampler {
Some(upsampler) => upsampler.forward(&xs, upsample_size),
None => Ok(xs),
}
}
}
| candle/candle-transformers/src/models/stable_diffusion/unet_2d_blocks.rs/0 | {
"file_path": "candle/candle-transformers/src/models/stable_diffusion/unet_2d_blocks.rs",
"repo_id": "candle",
"token_count": 13813
} |
use candle::{Module, Result, Tensor};
use candle_nn::{linear, Linear, VarBuilder};
// A simplified version of:
// https://github.com/huggingface/diffusers/blob/119ad2c3dc8a8fb8446a83f4bf6f20929487b47f/src/diffusers/models/attention_processor.py#L38
#[derive(Debug)]
pub struct Attention {
to_q: Linear,
to_k: Linear,
to_v: Linear,
to_out: Linear,
heads: usize,
scale: f64,
use_flash_attn: bool,
}
#[cfg(feature = "flash-attn")]
fn flash_attn(
q: &Tensor,
k: &Tensor,
v: &Tensor,
softmax_scale: f32,
causal: bool,
) -> Result<Tensor> {
candle_flash_attn::flash_attn(q, k, v, softmax_scale, causal)
}
#[cfg(not(feature = "flash-attn"))]
fn flash_attn(_: &Tensor, _: &Tensor, _: &Tensor, _: f32, _: bool) -> Result<Tensor> {
unimplemented!("compile with '--features flash-attn'")
}
impl Attention {
pub fn new(
query_dim: usize,
heads: usize,
dim_head: usize,
use_flash_attn: bool,
vb: VarBuilder,
) -> Result<Self> {
let inner_dim = dim_head * heads;
let scale = 1.0 / f64::sqrt(dim_head as f64);
let to_q = linear(query_dim, inner_dim, vb.pp("to_q"))?;
let to_k = linear(query_dim, inner_dim, vb.pp("to_k"))?;
let to_v = linear(query_dim, inner_dim, vb.pp("to_v"))?;
let to_out = linear(inner_dim, query_dim, vb.pp("to_out.0"))?;
Ok(Self {
to_q,
to_k,
to_v,
to_out,
scale,
heads,
use_flash_attn,
})
}
fn batch_to_head_dim(&self, xs: &Tensor) -> Result<Tensor> {
let (b_size, seq_len, dim) = xs.dims3()?;
xs.reshape((b_size / self.heads, self.heads, seq_len, dim))?
.permute((0, 2, 1, 3))?
.reshape((b_size / self.heads, seq_len, dim * self.heads))
}
fn head_to_batch_dim(&self, xs: &Tensor) -> Result<Tensor> {
let (b_size, seq_len, dim) = xs.dims3()?;
xs.reshape((b_size, seq_len, self.heads, dim / self.heads))?
.permute((0, 2, 1, 3))?
.reshape((b_size * self.heads, seq_len, dim / self.heads))
}
fn get_attention_scores(&self, query: &Tensor, key: &Tensor) -> Result<Tensor> {
let attn_probs = (query.matmul(&key.t()?)? * self.scale)?;
candle_nn::ops::softmax_last_dim(&attn_probs)
}
pub fn forward(&self, xs: &Tensor, encoder_hidden_states: &Tensor) -> Result<Tensor> {
let (b_size, channel, h, w) = xs.dims4()?;
let xs = xs.reshape((b_size, channel, h * w))?.t()?;
let query = self.to_q.forward(&xs)?;
let key = self.to_k.forward(encoder_hidden_states)?;
let value = self.to_v.forward(encoder_hidden_states)?;
let query = self.head_to_batch_dim(&query)?;
let key = self.head_to_batch_dim(&key)?;
let value = self.head_to_batch_dim(&value)?;
let xs = if self.use_flash_attn {
let init_dtype = query.dtype();
let q = query
.to_dtype(candle::DType::F16)?
.unsqueeze(0)?
.transpose(1, 2)?;
let k = key
.to_dtype(candle::DType::F16)?
.unsqueeze(0)?
.transpose(1, 2)?;
let v = value
.to_dtype(candle::DType::F16)?
.unsqueeze(0)?
.transpose(1, 2)?;
flash_attn(&q, &k, &v, self.scale as f32, false)?
.transpose(1, 2)?
.squeeze(0)?
.to_dtype(init_dtype)?
} else {
let attn_prs = self.get_attention_scores(&query, &key)?;
attn_prs.matmul(&value)?
};
let xs = self.batch_to_head_dim(&xs)?;
self.to_out
.forward(&xs)?
.t()?
.reshape((b_size, channel, h, w))
}
}
| candle/candle-transformers/src/models/wuerstchen/attention_processor.rs/0 | {
"file_path": "candle/candle-transformers/src/models/wuerstchen/attention_processor.rs",
"repo_id": "candle",
"token_count": 2076
} |
use candle::Result;
use candle_transformers::object_detection::{
non_maximum_suppression, soft_non_maximum_suppression, Bbox,
};
#[test]
fn nms_basic() -> Result<()> {
// Boxes based upon https://thepythoncode.com/article/non-maximum-suppression-using-opencv-in-python
let mut bboxes = vec![vec![
Bbox {
xmin: 245.0,
ymin: 305.0,
xmax: 575.0,
ymax: 490.0,
confidence: 0.9,
data: (),
}, // Box 1
Bbox {
xmin: 235.0,
ymin: 300.0,
xmax: 485.0,
ymax: 515.0,
confidence: 0.8,
data: (),
}, // Box 2
Bbox {
xmin: 305.0,
ymin: 270.0,
xmax: 540.0,
ymax: 500.0,
confidence: 0.6,
data: (),
}, // Box 3
]];
non_maximum_suppression(&mut bboxes, 0.5);
let bboxes = bboxes.into_iter().next().unwrap();
assert_eq!(bboxes.len(), 1);
assert_eq!(bboxes[0].confidence, 0.9);
Ok(())
}
#[test]
fn softnms_basic_functionality() -> Result<()> {
let mut bboxes = vec![vec![
Bbox {
xmin: 0.0,
ymin: 0.0,
xmax: 1.0,
ymax: 1.0,
confidence: 0.5,
data: (),
},
Bbox {
xmin: 0.1,
ymin: 0.1,
xmax: 1.1,
ymax: 1.1,
confidence: 0.9,
data: (),
},
Bbox {
xmin: 0.2,
ymin: 0.2,
xmax: 1.2,
ymax: 1.2,
confidence: 0.6,
data: (),
},
]];
soft_non_maximum_suppression(&mut bboxes, Some(0.5), Some(0.1), Some(0.5));
// Should decay boxes following highest confidence box
assert!(bboxes[0][0].confidence == 0.9);
assert!(bboxes[0][1].confidence < 0.5);
assert!(bboxes[0][2].confidence < 0.6);
Ok(())
}
#[test]
fn softnms_confidence_decay() -> Result<()> {
let mut bboxes = vec![vec![
Bbox {
xmin: 0.0,
ymin: 0.0,
xmax: 1.0,
ymax: 1.0,
confidence: 0.9,
data: (),
}, // Reference box
Bbox {
xmin: 0.1,
ymin: 0.1,
xmax: 1.1,
ymax: 1.1,
confidence: 0.8,
data: (),
}, // Overlapping box
]];
soft_non_maximum_suppression(&mut bboxes, Some(0.5), Some(0.1), Some(0.5));
// Check that confidence of the overlapping box is decayed
assert!(bboxes[0][0].confidence == 0.9);
assert!(bboxes[0][1].confidence < 0.8);
Ok(())
}
#[test]
fn softnms_confidence_threshold() -> Result<()> {
let mut bboxes = vec![vec![
Bbox {
xmin: 0.0,
ymin: 0.0,
xmax: 1.0,
ymax: 1.0,
confidence: 0.9,
data: (),
},
Bbox {
xmin: 0.1,
ymin: 0.1,
xmax: 1.1,
ymax: 1.1,
confidence: 0.05,
data: (),
},
]];
soft_non_maximum_suppression(&mut bboxes, Some(0.5), Some(0.1), Some(0.5));
    // The box below the confidence threshold is not dropped; its confidence is zeroed out instead
assert_eq!(bboxes[0].len(), 2);
assert_eq!(bboxes[0][0].confidence, 0.9);
assert_eq!(bboxes[0][1].confidence, 0.00);
Ok(())
}
#[test]
fn softnms_no_overlap() -> Result<()> {
let mut bboxes = vec![vec![
Bbox {
xmin: 0.0,
ymin: 0.0,
xmax: 1.0,
ymax: 1.0,
confidence: 0.9,
data: (),
},
Bbox {
xmin: 2.0,
ymin: 2.0,
xmax: 3.0,
ymax: 3.0,
confidence: 0.8,
data: (),
},
]];
soft_non_maximum_suppression(&mut bboxes, Some(0.5), Some(0.1), Some(0.5));
// Both boxes should remain as they do not significantly overlap
assert_eq!(bboxes[0].len(), 2);
assert_eq!(bboxes[0][0].confidence, 0.9);
assert_eq!(bboxes[0][1].confidence, 0.8);
Ok(())
}
#[test]
fn softnms_no_bbox() -> Result<()> {
let mut bboxes: Vec<Vec<Bbox<()>>> = vec![];
soft_non_maximum_suppression(&mut bboxes, Some(0.5), Some(0.1), Some(0.5));
assert!(bboxes.is_empty());
Ok(())
}
#[test]
fn softnms_single_bbox() -> Result<()> {
let mut bboxes = vec![vec![Bbox {
xmin: 0.0,
ymin: 0.0,
xmax: 1.0,
ymax: 1.0,
confidence: 0.9,
data: (),
}]];
soft_non_maximum_suppression(&mut bboxes, Some(0.5), Some(0.1), Some(0.5));
assert_eq!(bboxes[0].len(), 1);
Ok(())
}
#[test]
fn softnms_equal_confidence_overlap() -> Result<()> {
let mut bboxes = vec![vec![
Bbox {
xmin: 0.0,
ymin: 0.0,
xmax: 1.0,
ymax: 1.0,
confidence: 0.5,
data: (),
},
Bbox {
xmin: 0.1,
ymin: 0.1,
xmax: 1.1,
ymax: 1.1,
confidence: 0.5,
data: (),
},
]];
soft_non_maximum_suppression(&mut bboxes, Some(0.5), Some(0.1), Some(0.5));
    // The first box acts as the reference and keeps its confidence; the second one is decayed.
    // The implementation would need to change for both boxes to be decayed.
assert_eq!(bboxes[0].len(), 2);
assert!(bboxes[0][0].confidence == 0.5);
assert!(bboxes[0][1].confidence < 0.5);
Ok(())
}
| candle/candle-transformers/tests/nms_tests.rs/0 | {
"file_path": "candle/candle-transformers/tests/nms_tests.rs",
"repo_id": "candle",
"token_count": 3139
} |
use candle::Result;
/// This is a wrapper around a tokenizer to ensure that tokens can be returned to the user in a
/// streaming way rather than having to wait for the full decoding.
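///
/// A minimal usage sketch (the tokenizer path and token ids are placeholders):
///
/// ```ignore
/// let tokenizer = tokenizers::Tokenizer::from_file("tokenizer.json").unwrap();
/// let mut stream = TokenOutputStream::new(tokenizer);
/// for &token in &[100u32, 200, 300] {
///     if let Some(piece) = stream.next_token(token).unwrap() {
///         print!("{piece}");
///     }
/// }
/// if let Some(rest) = stream.decode_rest().unwrap() {
///     print!("{rest}");
/// }
/// ```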
pub struct TokenOutputStream {
tokenizer: tokenizers::Tokenizer,
tokens: Vec<u32>,
prev_index: usize,
current_index: usize,
}
impl TokenOutputStream {
pub fn new(tokenizer: tokenizers::Tokenizer) -> Self {
Self {
tokenizer,
tokens: Vec::new(),
prev_index: 0,
current_index: 0,
}
}
pub fn into_inner(self) -> tokenizers::Tokenizer {
self.tokenizer
}
fn decode(&self, tokens: &[u32]) -> Result<String> {
match self.tokenizer.decode(tokens, true) {
Ok(str) => Ok(str),
Err(err) => candle::bail!("cannot decode: {err}"),
}
}
// https://github.com/huggingface/text-generation-inference/blob/5ba53d44a18983a4de32d122f4cb46f4a17d9ef6/server/text_generation_server/models/model.py#L68
pub fn next_token(&mut self, token: u32) -> Result<Option<String>> {
let prev_text = if self.tokens.is_empty() {
String::new()
} else {
let tokens = &self.tokens[self.prev_index..self.current_index];
self.decode(tokens)?
};
self.tokens.push(token);
let text = self.decode(&self.tokens[self.prev_index..])?;
if text.len() > prev_text.len() && text.chars().last().unwrap().is_ascii() {
let text = text.split_at(prev_text.len());
self.prev_index = self.current_index;
self.current_index = self.tokens.len();
Ok(Some(text.1.to_string()))
} else {
Ok(None)
}
}
pub fn decode_rest(&self) -> Result<Option<String>> {
let prev_text = if self.tokens.is_empty() {
String::new()
} else {
let tokens = &self.tokens[self.prev_index..self.current_index];
self.decode(tokens)?
};
let text = self.decode(&self.tokens[self.prev_index..])?;
if text.len() > prev_text.len() {
let text = text.split_at(prev_text.len());
Ok(Some(text.1.to_string()))
} else {
Ok(None)
}
}
pub fn decode_all(&self) -> Result<String> {
self.decode(&self.tokens)
}
pub fn get_token(&self, token_s: &str) -> Option<u32> {
self.tokenizer.get_vocab(true).get(token_s).copied()
}
pub fn tokenizer(&self) -> &tokenizers::Tokenizer {
&self.tokenizer
}
pub fn clear(&mut self) {
self.tokens.clear();
self.prev_index = 0;
self.current_index = 0;
}
}
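// Usage sketch (illustrative only, not part of the original file): assuming a
// `tokenizers::Tokenizer` named `tokenizer` and an iterator of sampled token ids,
// the stream yields printable text fragments as soon as they decode unambiguously.
//
// let mut stream = TokenOutputStream::new(tokenizer);
// for token in sampled_token_ids {
//     if let Some(fragment) = stream.next_token(token)? {
//         print!("{fragment}");
//     }
// }
// if let Some(rest) = stream.decode_rest()? {
//     print!("{rest}");
// }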
| candle/candle-wasm-examples/blip/src/token_output_stream.rs/0 | {
"file_path": "candle/candle-wasm-examples/blip/src/token_output_stream.rs",
"repo_id": "candle",
"token_count": 1295
} |
<html>
<head>
<meta content="text/html;charset=utf-8" http-equiv="Content-Type" />
<title>Candle Segment Anything Model (SAM) Rust/WASM</title>
</head>
<body></body>
</html>
<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<style>
@import url("https://fonts.googleapis.com/css2?family=Source+Code+Pro:wght@200;300;400&family=Source+Sans+3:wght@100;200;300;400;500;600;700;800;900&display=swap");
html,
body {
font-family: "Source Sans 3", sans-serif;
}
</style>
<script src="https://cdn.tailwindcss.com"></script>
<script type="module">
// base url for the model weight files
const MODEL_BASEURL =
"https://huggingface.co/lmz/candle-sam/resolve/main/";
// available models (weight files are relative to MODEL_BASEURL)
const MODELS = {
sam_mobile_tiny: {
url: "mobile_sam-tiny-vitt.safetensors",
},
sam_base: {
url: "sam_vit_b_01ec64.safetensors",
},
};
const samWorker = new Worker("./samWorker.js", { type: "module" });
async function segmentPoints(
modelURL, // URL to the weights file
modelID, // model ID
imageURL, // URL to the image file
points // {x, y} points to prompt image
) {
return new Promise((resolve, reject) => {
function messageHandler(event) {
console.log(event.data);
if ("status" in event.data) {
updateStatus(event.data);
}
if ("error" in event.data) {
samWorker.removeEventListener("message", messageHandler);
reject(new Error(event.data.error));
}
if (event.data.status === "complete-embedding") {
samWorker.removeEventListener("message", messageHandler);
resolve();
}
if (event.data.status === "complete") {
samWorker.removeEventListener("message", messageHandler);
resolve(event.data.output);
}
}
samWorker.addEventListener("message", messageHandler);
samWorker.postMessage({
modelURL,
modelID,
imageURL,
points,
});
});
}
function updateStatus(statusMessage) {
statusOutput.innerText = statusMessage.message;
}
let copyMaskURL = null;
let copyImageURL = null;
const clearBtn = document.querySelector("#clear-btn");
const maskBtn = document.querySelector("#mask-btn");
const undoBtn = document.querySelector("#undo-btn");
const downloadBtn = document.querySelector("#download-btn");
const canvas = document.querySelector("#canvas");
const mask = document.querySelector("#mask");
const ctxCanvas = canvas.getContext("2d");
const ctxMask = mask.getContext("2d");
const fileUpload = document.querySelector("#file-upload");
const dropArea = document.querySelector("#drop-area");
const dropButtons = document.querySelector("#drop-buttons");
const imagesExamples = document.querySelector("#image-select");
const modelSelection = document.querySelector("#model");
const statusOutput = document.querySelector("#output-status");
//add event listener to file input
fileUpload.addEventListener("input", (e) => {
const target = e.target;
if (target.files.length > 0) {
const href = URL.createObjectURL(target.files[0]);
clearImageCanvas();
copyImageURL = href;
drawImageCanvas(href);
setImageEmbeddings(href);
togglePointMode(false);
}
});
// add event listener to drop-area
dropArea.addEventListener("dragenter", (e) => {
e.preventDefault();
dropArea.classList.add("border-blue-700");
});
dropArea.addEventListener("dragleave", (e) => {
e.preventDefault();
dropArea.classList.remove("border-blue-700");
});
dropArea.addEventListener("dragover", (e) => {
e.preventDefault();
});
dropArea.addEventListener("drop", (e) => {
e.preventDefault();
dropArea.classList.remove("border-blue-700");
const url = e.dataTransfer.getData("text/uri-list");
const files = e.dataTransfer.files;
if (files.length > 0) {
const href = URL.createObjectURL(files[0]);
clearImageCanvas();
copyImageURL = href;
drawImageCanvas(href);
setImageEmbeddings(href);
togglePointMode(false);
} else if (url) {
clearImageCanvas();
copyImageURL = url;
drawImageCanvas(url);
setImageEmbeddings(url);
togglePointMode(false);
}
});
let hasImage = false;
let isSegmenting = false;
let isEmbedding = false;
let currentImageURL = "";
let pointArr = [];
let bgPointMode = false;
//add event listener to image examples
imagesExamples.addEventListener("click", (e) => {
if (isEmbedding || isSegmenting) {
return;
}
const target = e.target;
if (target.nodeName === "IMG") {
const href = target.src;
clearImageCanvas();
copyImageURL = href;
drawImageCanvas(href);
setImageEmbeddings(href);
}
});
//add event listener to mask button
maskBtn.addEventListener("click", () => {
togglePointMode();
});
//add event listener to clear button
clearBtn.addEventListener("click", () => {
clearImageCanvas();
togglePointMode(false);
pointArr = [];
});
//add event listener to undo button
undoBtn.addEventListener("click", () => {
undoPoint();
});
// add event to download btn
downloadBtn.addEventListener("click", async () => {
// Function to load image blobs as Image elements asynchronously
const loadImageAsync = (imageURL) => {
return new Promise((resolve) => {
const img = new Image();
img.onload = () => {
resolve(img);
};
img.crossOrigin = "anonymous";
img.src = imageURL;
});
};
const originalImage = await loadImageAsync(copyImageURL);
const maskImage = await loadImageAsync(copyMaskURL);
// create a main canvas to draw on
const canvas = document.createElement("canvas");
const ctx = canvas.getContext("2d");
canvas.width = originalImage.width;
canvas.height = originalImage.height;
// Perform the mask operation
ctx.drawImage(maskImage, 0, 0);
ctx.globalCompositeOperation = "source-in";
ctx.drawImage(originalImage, 0, 0);
// to blob
const blobPromise = new Promise((resolve) => {
canvas.toBlob(resolve);
});
const blob = await blobPromise;
const resultURL = URL.createObjectURL(blob);
// download
const link = document.createElement("a");
link.href = resultURL;
link.download = "cutout.png";
link.click();
});
//add click event to canvas
canvas.addEventListener("click", async (event) => {
if (!hasImage || isEmbedding || isSegmenting) {
return;
}
const backgroundMode = event.shiftKey ? bgPointMode^event.shiftKey : bgPointMode;
const targetBox = event.target.getBoundingClientRect();
const x = (event.clientX - targetBox.left) / targetBox.width;
const y = (event.clientY - targetBox.top) / targetBox.height;
const ptsToRemove = [];
for (const [idx, pts] of pointArr.entries()) {
const d = Math.sqrt((pts[0] - x) ** 2 + (pts[1] - y) ** 2);
if (d < 6 / targetBox.width) {
ptsToRemove.push(idx);
}
}
if (ptsToRemove.length > 0) {
pointArr = pointArr.filter((_, idx) => !ptsToRemove.includes(idx));
} else {
pointArr = [...pointArr, [x, y, !backgroundMode]];
}
undoBtn.disabled = false;
downloadBtn.disabled = false;
if (pointArr.length == 0) {
ctxMask.clearRect(0, 0, canvas.width, canvas.height);
undoBtn.disabled = true;
downloadBtn.disabled = true;
return;
}
isSegmenting = true;
const { maskURL } = await getSegmentationMask(pointArr);
isSegmenting = false;
copyMaskURL = maskURL;
drawMask(maskURL, pointArr);
});
async function undoPoint() {
if (!hasImage || isEmbedding || isSegmenting) {
return;
}
if (pointArr.length === 0) {
return;
}
pointArr.pop();
if (pointArr.length === 0) {
ctxMask.clearRect(0, 0, canvas.width, canvas.height);
undoBtn.disabled = true;
return;
}
isSegmenting = true;
const { maskURL } = await getSegmentationMask(pointArr);
isSegmenting = false;
copyMaskURL = maskURL;
drawMask(maskURL, pointArr);
}
function togglePointMode(mode) {
bgPointMode = mode === undefined ? !bgPointMode : mode;
maskBtn.querySelector("span").innerText = bgPointMode
? "Background Point"
: "Mask Point";
if (bgPointMode) {
maskBtn.querySelector("#mask-circle").setAttribute("hidden", "");
maskBtn.querySelector("#unmask-circle").removeAttribute("hidden");
} else {
maskBtn.querySelector("#mask-circle").removeAttribute("hidden");
maskBtn.querySelector("#unmask-circle").setAttribute("hidden", "");
}
}
async function getSegmentationMask(points) {
const modelID = modelSelection.value;
const modelURL = MODEL_BASEURL + MODELS[modelID].url;
const imageURL = currentImageURL;
const { maskURL } = await segmentPoints(
modelURL,
modelID,
imageURL,
points
);
return { maskURL };
}
async function setImageEmbeddings(imageURL) {
if (isEmbedding) {
return;
}
canvas.classList.remove("cursor-pointer");
canvas.classList.add("cursor-wait");
clearBtn.disabled = true;
const modelID = modelSelection.value;
const modelURL = MODEL_BASEURL + MODELS[modelID].url;
isEmbedding = true;
await segmentPoints(modelURL, modelID, imageURL);
canvas.classList.remove("cursor-wait");
canvas.classList.add("cursor-pointer");
clearBtn.disabled = false;
isEmbedding = false;
currentImageURL = imageURL;
}
function clearImageCanvas() {
ctxCanvas.clearRect(0, 0, canvas.width, canvas.height);
ctxMask.clearRect(0, 0, canvas.width, canvas.height);
hasImage = false;
isEmbedding = false;
isSegmenting = false;
currentImageURL = "";
pointArr = [];
clearBtn.disabled = true;
canvas.parentElement.style.height = "auto";
dropButtons.classList.remove("invisible");
}
function drawMask(maskURL, points) {
if (!maskURL) {
throw new Error("No mask URL provided");
}
const img = new Image();
img.crossOrigin = "anonymous";
img.onload = () => {
mask.width = canvas.width;
mask.height = canvas.height;
ctxMask.save();
ctxMask.drawImage(canvas, 0, 0);
ctxMask.globalCompositeOperation = "source-atop";
ctxMask.fillStyle = "rgba(255, 0, 0, 0.6)";
ctxMask.fillRect(0, 0, canvas.width, canvas.height);
ctxMask.globalCompositeOperation = "destination-in";
ctxMask.drawImage(img, 0, 0);
ctxMask.globalCompositeOperation = "source-over";
for (const pt of points) {
if (pt[2]) {
ctxMask.fillStyle = "rgba(0, 255, 255, 1)";
} else {
ctxMask.fillStyle = "rgba(255, 255, 0, 1)";
}
ctxMask.beginPath();
ctxMask.arc(
pt[0] * canvas.width,
pt[1] * canvas.height,
3,
0,
2 * Math.PI
);
ctxMask.fill();
}
ctxMask.restore();
};
img.src = maskURL;
}
function drawImageCanvas(imgURL) {
if (!imgURL) {
throw new Error("No image URL provided");
}
ctxCanvas.clearRect(0, 0, canvas.width, canvas.height);
const img = new Image();
img.crossOrigin = "anonymous";
img.onload = () => {
canvas.width = img.width;
canvas.height = img.height;
ctxCanvas.drawImage(img, 0, 0);
canvas.parentElement.style.height = canvas.offsetHeight + "px";
hasImage = true;
clearBtn.disabled = false;
dropButtons.classList.add("invisible");
};
img.src = imgURL;
}
const observer = new ResizeObserver((entries) => {
for (let entry of entries) {
if (entry.target === canvas) {
canvas.parentElement.style.height = canvas.offsetHeight + "px";
}
}
});
observer.observe(canvas);
</script>
</head>
<body class="container max-w-4xl mx-auto p-4">
<main class="grid grid-cols-1 gap-8 relative">
<span class="absolute text-5xl -ml-[1em]">🕯️</span>
<div>
<h1 class="text-5xl font-bold">Candle Segment Anything</h1>
<h2 class="text-2xl font-bold">Rust/WASM Demo</h2>
<p class="max-w-lg">
Zero-shot image segmentation with
<a
href="https://segment-anything.com"
class="underline hover:text-blue-500 hover:no-underline"
target="_blank"
>Segment Anything Model (SAM)</a
>
and
<a
href="https://github.com/ChaoningZhang/MobileSAM"
class="underline hover:text-blue-500 hover:no-underline"
target="_blank"
>MobileSAM </a
>. It runs in the browser with a WASM runtime built with
<a
href="https://github.com/huggingface/candle/"
target="_blank"
class="underline hover:text-blue-500 hover:no-underline"
>Candle
</a>
</p>
</div>
<div>
<label for="model" class="font-medium">Models Options: </label>
<select
id="model"
class="border-2 border-gray-500 rounded-md font-light">
<option value="sam_mobile_tiny" selected>
Mobile SAM Tiny (40.6 MB)
</option>
<option value="sam_base">SAM Base (375 MB)</option>
</select>
</div>
<div>
<p class="text-xs italic max-w-lg">
<b>Note:</b>
The model's first run may take a few seconds as it loads and caches
the model in the browser, and then creates the image embeddings. Any
subsequent clicks on points will be significantly faster.
</p>
</div>
<div class="relative max-w-2xl">
<div class="flex justify-between items-center">
<div class="px-2 rounded-md inline text-xs">
<span id="output-status" class="m-auto font-light"></span>
</div>
<div class="flex gap-2">
<button
id="mask-btn"
title="Toggle Mask Point and Background Point"
class="text-xs bg-white rounded-md disabled:opacity-50 flex gap-1 items-center">
<span>Mask Point</span>
<svg
xmlns="http://www.w3.org/2000/svg"
height="1em"
viewBox="0 0 512 512">
<path
id="mask-circle"
d="M256 512a256 256 0 1 0 0-512 256 256 0 1 0 0 512z" />
<path
id="unmask-circle"
hidden
d="M464 256a208 208 0 1 0-416 0 208 208 0 1 0 416 0zM0 256a256 256 0 1 1 512 0 256 256 0 1 1-512 0z" />
</svg>
</button>
<button
id="undo-btn"
disabled
title="Undo Last Point"
class="text-xs bg-white rounded-md disabled:opacity-50 flex gap-1 items-center">
<svg
xmlns="http://www.w3.org/2000/svg"
height="1em"
viewBox="0 0 512 512">
<path
d="M48.5 224H40a24 24 0 0 1-24-24V72a24 24 0 0 1 41-17l41.6 41.6a224 224 0 1 1-1 317.8 32 32 0 0 1 45.3-45.3 160 160 0 1 0 1-227.3L185 183a24 24 0 0 1-17 41H48.5z" />
</svg>
</button>
<button
id="clear-btn"
disabled
title="Clear Image"
class="text-xs bg-white rounded-md disabled:opacity-50 flex gap-1 items-center">
<svg
class=""
xmlns="http://www.w3.org/2000/svg"
viewBox="0 0 13 12"
height="1em">
<path
d="M1.6.7 12 11.1M12 .7 1.6 11.1"
stroke="#2E3036"
stroke-width="2" />
</svg>
</button>
</div>
</div>
<div
id="drop-area"
class="flex flex-col items-center justify-center border-2 border-gray-300 border-dashed rounded-xl relative p-20 w-full overflow-hidden">
<div
id="drop-buttons"
class="flex flex-col items-center justify-center space-y-1 text-center relative z-10">
<svg
width="25"
height="25"
viewBox="0 0 25 25"
fill="none"
xmlns="http://www.w3.org/2000/svg">
<path
d="M3.5 24.3a3 3 0 0 1-1.9-.8c-.5-.5-.8-1.2-.8-1.9V2.9c0-.7.3-1.3.8-1.9.6-.5 1.2-.7 2-.7h18.6c.7 0 1.3.2 1.9.7.5.6.7 1.2.7 2v18.6c0 .7-.2 1.4-.7 1.9a3 3 0 0 1-2 .8H3.6Zm0-2.7h18.7V2.9H3.5v18.7Zm2.7-2.7h13.3c.3 0 .5 0 .6-.3v-.7l-3.7-5a.6.6 0 0 0-.6-.2c-.2 0-.4 0-.5.3l-3.5 4.6-2.4-3.3a.6.6 0 0 0-.6-.3c-.2 0-.4.1-.5.3l-2.7 3.6c-.1.2-.2.4 0 .7.1.2.3.3.6.3Z"
fill="#000" />
</svg>
<div class="flex text-sm text-gray-600">
<label
for="file-upload"
class="relative cursor-pointer bg-white rounded-md font-medium text-blue-950 hover:text-blue-700">
<span>Drag and drop your image here</span>
<span class="block text-xs">or</span>
<span class="block text-xs">Click to upload</span>
</label>
</div>
<input
id="file-upload"
name="file-upload"
type="file"
class="sr-only" />
</div>
<canvas id="canvas" class="absolute w-full"></canvas>
<canvas
id="mask"
class="pointer-events-none absolute w-full"></canvas>
</div>
<div class="text-right py-2">
<button
id="share-btn"
class="bg-white rounded-md hover:outline outline-orange-200 disabled:opacity-50 invisible">
<img
src="https://huggingface.co/datasets/huggingface/badges/raw/main/share-to-community-sm.svg" />
</button>
<button
id="download-btn"
title="Copy result (.png)"
disabled
class="p-1 px-2 text-xs font-medium bg-white rounded-2xl outline outline-gray-200 hover:outline-orange-200 disabled:opacity-50"
>
Download Cut-Out
</button>
</div>
</div>
<div>
<div
class="flex gap-3 items-center overflow-x-scroll"
id="image-select">
<h3 class="font-medium">Examples:</h3>
<img
src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/candle/examples/sf.jpg"
class="cursor-pointer w-24 h-24 object-cover" />
<img
src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/candle/examples/bike.jpeg"
class="cursor-pointer w-24 h-24 object-cover" />
<img
src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/candle/examples/000000000077.jpg"
class="cursor-pointer w-24 h-24 object-cover" />
</div>
</div>
</main>
</body>
</html>
| candle/candle-wasm-examples/segment-anything/lib-example.html/0 | {
"file_path": "candle/candle-wasm-examples/segment-anything/lib-example.html",
"repo_id": "candle",
"token_count": 10333
} |
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8" />
<title>Welcome to Candle!</title>
<link data-trunk rel="copy-file" href="yolov8s.safetensors" />
<link data-trunk rel="copy-file" href="bike.jpeg" />
<link data-trunk rel="rust" href="Cargo.toml" data-bin="app" data-type="main" />
<link data-trunk rel="rust" href="Cargo.toml" data-bin="worker" data-type="worker" />
<link rel="stylesheet" href="https://fonts.googleapis.com/css?family=Roboto:300,300italic,700,700italic">
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.1/normalize.css">
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/milligram/1.4.1/milligram.css">
</head>
<body></body>
</html>
| candle/candle-wasm-examples/yolo/index.html/0 | {
"file_path": "candle/candle-wasm-examples/yolo/index.html",
"repo_id": "candle",
"token_count": 322
} |
[package]
name = "tensor-tools"
version.workspace = true
edition.workspace = true
description.workspace = true
repository.workspace = true
keywords.workspace = true
categories.workspace = true
license.workspace = true
[dependencies]
anyhow = { workspace = true }
candle = { workspace = true }
clap = { workspace = true }
rayon = { workspace = true }
safetensors = { workspace = true }
| candle/tensor-tools/Cargo.toml/0 | {
"file_path": "candle/tensor-tools/Cargo.toml",
"repo_id": "candle",
"token_count": 119
} |
apiVersion: apps/v1
kind: Deployment
metadata:
labels: {{ include "labels.standard" . | nindent 4 }}
name: {{ include "name" . }}
namespace: {{ .Release.Namespace }}
{{- if .Values.infisical.enabled }}
annotations:
secrets.infisical.com/auto-reload: "true"
{{- end }}
spec:
progressDeadlineSeconds: 600
{{- if not $.Values.autoscaling.enabled }}
replicas: {{ .Values.replicas }}
{{- end }}
revisionHistoryLimit: 10
selector:
matchLabels: {{ include "labels.standard" . | nindent 6 }}
strategy:
rollingUpdate:
maxSurge: 25%
maxUnavailable: 25%
type: RollingUpdate
template:
metadata:
labels: {{ include "labels.standard" . | nindent 8 }}
{{- if $.Values.envVars.NODE_LOG_STRUCTURED_DATA }}
annotations:
co.elastic.logs/json.expand_keys: "true"
{{- end }}
spec:
{{- if .Values.serviceAccount.enabled }}
serviceAccountName: "{{ .Values.serviceAccount.name | default (include "name" .) }}"
{{- end }}
containers:
- name: chat-ui
image: "{{ .Values.image.repository }}/{{ .Values.image.name }}:{{ .Values.image.tag }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
readinessProbe:
failureThreshold: 30
periodSeconds: 10
httpGet:
path: {{ $.Values.envVars.APP_BASE | default "" }}/healthcheck
port: {{ $.Values.envVars.APP_PORT | default 3000 | int }}
livenessProbe:
failureThreshold: 30
periodSeconds: 10
httpGet:
path: {{ $.Values.envVars.APP_BASE | default "" }}/healthcheck
port: {{ $.Values.envVars.APP_PORT | default 3000 | int }}
ports:
- containerPort: {{ $.Values.envVars.APP_PORT | default 3000 | int }}
name: http
protocol: TCP
{{- if $.Values.monitoring.enabled }}
- containerPort: {{ $.Values.envVars.METRICS_PORT | default 5565 | int }}
name: metrics
protocol: TCP
{{- end }}
resources: {{ toYaml .Values.resources | nindent 12 }}
{{- with $.Values.extraEnv }}
env:
{{- toYaml . | nindent 14 }}
{{- end }}
envFrom:
- configMapRef:
name: {{ include "name" . }}
{{- if $.Values.infisical.enabled }}
- secretRef:
name: {{ include "name" $ }}-secs
{{- end }}
{{- with $.Values.extraEnvFrom }}
{{- toYaml . | nindent 14 }}
{{- end }}
nodeSelector: {{ toYaml .Values.nodeSelector | nindent 8 }}
tolerations: {{ toYaml .Values.tolerations | nindent 8 }}
volumes:
- name: config
configMap:
name: {{ include "name" . }}
| chat-ui/chart/templates/deployment.yaml/0 | {
"file_path": "chat-ui/chart/templates/deployment.yaml",
"repo_id": "chat-ui",
"token_count": 1334
} |
# Amazon Web Services (AWS)
| Feature | Available |
| --------------------------- | --------- |
| [Tools](../tools) | No |
| [Multimodal](../multimodal) | No |
You may specify your Amazon SageMaker instance as an endpoint for Chat UI:
```ini
MODELS=`[{
"name": "your-model",
"displayName": "Your Model",
"description": "Your description",
"parameters": {
"max_new_tokens": 4096
},
"endpoints": [
{
"type" : "aws",
"service" : "sagemaker"
"url": "",
"accessKey": "",
"secretKey" : "",
"sessionToken": "",
"region": "",
"weight": 1
}
]
}]`
```
You can also set `"service": "lambda"` to use a lambda instance.
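For example, a Lambda-backed endpoint could be configured like this (illustrative sketch; the URL, credentials and region are placeholders you must fill in):

```ini
MODELS=`[{
  "name": "your-model",
  "displayName": "Your Model",
  "parameters": {
    "max_new_tokens": 4096
  },
  "endpoints": [
    {
      "type": "aws",
      "service": "lambda",
      "url": "",
      "accessKey": "",
      "secretKey": "",
      "sessionToken": "",
      "region": "",
      "weight": 1
    }
  ]
}]`
```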
You can get the `accessKey` and `secretKey` from your AWS user, under programmatic access.
| chat-ui/docs/source/configuration/models/providers/aws.md/0 | {
"file_path": "chat-ui/docs/source/configuration/models/providers/aws.md",
"repo_id": "chat-ui",
"token_count": 348
} |
# 🤗 Chat UI
Open source chat interface with support for tools, web search, multimodal inputs and many API providers. The app uses MongoDB and SvelteKit behind the scenes. Try the live version of the app called [HuggingChat on hf.co/chat](https://huggingface.co/chat) or [set up your own instance](./installation/spaces).
🔧 **[Tools](./configuration/models/tools)**: Function calling with custom tools and support for [Zero GPU spaces](https://huggingface.co/spaces/enzostvs/zero-gpu-spaces)
🔍 **[Web Search](./configuration/web-search)**: Automated web search, scraping and RAG for all models
🐙 **[Multimodal](./configuration/models/multimodal)**: Accepts image file uploads on supported providers
👤 **[OpenID](./configuration/open-id)**: Optionally set up OpenID for user authentication
<div class="flex gap-x-4">
<div>
Tools
<div class="flex justify-center">
<img class="block dark:hidden" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/chat-ui/tools-light.png" height="auto"/>
<img class="hidden dark:block" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/chat-ui/tools-dark.png" height="auto"/>
</div>
</div>
<div>
Web Search
<div class="flex justify-center">
<img class="block dark:hidden" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/chat-ui/websearch-light.png" height="auto"/>
<img class="hidden dark:block" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/chat-ui/websearch-dark.png" height="auto"/>
</div>
</div>
</div>
## Quickstart
You can quickly have a locally running chat-ui & LLM text-generation server thanks to chat-ui's [llama.cpp server support](https://huggingface.co/docs/chat-ui/configuration/models/providers/llamacpp).
**Step 1 (Start llama.cpp server):**
```bash
# install llama.cpp
brew install llama.cpp
# start llama.cpp server (using hf.co/microsoft/Phi-3-mini-4k-instruct-gguf as an example)
llama-server --hf-repo microsoft/Phi-3-mini-4k-instruct-gguf --hf-file Phi-3-mini-4k-instruct-q4.gguf -c 4096
```
A local LLaMA.cpp HTTP Server will start on `http://localhost:8080`. Read more [here](https://huggingface.co/docs/chat-ui/configuration/models/providers/llamacpp).
**Step 2 (tell chat-ui to use local llama.cpp server):**
Add the following to your `.env.local`:
```ini
MODELS=`[
{
"name": "Local microsoft/Phi-3-mini-4k-instruct-gguf",
"tokenizer": "microsoft/Phi-3-mini-4k-instruct-gguf",
"preprompt": "",
"chatPromptTemplate": "<s>{{preprompt}}{{#each messages}}{{#ifUser}}<|user|>\n{{content}}<|end|>\n<|assistant|>\n{{/ifUser}}{{#ifAssistant}}{{content}}<|end|>\n{{/ifAssistant}}{{/each}}",
"parameters": {
"stop": ["<|end|>", "<|endoftext|>", "<|assistant|>"],
"temperature": 0.7,
"max_new_tokens": 1024,
"truncate": 3071
},
"endpoints": [{
"type" : "llamacpp",
"baseURL": "http://localhost:8080"
}],
},
]`
```
Read more [here](https://huggingface.co/docs/chat-ui/configuration/models/providers/llamacpp).
**Step 3 (make sure you have MongoDB running locally):**
```bash
docker run -d -p 27017:27017 --name mongo-chatui mongo:latest
```
Read more [here](https://github.com/huggingface/chat-ui?tab=readme-ov-file#database).
**Step 4 (start chat-ui):**
```bash
git clone https://github.com/huggingface/chat-ui
cd chat-ui
npm install
npm run dev -- --open
```
Read more [here](https://github.com/huggingface/chat-ui?tab=readme-ov-file#launch).
<div class="flex justify-center">
<img class="block dark:hidden" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/chat-ui/llamacpp-light.png" height="auto"/>
<img class="hidden dark:block" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/chat-ui/llamacpp-dark.png" height="auto"/>
</div>
| chat-ui/docs/source/index.md/0 | {
"file_path": "chat-ui/docs/source/index.md",
"repo_id": "chat-ui",
"token_count": 1435
} |
export function clickOutside(element: HTMLElement, callbackFunction: () => void) {
function onClick(event: MouseEvent) {
if (!element.contains(event.target as Node)) {
callbackFunction();
}
}
document.body.addEventListener("click", onClick);
return {
update(newCallbackFunction: () => void) {
callbackFunction = newCallbackFunction;
},
destroy() {
document.body.removeEventListener("click", onClick);
},
};
}
| chat-ui/src/lib/actions/clickOutside.ts/0 | {
"file_path": "chat-ui/src/lib/actions/clickOutside.ts",
"repo_id": "chat-ui",
"token_count": 144
} |
<script lang="ts">
import CarbonEarth from "~icons/carbon/earth";
import CarbonArrowUpRight from "~icons/carbon/arrow-up-right";
import BIMeta from "~icons/bi/meta";
import CarbonCode from "~icons/carbon/code";
import type { Model } from "$lib/types/Model";
interface Props {
model: Pick<
Model,
"name" | "datasetName" | "websiteUrl" | "modelUrl" | "datasetUrl" | "hasInferenceAPI"
>;
variant?: "light" | "dark";
}
let { model, variant = "light" }: Props = $props();
</script>
<div
class="flex items-center gap-5 rounded-xl bg-gray-100 px-3 py-2 text-xs sm:text-sm
{variant === 'dark'
? 'text-gray-600 dark:bg-gray-800 dark:text-gray-300'
: 'text-gray-800 dark:bg-gray-100 dark:text-gray-600'}"
>
<a
href={model.modelUrl || "https://huggingface.co/" + model.name}
target="_blank"
rel="noreferrer"
class="flex items-center hover:underline"
><CarbonArrowUpRight class="mr-1.5 shrink-0 text-xs text-gray-400" />
Model
<div class="max-sm:hidden"> page</div></a
>
{#if model.datasetName || model.datasetUrl}
<a
href={model.datasetUrl || "https://huggingface.co/datasets/" + model.datasetName}
target="_blank"
rel="noreferrer"
class="flex items-center hover:underline"
><CarbonArrowUpRight class="mr-1.5 shrink-0 text-xs text-gray-400" />
Dataset
<div class="max-sm:hidden"> page</div></a
>
{/if}
{#if model.hasInferenceAPI}
<a
href={"https://huggingface.co/playground?modelId=" + model.name}
target="_blank"
rel="noreferrer"
class="flex items-center hover:underline"
><CarbonCode class="mr-1.5 shrink-0 text-xs text-gray-400" />
API
</a>
{/if}
{#if model.websiteUrl}
<a
href={model.websiteUrl}
target="_blank"
class="ml-auto flex items-center hover:underline"
rel="noreferrer"
>
{#if model.name.startsWith("meta-llama/Meta-Llama")}
<BIMeta class="mr-1.5 shrink-0 text-xs text-gray-400" />
Built with Llama
{:else}
<CarbonEarth class="mr-1.5 shrink-0 text-xs text-gray-400" />
Website
{/if}
</a>
{/if}
</div>
| chat-ui/src/lib/components/ModelCardMetadata.svelte/0 | {
"file_path": "chat-ui/src/lib/components/ModelCardMetadata.svelte",
"repo_id": "chat-ui",
"token_count": 901
} |
<script lang="ts">
import CarbonWikis from "~icons/carbon/wikis";
import CarbonTools from "~icons/carbon/tools";
import CarbonCamera from "~icons/carbon/camera";
import CarbonCode from "~icons/carbon/code";
import CarbonEmail from "~icons/carbon/email";
import CarbonCloud from "~icons/carbon/cloud-upload";
import CarbonTerminal from "~icons/carbon/terminal";
import CarbonGame from "~icons/carbon/game-console";
import CarbonChat from "~icons/carbon/chat-bot";
import CarbonSpeaker from "~icons/carbon/volume-up";
import CarbonVideo from "~icons/carbon/video";
interface Props {
color: string;
icon: string;
size?: "xs" | "sm" | "md" | "lg";
}
let { color, icon, size = "md" }: Props = $props();
let gradientColor = $derived(
(() => {
switch (color) {
case "purple":
return "#653789";
case "blue":
return "#375889";
case "green":
return "#37894E";
case "yellow":
return "#897C37";
case "red":
return "#893737";
default:
return "#FFF";
}
})()
);
let iconEl = $state(CarbonWikis);
switch (icon) {
case "wikis":
iconEl = CarbonWikis;
break;
case "tools":
iconEl = CarbonTools;
break;
case "camera":
iconEl = CarbonCamera;
break;
case "code":
iconEl = CarbonCode;
break;
case "email":
iconEl = CarbonEmail;
break;
case "cloud":
iconEl = CarbonCloud;
break;
case "terminal":
iconEl = CarbonTerminal;
break;
case "game":
iconEl = CarbonGame;
break;
case "chat":
iconEl = CarbonChat;
break;
case "speaker":
iconEl = CarbonSpeaker;
break;
case "video":
iconEl = CarbonVideo;
break;
}
let sizeClass = $derived(
(() => {
switch (size) {
case "xs":
return "size-4";
case "sm":
return "size-8";
case "md":
return "size-14";
case "lg":
return "size-24";
}
})()
);
const SvelteComponent = $derived(iconEl);
</script>
<div class="flex {sizeClass} relative items-center justify-center">
<svg xmlns="http://www.w3.org/2000/svg" class="absolute {sizeClass} h-full" viewBox="0 0 52 58">
<defs>
<linearGradient id="gradient-{gradientColor}" gradientTransform="rotate(90)">
<stop offset="0%" stop-color="#0E1523" />
<stop offset="100%" stop-color={gradientColor} />
</linearGradient>
<mask id="mask">
<path
d="M22.3043 1.2486C23.4279 0.603043 24.7025 0.263184 26 0.263184C27.2975 0.263184 28.5721 0.603043 29.6957 1.2486L48.3043 11.9373C49.4279 12.5828 50.361 13.5113 51.0097 14.6294C51.6584 15.7475 52 17.0158 52 18.3069V39.6902C52 40.9813 51.6584 42.2496 51.0097 43.3677C50.361 44.4858 49.4279 45.4143 48.3043 46.0598L29.6957 56.7514C28.5721 57.397 27.2975 57.7369 26 57.7369C24.7025 57.7369 23.4279 57.397 22.3043 56.7514L3.6957 46.0598C2.57209 45.4143 1.63904 44.4858 0.990308 43.3677C0.341578 42.2496 3.34785e-05 40.9813 5.18628e-07 39.6902V18.3099C-0.000485629 17.0183 0.340813 15.7494 0.989568 14.6307C1.63832 13.512 2.57166 12.5831 3.6957 11.9373L22.3043 1.2486Z"
fill="white"
/>
</mask>
</defs>
<rect width="100%" height="100%" fill="url(#gradient-{gradientColor})" mask="url(#mask)" />
</svg>
<SvelteComponent class="relative {sizeClass} scale-50 text-clip text-gray-200" />
</div>
| chat-ui/src/lib/components/ToolLogo.svelte/0 | {
"file_path": "chat-ui/src/lib/components/ToolLogo.svelte",
"repo_id": "chat-ui",
"token_count": 1446
} |
<script lang="ts">
import { createEventDispatcher } from "svelte";
import { page } from "$app/stores";
import type { MessageFile } from "$lib/types/Message";
import CarbonClose from "~icons/carbon/close";
import CarbonDocumentBlank from "~icons/carbon/document-blank";
import CarbonDownload from "~icons/carbon/download";
import CarbonDocument from "~icons/carbon/document";
import Modal from "../Modal.svelte";
import AudioPlayer from "../players/AudioPlayer.svelte";
import EosIconsLoading from "~icons/eos-icons/loading";
import { base } from "$app/paths";
interface Props {
file: MessageFile;
canClose?: boolean;
}
let { file, canClose = true }: Props = $props();
let showModal = $state(false);
let urlNotTrailing = $derived($page.url.pathname.replace(/\/$/, ""));
const dispatch = createEventDispatcher<{ close: void }>();
function truncateMiddle(text: string, maxLength: number): string {
if (text.length <= maxLength) {
return text;
}
const halfLength = Math.floor((maxLength - 1) / 2);
const start = text.substring(0, halfLength);
const end = text.substring(text.length - halfLength);
return `${start}…${end}`;
}
const isImage = (mime: string) =>
mime.startsWith("image/") || mime === "webp" || mime === "jpeg" || mime === "png";
const isAudio = (mime: string) =>
mime.startsWith("audio/") || mime === "mp3" || mime === "wav" || mime === "x-wav";
const isVideo = (mime: string) =>
mime.startsWith("video/") || mime === "mp4" || mime === "x-mpeg";
const isPlainText = (mime: string) =>
mime === "text/plain" ||
mime === "text/csv" ||
mime === "text/markdown" ||
mime === "application/json" ||
mime === "application/xml" ||
mime === "application/vnd.chatui.clipboard";
let isClickable = $derived(isImage(file.mime) || isPlainText(file.mime));
</script>
{#if showModal && isClickable}
<!-- show the image file full screen, click outside to exit -->
<Modal width="sm:max-w-[800px]" on:close={() => (showModal = false)}>
{#if isImage(file.mime)}
{#if file.type === "hash"}
<img
src={urlNotTrailing + "/output/" + file.value}
alt="input from user"
class="aspect-auto"
/>
{:else}
<!-- handle the case where this is a base64 encoded image -->
<img
src={`data:${file.mime};base64,${file.value}`}
alt="input from user"
class="aspect-auto"
/>
{/if}
{:else if isPlainText(file.mime)}
<div class="relative flex h-full w-full flex-col gap-4 p-4">
<h3 class="-mb-4 pt-2 text-xl font-bold">{file.name}</h3>
{#if file.mime === "application/vnd.chatui.clipboard"}
<p class="text-sm text-gray-500">
If you prefer to inject clipboard content directly in the chat, you can disable this
feature in the
<a href={`${base}/settings`} class="underline">settings page</a>.
</p>
{/if}
<button
class="absolute right-4 top-4 text-xl text-gray-500 hover:text-gray-800"
onclick={() => (showModal = false)}
>
<CarbonClose class="text-xl" />
</button>
{#if file.type === "hash"}
{#await fetch(urlNotTrailing + "/output/" + file.value).then((res) => res.text())}
<div class="flex h-full w-full items-center justify-center">
<EosIconsLoading class="text-xl" />
</div>
{:then result}
<pre
class="w-full whitespace-pre-wrap break-words pt-0 text-sm"
class:font-sans={file.mime === "text/plain" ||
file.mime === "application/vnd.chatui.clipboard"}
class:font-mono={file.mime !== "text/plain" &&
file.mime !== "application/vnd.chatui.clipboard"}>{result}</pre>
{/await}
{:else}
<pre
class="w-full whitespace-pre-wrap break-words pt-0 text-sm"
class:font-sans={file.mime === "text/plain" ||
file.mime === "application/vnd.chatui.clipboard"}
class:font-mono={file.mime !== "text/plain" &&
file.mime !== "application/vnd.chatui.clipboard"}>{atob(file.value)}</pre>
{/if}
</div>
{/if}
</Modal>
{/if}
<div
onclick={() => isClickable && (showModal = true)}
onkeydown={(e) => {
if (!isClickable) {
return;
}
if (e.key === "Enter" || e.key === " ") {
showModal = true;
}
}}
class:clickable={isClickable}
role="button"
tabindex="0"
>
<div class="group relative flex items-center rounded-xl shadow-sm">
{#if isImage(file.mime)}
<div class="size-48 overflow-hidden rounded-xl">
<img
src={file.type === "base64"
? `data:${file.mime};base64,${file.value}`
: urlNotTrailing + "/output/" + file.value}
alt={file.name}
class="h-full w-full bg-gray-200 object-cover dark:bg-gray-800"
/>
</div>
{:else if isAudio(file.mime)}
<AudioPlayer
src={file.type === "base64"
? `data:${file.mime};base64,${file.value}`
: urlNotTrailing + "/output/" + file.value}
name={truncateMiddle(file.name, 28)}
/>
{:else if isVideo(file.mime)}
<div
class="border-1 w-72 overflow-clip rounded-xl border-gray-200 bg-white dark:border-gray-800 dark:bg-gray-900"
>
<!-- svelte-ignore a11y_media_has_caption -->
<video
src={file.type === "base64"
? `data:${file.mime};base64,${file.value}`
: urlNotTrailing + "/output/" + file.value}
controls
></video>
</div>
{:else if isPlainText(file.mime)}
<div
class="flex h-14 w-72 items-center gap-2 overflow-hidden rounded-xl border border-gray-200 bg-white p-2 dark:border-gray-800 dark:bg-gray-900"
class:file-hoverable={isClickable}
>
<div
class="grid size-10 flex-none place-items-center rounded-lg bg-gray-100 dark:bg-gray-800"
>
<CarbonDocument class="text-base text-gray-700 dark:text-gray-300" />
</div>
<dl class="flex flex-col items-start truncate leading-tight">
<dd class="text-sm">
{truncateMiddle(file.name, 28)}
</dd>
{#if file.mime === "application/vnd.chatui.clipboard"}
<dt class="text-xs text-gray-400">Clipboard source</dt>
{:else}
<dt class="text-xs text-gray-400">{file.mime}</dt>
{/if}
</dl>
</div>
{:else if file.mime === "octet-stream"}
<div
class="flex h-14 w-72 items-center gap-2 overflow-hidden rounded-xl border border-gray-200 bg-white p-2 dark:border-gray-800 dark:bg-gray-900"
class:file-hoverable={isClickable}
>
<div
class="grid size-10 flex-none place-items-center rounded-lg bg-gray-100 dark:bg-gray-800"
>
<CarbonDocumentBlank class="text-base text-gray-700 dark:text-gray-300" />
</div>
<dl class="flex flex-grow flex-col truncate leading-tight">
<dd class="text-sm">
{truncateMiddle(file.name, 28)}
</dd>
<dt class="text-xs text-gray-400">File type could not be determined</dt>
</dl>
<a
href={file.type === "base64"
? `data:application/octet-stream;base64,${file.value}`
: urlNotTrailing + "/output/" + file.value}
download={file.name}
class="ml-auto flex-none"
>
<CarbonDownload class="text-base text-gray-700 dark:text-gray-300" />
</a>
</div>
{:else}
<div
class="flex h-14 w-72 items-center gap-2 overflow-hidden rounded-xl border border-gray-200 bg-white p-2 dark:border-gray-800 dark:bg-gray-900"
class:file-hoverable={isClickable}
>
<div
class="grid size-10 flex-none place-items-center rounded-lg bg-gray-100 dark:bg-gray-800"
>
<CarbonDocumentBlank class="text-base text-gray-700 dark:text-gray-300" />
</div>
<dl class="flex flex-col items-start truncate leading-tight">
<dd class="text-sm">
{truncateMiddle(file.name, 28)}
</dd>
<dt class="text-xs text-gray-400">{file.mime}</dt>
</dl>
</div>
{/if}
<!-- add a button on top that removes the image -->
{#if canClose}
<button
class="absolute -right-2 -top-2 z-10 grid size-6 place-items-center rounded-full border bg-black group-hover:visible dark:border-gray-700"
class:invisible={navigator.maxTouchPoints === 0}
onclick={(e) => {
e.preventDefault();
e.stopPropagation();
dispatch("close");
}}
>
<CarbonClose class=" text-xs text-white" />
</button>
{/if}
</div>
</div>
| chat-ui/src/lib/components/chat/UploadedFile.svelte/0 | {
"file_path": "chat-ui/src/lib/components/chat/UploadedFile.svelte",
"repo_id": "chat-ui",
"token_count": 3554
} |
import type { ObjectId } from "mongodb";
import updateSearchAssistant from "./01-update-search-assistants";
import updateAssistantsModels from "./02-update-assistants-models";
import type { Database } from "$lib/server/database";
import addToolsToSettings from "./03-add-tools-in-settings";
import updateMessageUpdates from "./04-update-message-updates";
import updateMessageFiles from "./05-update-message-files";
import trimMessageUpdates from "./06-trim-message-updates";
import resetTools from "./07-reset-tools-in-settings";
import updateFeaturedToReview from "./08-update-featured-to-review";
import deleteEmptyConversations from "./09-delete-empty-conversations";
export interface Migration {
_id: ObjectId;
name: string;
up: (client: Database) => Promise<boolean>;
down?: (client: Database) => Promise<boolean>;
runForFreshInstall?: "only" | "never"; // leave unspecified to run for both
runForHuggingChat?: "only" | "never"; // leave unspecified to run for both
runEveryTime?: boolean;
}
export const migrations: Migration[] = [
updateSearchAssistant,
updateAssistantsModels,
addToolsToSettings,
updateMessageUpdates,
updateMessageFiles,
trimMessageUpdates,
resetTools,
updateFeaturedToReview,
deleteEmptyConversations,
];
| chat-ui/src/lib/migrations/routines/index.ts/0 | {
"file_path": "chat-ui/src/lib/migrations/routines/index.ts",
"repo_id": "chat-ui",
"token_count": 376
} |
import { z } from "zod";
import { env } from "$env/dynamic/private";
import type { Endpoint } from "../endpoints";
import type { TextGenerationStreamOutput } from "@huggingface/inference";
import type { Cohere, CohereClient } from "cohere-ai";
import { buildPrompt } from "$lib/buildPrompt";
import { ToolResultStatus, type ToolCall } from "$lib/types/Tool";
import { pipeline, Writable, type Readable } from "node:stream";
import { toolHasName } from "$lib/utils/tools";
export const endpointCohereParametersSchema = z.object({
weight: z.number().int().positive().default(1),
model: z.any(),
type: z.literal("cohere"),
apiKey: z.string().default(env.COHERE_API_TOKEN),
clientName: z.string().optional(),
raw: z.boolean().default(false),
forceSingleStep: z.boolean().default(true),
});
export async function endpointCohere(
input: z.input<typeof endpointCohereParametersSchema>
): Promise<Endpoint> {
const { apiKey, clientName, model, raw, forceSingleStep } =
endpointCohereParametersSchema.parse(input);
let cohere: CohereClient;
try {
cohere = new (await import("cohere-ai")).CohereClient({
token: apiKey,
clientName,
});
} catch (e) {
throw new Error("Failed to import cohere-ai", { cause: e });
}
return async ({ messages, preprompt, generateSettings, continueMessage, tools, toolResults }) => {
let system = preprompt;
if (messages?.[0]?.from === "system") {
system = messages[0].content;
}
// Tools must use [A-z_] for their names and directly_answer is banned
// It's safe to convert the tool names because we treat - and _ the same
tools = tools
?.filter((tool) => !toolHasName("directly_answer", tool))
.map((tool) => ({ ...tool, name: tool.name.replaceAll("-", "_") }));
const parameters = { ...model.parameters, ...generateSettings };
return (async function* () {
let stream;
let tokenId = 0;
if (raw) {
const prompt = await buildPrompt({
messages,
model,
preprompt: system,
continueMessage,
tools,
toolResults,
});
stream = await cohere.chatStream({
forceSingleStep,
message: prompt,
rawPrompting: true,
model: model.id ?? model.name,
p: parameters?.top_p,
k: parameters?.top_k,
maxTokens: parameters?.max_new_tokens,
temperature: parameters?.temperature,
stopSequences: parameters?.stop,
frequencyPenalty: parameters?.frequency_penalty,
});
} else {
const formattedMessages = messages
.filter((message) => message.from !== "system")
.map((message) => ({
role: message.from === "user" ? "USER" : "CHATBOT",
message: message.content,
})) satisfies Cohere.Message[];
stream = await cohere
.chatStream({
forceSingleStep,
model: model.id ?? model.name,
chatHistory: formattedMessages.slice(0, -1),
message: formattedMessages[formattedMessages.length - 1].message,
preamble: system,
p: parameters?.top_p,
k: parameters?.top_k,
maxTokens: parameters?.max_new_tokens,
temperature: parameters?.temperature,
stopSequences: parameters?.stop,
frequencyPenalty: parameters?.frequency_penalty,
tools,
toolResults:
toolResults?.length && toolResults?.length > 0
? toolResults?.map((toolResult) => {
if (toolResult.status === ToolResultStatus.Error) {
return { call: toolResult.call, outputs: [{ error: toolResult.message }] };
}
return { call: toolResult.call, outputs: toolResult.outputs };
})
: undefined,
})
.catch(async (err) => {
if (!err.body) throw err;
// Decode the error message and throw
const message = await convertStreamToBuffer(err.body).catch(() => {
throw err;
});
throw Error(message, { cause: err });
});
}
for await (const output of stream) {
if (output.eventType === "text-generation") {
yield {
token: {
id: tokenId++,
text: output.text,
logprob: 0,
special: false,
},
generated_text: null,
details: null,
} satisfies TextGenerationStreamOutput;
} else if (output.eventType === "tool-calls-generation") {
yield {
token: {
id: tokenId++,
text: "",
logprob: 0,
special: true,
toolCalls: output.toolCalls as ToolCall[],
},
generated_text: null,
details: null,
};
} else if (output.eventType === "stream-end") {
if (["ERROR", "ERROR_TOXIC", "ERROR_LIMIT"].includes(output.finishReason)) {
throw new Error(output.finishReason);
}
yield {
token: {
id: tokenId++,
text: "",
logprob: 0,
special: true,
},
generated_text: output.response.text,
details: null,
};
}
}
})();
};
}
async function convertStreamToBuffer(webReadableStream: Readable) {
return new Promise<string>((resolve, reject) => {
const chunks: Buffer[] = [];
pipeline(
webReadableStream,
new Writable({
write(chunk, _, callback) {
chunks.push(chunk);
callback();
},
}),
(err) => {
if (err) {
reject(err);
} else {
resolve(Buffer.concat(chunks).toString("utf-8"));
}
}
);
});
}
| chat-ui/src/lib/server/endpoints/cohere/endpointCohere.ts/0 | {
"file_path": "chat-ui/src/lib/server/endpoints/cohere/endpointCohere.ts",
"repo_id": "chat-ui",
"token_count": 2221
} |
import type { Conversation } from "$lib/types/Conversation";
import type { MessageFile } from "$lib/types/Message";
import { sha256 } from "$lib/utils/sha256";
import { fileTypeFromBuffer } from "file-type";
import { collections } from "$lib/server/database";
export async function uploadFile(file: File, conv: Conversation): Promise<MessageFile> {
const sha = await sha256(await file.text());
const buffer = await file.arrayBuffer();
// Attempt to detect the mime type of the file, fallback to the uploaded mime
const mime = await fileTypeFromBuffer(buffer).then((fileType) => fileType?.mime ?? file.type);
const upload = collections.bucket.openUploadStream(`${conv._id}-${sha}`, {
metadata: { conversation: conv._id.toString(), mime },
});
upload.write(buffer as unknown as Buffer);
upload.end();
// only resolve with the file descriptor once the upload emits a finish event, or reject after a 20s timeout
return new Promise((resolve, reject) => {
upload.once("finish", () =>
resolve({ type: "hash", value: sha, mime: file.type, name: file.name })
);
upload.once("error", reject);
setTimeout(() => reject(new Error("Upload timed out")), 20_000);
});
}
| chat-ui/src/lib/server/files/uploadFile.ts/0 | {
"file_path": "chat-ui/src/lib/server/files/uploadFile.ts",
"repo_id": "chat-ui",
"token_count": 364
} |
import { env } from "$env/dynamic/private";
import { logger } from "$lib/server/logger";
export async function sendSlack(text: string) {
if (!env.WEBHOOK_URL_REPORT_ASSISTANT) {
logger.warn("WEBHOOK_URL_REPORT_ASSISTANT is not set, tried to send a slack message.");
return;
}
const res = await fetch(env.WEBHOOK_URL_REPORT_ASSISTANT, {
method: "POST",
headers: {
"Content-type": "application/json",
},
body: JSON.stringify({
text,
}),
});
if (!res.ok) {
logger.error(`Webhook message failed. ${res.statusText} ${res.text}`);
}
}
| chat-ui/src/lib/server/sendSlack.ts/0 | {
"file_path": "chat-ui/src/lib/server/sendSlack.ts",
"repo_id": "chat-ui",
"token_count": 226
} |
import { z } from "zod";
import { env } from "$env/dynamic/private";
import JSON5 from "json5";
// RATE_LIMIT is the legacy way to define the messages-per-minute limit
export const usageLimitsSchema = z
.object({
conversations: z.coerce.number().optional(), // how many conversations
messages: z.coerce.number().optional(), // how many messages in a conversation
assistants: z.coerce.number().optional(), // how many assistants
messageLength: z.coerce.number().optional(), // how long can a message be before we cut it off
messagesPerMinute: z
.preprocess((val) => {
if (val === undefined) {
return env.RATE_LIMIT;
}
return val;
}, z.coerce.number().optional())
.optional(), // how many messages per minute
tools: z.coerce.number().optional(), // how many tools
})
.optional();
export const usageLimits = usageLimitsSchema.parse(JSON5.parse(env.USAGE_LIMITS));
| chat-ui/src/lib/server/usageLimits.ts/0 | {
"file_path": "chat-ui/src/lib/server/usageLimits.ts",
"repo_id": "chat-ui",
"token_count": 309
} |
import type { WebSearchSource } from "$lib/types/WebSearch";
import { env } from "$env/dynamic/private";
export default async function search(query: string): Promise<WebSearchSource[]> {
// const params = {
// q: query,
// // You can add other parameters if needed, like 'count', 'offset', etc.
// };
const response = await fetch(
"https://api.bing.microsoft.com/v7.0/search" + "?q=" + encodeURIComponent(query),
{
method: "GET",
headers: {
"Ocp-Apim-Subscription-Key": env.BING_SUBSCRIPTION_KEY,
"Content-type": "application/json",
},
}
);
/* eslint-disable @typescript-eslint/no-explicit-any */
const data = (await response.json()) as Record<string, any>;
if (!response.ok) {
throw new Error(
data["message"] ?? `Bing API returned error code ${response.status} - ${response.statusText}`
);
}
// Adapt the data structure from the Bing response to match the WebSearchSource type
const webPages = data["webPages"]?.["value"] ?? [];
return webPages.map((page: any) => ({
title: page.name,
link: page.url,
text: page.snippet,
displayLink: page.displayUrl,
}));
}
| chat-ui/src/lib/server/websearch/search/endpoints/bing.ts/0 | {
"file_path": "chat-ui/src/lib/server/websearch/search/endpoints/bing.ts",
"repo_id": "chat-ui",
"token_count": 408
} |
import { browser } from "$app/environment";
import { invalidate } from "$app/navigation";
import { base } from "$app/paths";
import { UrlDependency } from "$lib/types/UrlDependency";
import type { ObjectId } from "mongodb";
import { getContext, setContext } from "svelte";
import { type Writable, writable, get } from "svelte/store";
type SettingsStore = {
shareConversationsWithModelAuthors: boolean;
hideEmojiOnSidebar: boolean;
ethicsModalAccepted: boolean;
ethicsModalAcceptedAt: Date | null;
activeModel: string;
customPrompts: Record<string, string>;
recentlySaved: boolean;
assistants: Array<ObjectId | string>;
tools?: Array<string>;
disableStream: boolean;
directPaste: boolean;
};
type SettingsStoreWritable = Writable<SettingsStore> & {
instantSet: (settings: Partial<SettingsStore>) => Promise<void>;
};
export function useSettingsStore() {
return getContext<SettingsStoreWritable>("settings");
}
export function createSettingsStore(initialValue: Omit<SettingsStore, "recentlySaved">) {
const baseStore = writable({ ...initialValue, recentlySaved: false });
let timeoutId: NodeJS.Timeout;
async function setSettings(settings: Partial<SettingsStore>) {
baseStore.update((s) => ({
...s,
...settings,
}));
clearTimeout(timeoutId);
if (browser) {
timeoutId = setTimeout(async () => {
await fetch(`${base}/settings`, {
method: "POST",
headers: {
"Content-Type": "application/json",
},
body: JSON.stringify({
...get(baseStore),
...settings,
}),
});
invalidate(UrlDependency.ConversationList);
// set recentlySaved to true for 3s
baseStore.update((s) => ({
...s,
recentlySaved: true,
}));
setTimeout(() => {
baseStore.update((s) => ({
...s,
recentlySaved: false,
}));
}, 3000);
invalidate(UrlDependency.ConversationList);
}, 300);
// debounce server calls by 300ms
}
}
async function instantSet(settings: Partial<SettingsStore>) {
baseStore.update((s) => ({
...s,
...settings,
}));
if (browser) {
await fetch(`${base}/settings`, {
method: "POST",
headers: {
"Content-Type": "application/json",
},
body: JSON.stringify({
...get(baseStore),
...settings,
}),
});
invalidate(UrlDependency.ConversationList);
}
}
const newStore = {
subscribe: baseStore.subscribe,
set: setSettings,
instantSet,
update: (fn: (s: SettingsStore) => SettingsStore) => {
setSettings(fn(get(baseStore)));
},
} satisfies SettingsStoreWritable;
setContext("settings", newStore);
return newStore;
}
| chat-ui/src/lib/stores/settings.ts/0 | {
"file_path": "chat-ui/src/lib/stores/settings.ts",
"repo_id": "chat-ui",
"token_count": 1007
} |
const file2base64 = (file: File): Promise<string> => {
return new Promise<string>((resolve, reject) => {
const reader = new FileReader();
reader.readAsDataURL(file);
reader.onload = () => {
const dataUrl = reader.result as string;
const base64 = dataUrl.split(",")[1];
resolve(base64);
};
reader.onerror = (error) => reject(error);
});
};
export default file2base64;
| chat-ui/src/lib/utils/file2base64.ts/0 | {
"file_path": "chat-ui/src/lib/utils/file2base64.ts",
"repo_id": "chat-ui",
"token_count": 142
} |
export async function captureScreen(): Promise<string> {
let stream: MediaStream | undefined;
try {
// This will show the native browser dialog for screen capture
stream = await navigator.mediaDevices.getDisplayMedia({
video: true,
audio: false,
});
// Create a canvas element to capture the screenshot
const canvas = document.createElement("canvas");
const video = document.createElement("video");
// Wait for the video to load metadata
await new Promise((resolve) => {
video.onloadedmetadata = () => {
canvas.width = video.videoWidth;
canvas.height = video.videoHeight;
video.play();
resolve(null);
};
if (stream) {
video.srcObject = stream;
} else {
throw Error("No stream available");
}
});
// Draw the video frame to canvas
const context = canvas.getContext("2d");
context?.drawImage(video, 0, 0, canvas.width, canvas.height);
// Convert to base64
return canvas.toDataURL("image/png");
} catch (error) {
console.error("Error capturing screenshot:", error);
throw error;
} finally {
// Stop all tracks
if (stream) {
stream.getTracks().forEach((track) => track.stop());
}
}
}
| chat-ui/src/lib/utils/screenshot.ts/0 | {
"file_path": "chat-ui/src/lib/utils/screenshot.ts",
"repo_id": "chat-ui",
"token_count": 402
} |
import { collections } from "$lib/server/database";
import { ObjectId } from "mongodb";
import { describe, expect, it } from "vitest";
import { convertLegacyConversation } from "./convertLegacyConversation";
import { insertLegacyConversation } from "./treeHelpers.spec";
describe("convertLegacyConversation", () => {
it("should convert a legacy conversation", async () => {
const convId = await insertLegacyConversation();
const conv = await collections.conversations.findOne({ _id: new ObjectId(convId) });
if (!conv) throw new Error("Conversation not found");
const newConv = convertLegacyConversation(conv);
expect(newConv.rootMessageId).toBe(newConv.messages[0].id);
expect(newConv.messages[0].ancestors).toEqual([]);
expect(newConv.messages[1].ancestors).toEqual([newConv.messages[0].id]);
expect(newConv.messages[0].children).toEqual([newConv.messages[1].id]);
});
it("should work on empty conversations", async () => {
const conv = {
_id: new ObjectId(),
rootMessageId: undefined,
messages: [],
};
const newConv = convertLegacyConversation(conv);
expect(newConv.rootMessageId).toBe(undefined);
expect(newConv.messages).toEqual([]);
});
});
| chat-ui/src/lib/utils/tree/convertLegacyConversation.spec.ts/0 | {
"file_path": "chat-ui/src/lib/utils/tree/convertLegacyConversation.spec.ts",
"repo_id": "chat-ui",
"token_count": 425
} |
import { env } from "$env/dynamic/private";
import { Client } from "@gradio/client";
export async function GET({ url }) {
if (env.COMMUNITY_TOOLS !== "true") {
return new Response("Community tools are not enabled", { status: 403 });
}
const space = url.searchParams.get("space");
if (!space) {
return new Response("Missing space", { status: 400 });
}
// Extract namespace from space URL or use as-is if it's already in namespace format
let namespace = null;
if (space.startsWith("https://huggingface.co/spaces/")) {
namespace = space.split("/").slice(-2).join("/");
} else if (space.match(/^[^/]+\/[^/]+$/)) {
namespace = space;
}
if (!namespace) {
return new Response(
"Invalid space name. Specify a namespace or a full URL on huggingface.co.",
{ status: 400 }
);
}
try {
const api = await (await Client.connect(namespace)).view_api();
return new Response(JSON.stringify(api), {
status: 200,
headers: {
"Content-Type": "application/json",
},
});
} catch (e) {
return new Response("Error fetching space API. Is the name correct?", {
status: 400,
headers: {
"Content-Type": "application/json",
},
});
}
}
| chat-ui/src/routes/api/spaces-config/+server.ts/0 | {
"file_path": "chat-ui/src/routes/api/spaces-config/+server.ts",
"repo_id": "chat-ui",
"token_count": 428
} |
import { authCondition } from "$lib/server/auth";
import { collections } from "$lib/server/database";
import { MetricsServer } from "$lib/server/metrics.js";
import { error } from "@sveltejs/kit";
import { ObjectId } from "mongodb";
import { z } from "zod";
export async function POST({ params, request, locals }) {
const { score } = z
.object({
score: z.number().int().min(-1).max(1),
})
.parse(await request.json());
const conversationId = new ObjectId(params.id);
const messageId = params.messageId;
// aggregate votes per model in order to detect model performance degradation
const model = await collections.conversations
.findOne(
{
_id: conversationId,
...authCondition(locals),
},
{ projection: { model: 1 } }
)
.then((c) => c?.model);
if (model) {
if (score === 1) {
MetricsServer.getMetrics().model.votesPositive.inc({ model });
} else {
MetricsServer.getMetrics().model.votesNegative.inc({ model });
}
}
const document = await collections.conversations.updateOne(
{
_id: conversationId,
...authCondition(locals),
"messages.id": messageId,
},
{
...(score !== 0
? {
$set: {
"messages.$.score": score,
},
}
: { $unset: { "messages.$.score": "" } }),
}
);
if (!document.matchedCount) {
error(404, "Message not found");
}
return new Response();
}
| chat-ui/src/routes/conversation/[id]/message/[messageId]/vote/+server.ts/0 | {
"file_path": "chat-ui/src/routes/conversation/[id]/message/[messageId]/vote/+server.ts",
"repo_id": "chat-ui",
"token_count": 524
} |
<script lang="ts">
import { marked } from "marked";
import privacy from "../../../PRIVACY.md?raw";
</script>
<div class="overflow-auto p-6">
<div class="prose mx-auto px-4 pb-24 pt-6 dark:prose-invert md:pt-12">
<!-- eslint-disable-next-line svelte/no-at-html-tags -->
{@html marked(privacy, { gfm: true })}
</div>
</div>
| chat-ui/src/routes/privacy/+page.svelte/0 | {
"file_path": "chat-ui/src/routes/privacy/+page.svelte",
"repo_id": "chat-ui",
"token_count": 141
} |
import { collections } from "$lib/server/database";
import type { LayoutServerLoad } from "./$types";
import type { Report } from "$lib/types/Report";
export const load = (async ({ locals, parent }) => {
const { assistants } = await parent();
let reportsByUser: string[] = [];
const createdBy = locals.user?._id ?? locals.sessionId;
if (createdBy) {
const reports = await collections.reports
.find<
Pick<Report, "contentId">
>({ createdBy, object: "assistant" }, { projection: { _id: 0, contentId: 1 } })
.toArray();
reportsByUser = reports.map((r) => r.contentId.toString());
}
return {
assistants: (await assistants).map((el) => ({
...el,
reported: reportsByUser.includes(el._id),
})),
};
}) satisfies LayoutServerLoad;
| chat-ui/src/routes/settings/+layout.server.ts/0 | {
"file_path": "chat-ui/src/routes/settings/+layout.server.ts",
"repo_id": "chat-ui",
"token_count": 258
} |
@import "./highlight-js.css";
@tailwind base;
@tailwind components;
@tailwind utilities;
@layer components {
.btn {
@apply inline-flex flex-shrink-0 cursor-pointer select-none items-center justify-center whitespace-nowrap outline-none transition-all focus:ring disabled:cursor-default;
}
.active-model {
@apply border-blue-500 bg-blue-500/5 hover:bg-blue-500/10;
}
.file-hoverable {
@apply hover:bg-gray-500/10;
}
.base-tool {
@apply flex h-[1.6rem] items-center gap-[.2rem] whitespace-nowrap border border-transparent text-xs outline-none transition-all focus:outline-none active:outline-none dark:hover:text-gray-300 sm:hover:text-purple-600;
}
.active-tool {
@apply rounded-full !border-purple-200 bg-purple-100 pl-1 pr-2 text-purple-600 hover:text-purple-600 dark:!border-purple-700 dark:bg-purple-600/40 dark:text-purple-200;
}
}
@layer utilities {
.scrollbar-custom {
@apply scrollbar-thin scrollbar-track-transparent scrollbar-thumb-black/10 scrollbar-thumb-rounded-full scrollbar-w-1 hover:scrollbar-thumb-black/20 dark:scrollbar-thumb-white/10 dark:hover:scrollbar-thumb-white/20;
}
}
.katex-display {
overflow: auto hidden;
}
| chat-ui/src/styles/main.css/0 | {
"file_path": "chat-ui/src/styles/main.css",
"repo_id": "chat-ui",
"token_count": 435
} |
{
"license": "Apache-2.0",
"creators": [
{
"affiliation": "Hugging Face",
"name": "Quentin Lhoest"
},
{
"orcid": "0000-0003-1727-1045",
"affiliation": "Hugging Face",
"name": "Albert Villanova del Moral"
},
{
"affiliation": "Hugging Face",
"name": "Patrick von Platen"
},
{
"affiliation": "Hugging Face",
"name": "Thomas Wolf"
},
{
"affiliation": "Hugging Face",
"name": "Mario Šaško"
},
{
"affiliation": "Hugging Face",
"name": "Yacine Jernite"
},
{
"affiliation": "Hugging Face",
"name": "Abhishek Thakur"
},
{
"affiliation": "Hugging Face",
"name": "Lewis Tunstall"
},
{
"affiliation": "Hugging Face",
"name": "Suraj Patil"
},
{
"affiliation": "Hugging Face",
"name": "Mariama Drame"
},
{
"affiliation": "Hugging Face",
"name": "Julien Chaumond"
},
{
"affiliation": "Hugging Face",
"name": "Julien Plu"
},
{
"affiliation": "Hugging Face",
"name": "Joe Davison"
},
{
"affiliation": "Hugging Face",
"name": "Simon Brandeis"
},
{
"affiliation": "Hugging Face",
"name": "Victor Sanh"
},
{
"affiliation": "Hugging Face",
"name": "Teven Le Scao"
},
{
"affiliation": "Hugging Face",
"name": "Kevin Canwen Xu"
},
{
"affiliation": "Hugging Face",
"name": "Nicolas Patry"
},
{
"affiliation": "Hugging Face",
"name": "Steven Liu"
},
{
"affiliation": "Hugging Face",
"name": "Angelina McMillan-Major"
},
{
"affiliation": "Hugging Face",
"name": "Philipp Schmid"
},
{
"affiliation": "Hugging Face",
"name": "Sylvain Gugger"
},
{
"affiliation": "Hugging Face",
"name": "Nathan Raw"
},
{
"affiliation": "Hugging Face",
"name": "Sylvain Lesage"
},
{
"affiliation": "Hugging Face",
"name": "Anton Lozhkov"
},
{
"affiliation": "Hugging Face",
"name": "Matthew Carrigan"
},
{
"affiliation": "Hugging Face",
"name": "Th\u00e9o Matussi\u00e8re"
},
{
"affiliation": "Hugging Face",
"name": "Leandro von Werra"
},
{
"affiliation": "Hugging Face",
"name": "Lysandre Debut"
},
{
"affiliation": "Hugging Face",
"name": "Stas Bekman"
},
{
"affiliation": "Hugging Face",
"name": "Cl\u00e9ment Delangue"
}
]
} | datasets/.zenodo.json/0 | {
"file_path": "datasets/.zenodo.json",
"repo_id": "datasets",
"token_count": 1953
} |
# Differences between Dataset and IterableDataset
There are two types of dataset objects, a [`Dataset`] and an [`IterableDataset`].
Which type of dataset you choose to use or create depends on the size of your dataset.
In general, an [`IterableDataset`] is ideal for big datasets (think hundreds of GBs!) due to its lazy behavior and speed advantages, while a [`Dataset`] is great for everything else.
This page will compare the differences between a [`Dataset`] and an [`IterableDataset`] to help you pick the right dataset object for you.
## Downloading and streaming
When you have a regular [`Dataset`], you can access it using `my_dataset[0]`. This provides random access to the rows.
Such datasets are also called "map-style" datasets.
For example you can download ImageNet-1k like this and access any row:
```python
from datasets import load_dataset
imagenet = load_dataset("imagenet-1k", split="train") # downloads the full dataset
print(imagenet[0])
```
But one caveat is that you must have the entire dataset stored on your disk or in memory, which blocks you from accessing datasets bigger than the disk.
Because it can become inconvenient for big datasets, there exists another type of dataset, the [`IterableDataset`].
When you have an `IterableDataset`, you can access it using a `for` loop to load the data progressively as you iterate over the dataset.
This way, only a small fraction of examples is loaded in memory, and you don't write anything on disk.
For example, you can stream the ImageNet-1k dataset without downloading it on disk:
```python
from datasets import load_dataset
imagenet = load_dataset("imagenet-1k", split="train", streaming=True) # will start loading the data when iterated over
for example in imagenet:
print(example)
break
```
Streaming can read online data without writing any file to disk.
For example, you can stream datasets made out of multiple shards, each of which is hundreds of gigabytes like [C4](https://huggingface.co/datasets/c4), [OSCAR](https://huggingface.co/datasets/oscar) or [LAION-2B](https://huggingface.co/datasets/laion/laion2B-en).
Learn more about how to stream a dataset in the [Dataset Streaming Guide](./stream).
This is not the only difference though, because the "lazy" behavior of an `IterableDataset` is also present when it comes to dataset creation and processing.
## Creating map-style datasets and iterable datasets
You can create a [`Dataset`] using lists or dictionaries, and the data is entirely converted to Arrow so you can easily access any row:
```python
my_dataset = Dataset.from_dict({"col_1": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]})
print(my_dataset[0])
```
To create an `IterableDataset` on the other hand, you must provide a "lazy" way to load the data.
In Python, we generally use generator functions. These functions `yield` one example at a time, which means you can't access a row by slicing it like a regular `Dataset`:
```python
def my_generator(n):
for i in range(n):
yield {"col_1": i}
my_iterable_dataset = IterableDataset.from_generator(my_generator, gen_kwargs={"n": 10})
for example in my_iterable_dataset:
print(example)
break
```
## Loading local files entirely and progressively
It is possible to convert local or remote data files to an Arrow [`Dataset`] using [`load_dataset`]:
```python
data_files = {"train": ["path/to/data.csv"]}
my_dataset = load_dataset("csv", data_files=data_files, split="train")
print(my_dataset[0])
```
However, this requires a conversion step from CSV to Arrow format, which takes time and disk space if your dataset is big.
To save disk space and skip the conversion step, you can define an `IterableDataset` by streaming from the local files directly.
This way, the data is read progressively from the local files as you iterate over the dataset:
```python
data_files = {"train": ["path/to/data.csv"]}
my_iterable_dataset = load_dataset("csv", data_files=data_files, split="train", streaming=True)
for example in my_iterable_dataset: # this reads the CSV file progressively as you iterate over the dataset
print(example)
break
```
Many file formats are supported, like CSV, JSONL, and Parquet, as well as image and audio files.
You can find more information in the corresponding guides for loading [tabular](./tabular_load), [text](./nlp_load), [vision](./image_load), and [audio](./audio_load) datasets.
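For instance, the same streaming pattern applies to Parquet shards (the file names below are just placeholders):
```python
data_files = {"train": ["path/to/shard-00000.parquet", "path/to/shard-00001.parquet"]}
my_iterable_dataset = load_dataset("parquet", data_files=data_files, split="train", streaming=True)
```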
## Eager data processing and lazy data processing
When you process a [`Dataset`] object using [`Dataset.map`], the entire dataset is processed immediately and returned.
This is similar to how `pandas` works for example.
```python
my_dataset = my_dataset.map(process_fn) # process_fn is applied on all the examples of the dataset
print(my_dataset[0])
```
On the other hand, due to the "lazy" nature of an `IterableDataset`, calling [`IterableDataset.map`] does not apply your `map` function over the full dataset.
Instead, your `map` function is applied on-the-fly.
Because of that, you can chain multiple processing steps and they will all run at once when you start iterating over the dataset:
```python
my_iterable_dataset = my_iterable_dataset.map(process_fn_1)
my_iterable_dataset = my_iterable_dataset.filter(filter_fn)
my_iterable_dataset = my_iterable_dataset.map(process_fn_2)
# process_fn_1, filter_fn and process_fn_2 are applied on-the-fly when iterating over the dataset
for example in my_iterable_dataset:
print(example)
break
```
## Exact and fast approximate shuffling
When you shuffle a [`Dataset`] using [`Dataset.shuffle`], you apply an exact shuffling of the dataset.
It works by taking a list of indices `[0, 1, 2, ... len(my_dataset) - 1]` and shuffling this list.
Then, accessing `my_dataset[0]` returns the row and index defined by the first element of the indices mapping that has been shuffled:
```python
my_dataset = my_dataset.shuffle(seed=42)
print(my_dataset[0])
```
Since we don't have random access to the rows in the case of an `IterableDataset`, we can't use a shuffled list of indices and access a row at an arbitrary position.
This prevents the use of exact shuffling.
Instead, a fast approximate shuffling is used in [`IterableDataset.shuffle`].
It uses a shuffle buffer to sample random examples iteratively from the dataset.
Since the dataset is still read iteratively, it provides excellent speed performance:
```python
my_iterable_dataset = my_iterable_dataset.shuffle(seed=42, buffer_size=100)
for example in my_iterable_dataset:
print(example)
break
```
But using a shuffle buffer is not enough to provide a satisfactory shuffling for machine learning model training. So [`IterableDataset.shuffle`] also shuffles the dataset shards if your dataset is made of multiple files or sources:
```python
# Stream from the internet
my_iterable_dataset = load_dataset("deepmind/code_contests", split="train", streaming=True)
my_iterable_dataset.num_shards # 39
# Stream from local files
data_files = {"train": [f"path/to/data_{i}.csv" for i in range(1024)]}
my_iterable_dataset = load_dataset("csv", data_files=data_files, split="train", streaming=True)
my_iterable_dataset.num_shards # 1024
# From a generator function
def my_generator(n, sources):
for source in sources:
for example_id_for_current_source in range(n):
yield {"example_id": f"{source}_{example_id_for_current_source}"}
gen_kwargs = {"n": 10, "sources": [f"path/to/data_{i}" for i in range(1024)]}
my_iterable_dataset = IterableDataset.from_generator(my_generator, gen_kwargs=gen_kwargs)
my_iterable_dataset.num_shards # 1024
```
## Speed differences
Regular [`Dataset`] objects are based on Arrow which provides fast random access to the rows.
Thanks to memory mapping and the fact that Arrow is an in-memory format, reading data from disk doesn't do expensive system calls and deserialization.
It provides even faster data loading when iterating using a `for` loop by iterating on contiguous Arrow record batches.
However as soon as your [`Dataset`] has an indices mapping (via [`Dataset.shuffle`] for example), the speed can become 10x slower.
This is because there is an extra step to get the row index to read using the indices mapping, and most importantly, you aren't reading contiguous chunks of data anymore.
To restore the speed, you'd need to rewrite the entire dataset on your disk again using [`Dataset.flatten_indices`], which removes the indices mapping.
This may take a lot of time depending on the size of your dataset though:
```python
my_dataset[0] # fast
my_dataset = my_dataset.shuffle(seed=42)
my_dataset[0] # up to 10x slower
my_dataset = my_dataset.flatten_indices() # rewrite the shuffled dataset on disk as contiguous chunks of data
my_dataset[0] # fast again
```
In this case, we recommend switching to an [`IterableDataset`] and leveraging its fast approximate shuffling method [`IterableDataset.shuffle`].
It only shuffles the shards order and adds a shuffle buffer to your dataset, which keeps the speed of your dataset optimal.
You can also reshuffle the dataset easily:
```python
for example in my_iterable_dataset:  # fast
    pass
shuffled_iterable_dataset = my_iterable_dataset.shuffle(seed=42, buffer_size=100)
for example in shuffled_iterable_dataset:  # as fast as before
    pass
shuffled_iterable_dataset = my_iterable_dataset.shuffle(seed=1337, buffer_size=100)  # reshuffling using another seed is instantaneous
for example in shuffled_iterable_dataset:  # still as fast as before
    pass
```
If you're using your dataset on multiple epochs, the effective seed to shuffle the shards order in the shuffle buffer is `seed + epoch`.
It makes it easy to reshuffle a dataset between epochs:
```python
for epoch in range(n_epochs):
my_iterable_dataset.set_epoch(epoch)
for example in my_iterable_dataset: # fast + reshuffled at each epoch using `effective_seed = seed + epoch`
pass
```
To restart the iteration of a map-style dataset, you can simply skip the first examples:
```python
my_dataset = my_dataset.select(range(start_index, len(my_dataset)))
```
But if you use a `DataLoader` with a `Sampler`, you should instead save the state of your sampler (you might have written a custom sampler that allows resuming).
On the other hand, iterable datasets don't provide random access to a specific example index to resume from. But you can use [`IterableDataset.state_dict`] and [`IterableDataset.load_state_dict`] to resume from a checkpoint instead, similarly to what you can do for models and optimizers:
```python
>>> iterable_dataset = Dataset.from_dict({"a": range(6)}).to_iterable_dataset(num_shards=3)
>>> # save in the middle of training
>>> state_dict = iterable_dataset.state_dict()
>>> # and resume later
>>> iterable_dataset.load_state_dict(state_dict)
```
Under the hood, the iterable dataset keeps track of the current shard being read and the example index in the current shard, and it stores this information in the `state_dict`.
To resume from a checkpoint, the dataset skips all the shards that were previously read to restart from the current shard.
Then it reads the shard and skips examples until it reaches the exact example from the checkpoint.
Therefore restarting a dataset is quite fast, since it will not re-read the shards that have already been iterated on. Still, resuming a dataset is generally not instantaneous since it has to restart reading from the beginning of the current shard and skip examples until it reaches the checkpoint location.
This can be used with the `StatefulDataLoader` from `torchdata`, see [streaming with a PyTorch DataLoader](./use_with_pytorch#stream-data).
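As a rough sketch of how the two pieces fit together (assuming `torchdata` is installed and using illustrative batch sizes; see the linked guide for the canonical example), checkpointing and resuming a streaming dataloader might look like this:
```python
from torchdata.stateful_dataloader import StatefulDataLoader

# wrap the streaming dataset in a dataloader that can save/restore its state
dataloader = StatefulDataLoader(my_iterable_dataset, batch_size=32, num_workers=4)
for step, batch in enumerate(dataloader):
    ...
    if step == 1_000:
        state_dict = dataloader.state_dict()  # checkpoint mid-epoch
        break

# later: recreate the dataloader and resume where you left off
dataloader = StatefulDataLoader(my_iterable_dataset, batch_size=32, num_workers=4)
dataloader.load_state_dict(state_dict)
```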
## Switch from map-style to iterable
If you want to benefit from the "lazy" behavior of an [`IterableDataset`] or their speed advantages, you can switch your map-style [`Dataset`] to an [`IterableDataset`]:
```python
my_iterable_dataset = my_dataset.to_iterable_dataset()
```
If you want to shuffle your dataset or [use it with a PyTorch DataLoader](./use_with_pytorch#stream-data), we recommend generating a sharded [`IterableDataset`]:
```python
my_iterable_dataset = my_dataset.to_iterable_dataset(num_shards=1024)
my_iterable_dataset.num_shards # 1024
```
| datasets/docs/source/about_mapstyle_vs_iterable.mdx/0 | {
"file_path": "datasets/docs/source/about_mapstyle_vs_iterable.mdx",
"repo_id": "datasets",
"token_count": 3730
} |
# Load image data
Image datasets have [`Image`] type columns, which contain PIL objects.
<Tip>
To work with image datasets, you need to have the `vision` dependency installed. Check out the [installation](./installation#vision) guide to learn how to install it.
</Tip>
When you load an image dataset and call the image column, the images are decoded as PIL Images:
```py
>>> from datasets import load_dataset, Image
>>> dataset = load_dataset("beans", split="train")
>>> dataset[0]["image"]
```
<Tip warning={true}>
Index into an image dataset using the row index first and then the `image` column - `dataset[0]["image"]` - to avoid decoding and resampling all the image objects in the dataset. Otherwise, this can be a slow and time-consuming process if you have a large dataset.
</Tip>
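Concretely, the two access orders below return the same image but differ in how much work they do:
```py
>>> dataset[0]["image"]   # decodes a single image
>>> dataset["image"][0]   # decodes every image in the column before indexing
```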
For a guide on how to load any type of dataset, take a look at the <a class="underline decoration-sky-400 decoration-2 font-semibold" href="./loading">general loading guide</a>.
## Local files
You can load a dataset from the image path. Use the [`~Dataset.cast_column`] function to accept a column of image file paths, and decode it into a PIL image with the [`Image`] feature:
```py
>>> from datasets import Dataset, Image
>>> dataset = Dataset.from_dict({"image": ["path/to/image_1", "path/to/image_2", ..., "path/to/image_n"]}).cast_column("image", Image())
>>> dataset[0]["image"]
<PIL.PngImagePlugin.PngImageFile image mode=RGBA size=1200x215 at 0x15E6D7160>
```
If you only want to load the underlying path to the image dataset without decoding the image object, set `decode=False` in the [`Image`] feature:
```py
>>> dataset = load_dataset("beans", split="train").cast_column("image", Image(decode=False))
>>> dataset[0]["image"]
{'bytes': None,
'path': '/root/.cache/huggingface/datasets/downloads/extracted/b0a21163f78769a2cf11f58dfc767fb458fc7cea5c05dccc0144a2c0f0bc1292/train/bean_rust/bean_rust_train.29.jpg'}
```
## ImageFolder
You can also load a dataset with an `ImageFolder` dataset builder which does not require writing a custom dataloader. This makes `ImageFolder` ideal for quickly creating and loading image datasets with several thousand images for different vision tasks. Your image dataset structure should look like this:
```
folder/train/dog/golden_retriever.png
folder/train/dog/german_shepherd.png
folder/train/dog/chihuahua.png
folder/train/cat/maine_coon.png
folder/train/cat/bengal.png
folder/train/cat/birman.png
```
Load your dataset by specifying `imagefolder` and the directory of your dataset in `data_dir`:
```py
>>> from datasets import load_dataset
>>> dataset = load_dataset("imagefolder", data_dir="/path/to/folder")
>>> dataset["train"][0]
{"image": <PIL.PngImagePlugin.PngImageFile image mode=RGBA size=1200x215 at 0x15E6D7160>, "label": 0}
>>> dataset["train"][-1]
{"image": <PIL.PngImagePlugin.PngImageFile image mode=RGBA size=1200x215 at 0x15E8DAD30>, "label": 1}
```
Load remote datasets from their URLs with the `data_files` parameter:
```py
>>> dataset = load_dataset("imagefolder", data_files="https://download.microsoft.com/download/3/E/1/3E1C3F21-ECDB-4869-8368-6DEBA77B919F/kagglecatsanddogs_5340.zip", split="train")
```
Some datasets have a metadata file (`metadata.csv`/`metadata.jsonl`) associated with it, containing other information about the data like bounding boxes, text captions, and labels. The metadata is automatically loaded when you call [`load_dataset`] and specify `imagefolder`.
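For instance, a minimal `metadata.jsonl` sitting next to the images could look like the sketch below; the `file_name` column is what links each row to its image file, and the `text` column is just an illustrative caption field:
```
{"file_name": "0001.png", "text": "a golden retriever playing in the snow"}
{"file_name": "0002.png", "text": "a maine coon sitting on a windowsill"}
```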
To ignore the information in the metadata file and let `ImageFolder` automatically infer the label name from the directory name instead, set `drop_labels=False` in [`load_dataset`]:
```py
>>> from datasets import load_dataset
>>> dataset = load_dataset("imagefolder", data_dir="/path/to/folder", drop_labels=False)
```
<Tip>
For more information about creating your own `ImageFolder` dataset, take a look at the [Create an image dataset](./image_dataset) guide.
</Tip>
## WebDataset
The [WebDataset](https://github.com/webdataset/webdataset) format is based on a folder of TAR archives and is suitable for big image datasets.
Because of their size, WebDatasets are generally loaded in streaming mode (using `streaming=True`).
You can load a WebDataset like this:
```python
>>> from datasets import load_dataset
>>> dataset = load_dataset("webdataset", data_dir="/path/to/folder", streaming=True)
```
| datasets/docs/source/image_load.mdx/0 | {
"file_path": "datasets/docs/source/image_load.mdx",
"repo_id": "datasets",
"token_count": 1388
} |
# Process
🤗 Datasets provides many tools for modifying the structure and content of a dataset. These tools are important for tidying up a dataset, creating additional columns, converting between features and formats, and much more.
This guide will show you how to:
- Reorder rows and split the dataset.
- Rename and remove columns, and other common column operations.
- Apply processing functions to each example in a dataset.
- Concatenate datasets.
- Apply a custom formatting transform.
- Save and export processed datasets.
For more details specific to processing other dataset modalities, take a look at the <a class="underline decoration-pink-400 decoration-2 font-semibold" href="./audio_process">process audio dataset guide</a>, the <a class="underline decoration-yellow-400 decoration-2 font-semibold" href="./image_process">process image dataset guide</a>, or the <a class="underline decoration-green-400 decoration-2 font-semibold" href="./nlp_process">process text dataset guide</a>.
The examples in this guide use the MRPC dataset, but feel free to load any dataset of your choice and follow along!
```py
>>> from datasets import load_dataset
>>> dataset = load_dataset("glue", "mrpc", split="train")
```
<Tip warning={true}>
All processing methods in this guide return a new [`Dataset`] object. Modification is not done in-place. Be careful about overriding your previous dataset!
</Tip>
## Sort, shuffle, select, split, and shard
There are several functions for rearranging the structure of a dataset.
These functions are useful for selecting only the rows you want, creating train and test splits, and sharding very large datasets into smaller chunks.
### Sort
Use [`~Dataset.sort`] to sort column values according to their numerical values. The provided column must be NumPy compatible.
```py
>>> dataset["label"][:10]
[1, 0, 1, 0, 1, 1, 0, 1, 0, 0]
>>> sorted_dataset = dataset.sort("label")
>>> sorted_dataset["label"][:10]
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
>>> sorted_dataset["label"][-10:]
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
```
Under the hood, this creates a list of indices that is sorted according to values of the column.
This indices mapping is then used to access the right rows in the underlying Arrow table.
### Shuffle
The [`~Dataset.shuffle`] function randomly rearranges the column values. You can specify the `generator` parameter in this function to use a different `numpy.random.Generator` if you want more control over the algorithm used to shuffle the dataset.
```py
>>> shuffled_dataset = sorted_dataset.shuffle(seed=42)
>>> shuffled_dataset["label"][:10]
[1, 1, 1, 0, 1, 1, 1, 1, 1, 0]
```
Shuffling takes the list of indices `[0:len(my_dataset)]` and shuffles it to create an indices mapping.
However as soon as your [`Dataset`] has an indices mapping, the speed can become 10x slower.
This is because there is an extra step to get the row index to read using the indices mapping, and most importantly, you aren't reading contiguous chunks of data anymore.
To restore the speed, you'd need to rewrite the entire dataset on your disk again using [`Dataset.flatten_indices`], which removes the indices mapping.
Alternatively, you can switch to an [`IterableDataset`] and leverage its fast approximate shuffling [`IterableDataset.shuffle`]:
```py
>>> iterable_dataset = dataset.to_iterable_dataset(num_shards=128)
>>> shuffled_iterable_dataset = iterable_dataset.shuffle(seed=42, buffer_size=1000)
```
### Select and Filter
There are two options for filtering rows in a dataset: [`~Dataset.select`] and [`~Dataset.filter`].
- [`~Dataset.select`] returns rows according to a list of indices:
```py
>>> small_dataset = dataset.select([0, 10, 20, 30, 40, 50])
>>> len(small_dataset)
6
```
- [`~Dataset.filter`] returns rows that match a specified condition:
```py
>>> start_with_ar = dataset.filter(lambda example: example["sentence1"].startswith("Ar"))
>>> len(start_with_ar)
6
>>> start_with_ar["sentence1"]
['Around 0335 GMT , Tab shares were up 19 cents , or 4.4 % , at A $ 4.56 , having earlier set a record high of A $ 4.57 .',
'Arison said Mann may have been one of the pioneers of the world music movement and he had a deep love of Brazilian music .',
'Arts helped coach the youth on an eighth-grade football team at Lombardi Middle School in Green Bay .',
'Around 9 : 00 a.m. EDT ( 1300 GMT ) , the euro was at $ 1.1566 against the dollar , up 0.07 percent on the day .',
"Arguing that the case was an isolated example , Canada has threatened a trade backlash if Tokyo 's ban is not justified on scientific grounds .",
'Artists are worried the plan would harm those who need help most - performers who have a difficult time lining up shows .'
]
```
[`~Dataset.filter`] can also filter by indices if you set `with_indices=True`:
```py
>>> even_dataset = dataset.filter(lambda example, idx: idx % 2 == 0, with_indices=True)
>>> len(even_dataset)
1834
>>> len(dataset) / 2
1834.0
```
Unless the list of indices to keep is contiguous, those methods also create an indices mapping under the hood.
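For example, selecting a contiguous range keeps the reads contiguous, while an arbitrary list of indices does not:
```py
>>> first_rows = dataset.select(range(100))          # contiguous: no indices mapping needed
>>> some_rows = dataset.select([0, 10, 20, 30, 40])  # non-contiguous: creates an indices mapping
```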
### Split
The [`~Dataset.train_test_split`] function creates train and test splits if your dataset doesn't already have them. This allows you to adjust the relative proportions or an absolute number of samples in each split. In the example below, use the `test_size` parameter to create a test split that is 10% of the original dataset:
```py
>>> dataset.train_test_split(test_size=0.1)
{'train': Dataset(schema: {'sentence1': 'string', 'sentence2': 'string', 'label': 'int64', 'idx': 'int32'}, num_rows: 3301),
'test': Dataset(schema: {'sentence1': 'string', 'sentence2': 'string', 'label': 'int64', 'idx': 'int32'}, num_rows: 367)}
>>> 0.1 * len(dataset)
366.8
```
The splits are shuffled by default, but you can set `shuffle=False` to prevent shuffling.
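For example, you can make the split deterministic and contiguous, or reproducible with a seed:
```py
>>> dataset.train_test_split(test_size=0.1, shuffle=False)  # deterministic, contiguous split
>>> dataset.train_test_split(test_size=0.1, seed=42)        # reproducible shuffled split
```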
### Shard
🤗 Datasets supports sharding to divide a very large dataset into a predefined number of chunks. Specify the `num_shards` parameter in [`~Dataset.shard`] to determine the number of shards to split the dataset into. You'll also need to provide the shard you want to return with the `index` parameter.
For example, the [imdb](https://huggingface.co/datasets/imdb) dataset has 25000 examples:
```py
>>> from datasets import load_dataset
>>> dataset = load_dataset("imdb", split="train")
>>> print(dataset)
Dataset({
features: ['text', 'label'],
num_rows: 25000
})
```
After sharding the dataset into four chunks, the first shard will only have 6250 examples:
```py
>>> dataset.shard(num_shards=4, index=0)
Dataset({
features: ['text', 'label'],
num_rows: 6250
})
>>> print(25000/4)
6250.0
```
## Rename, remove, cast, and flatten
The following functions allow you to modify the columns of a dataset. These functions are useful for renaming or removing columns, changing columns to a new set of features, and flattening nested column structures.
### Rename
Use [`~Dataset.rename_column`] when you need to rename a column in your dataset. Features associated with the original column are actually moved under the new column name, instead of just replacing the original column in-place.
Provide [`~Dataset.rename_column`] with the name of the original column, and the new column name:
```py
>>> dataset
Dataset({
features: ['sentence1', 'sentence2', 'label', 'idx'],
num_rows: 3668
})
>>> dataset = dataset.rename_column("sentence1", "sentenceA")
>>> dataset = dataset.rename_column("sentence2", "sentenceB")
>>> dataset
Dataset({
features: ['sentenceA', 'sentenceB', 'label', 'idx'],
num_rows: 3668
})
```
### Remove
When you need to remove one or more columns, provide the column name to remove to the [`~Dataset.remove_columns`] function. Remove more than one column by providing a list of column names:
```py
>>> dataset = dataset.remove_columns("label")
>>> dataset
Dataset({
features: ['sentence1', 'sentence2', 'idx'],
num_rows: 3668
})
>>> dataset = dataset.remove_columns(["sentence1", "sentence2"])
>>> dataset
Dataset({
features: ['idx'],
num_rows: 3668
})
```
Conversely, [`~Dataset.select_columns`] selects one or more columns to keep and removes the rest. This function takes either one or a list of column names:
```py
>>> dataset
Dataset({
features: ['sentence1', 'sentence2', 'label', 'idx'],
num_rows: 3668
})
>>> dataset = dataset.select_columns(['sentence1', 'sentence2', 'idx'])
>>> dataset
Dataset({
features: ['sentence1', 'sentence2', 'idx'],
num_rows: 3668
})
>>> dataset = dataset.select_columns('idx')
>>> dataset
Dataset({
features: ['idx'],
num_rows: 3668
})
```
### Cast
The [`~Dataset.cast`] function transforms the feature type of one or more columns. This function accepts your new [`Features`] as its argument. The example below demonstrates how to change the [`ClassLabel`] and [`Value`] features:
```py
>>> dataset.features
{'sentence1': Value(dtype='string', id=None),
'sentence2': Value(dtype='string', id=None),
'label': ClassLabel(names=['not_equivalent', 'equivalent'], id=None),
'idx': Value(dtype='int32', id=None)}
>>> from datasets import ClassLabel, Value
>>> new_features = dataset.features.copy()
>>> new_features["label"] = ClassLabel(names=["negative", "positive"])
>>> new_features["idx"] = Value("int64")
>>> dataset = dataset.cast(new_features)
>>> dataset.features
{'sentence1': Value(dtype='string', id=None),
'sentence2': Value(dtype='string', id=None),
'label': ClassLabel(names=['negative', 'positive'], id=None),
'idx': Value(dtype='int64', id=None)}
```
<Tip>
Casting only works if the original feature type and new feature type are compatible. For example, you can cast a column with the feature type `Value("int32")` to `Value("bool")` if the original column only contains ones and zeros.
</Tip>
Use the [`~Dataset.cast_column`] function to change the feature type of a single column. Pass the column name and its new feature type as arguments:
```py
>>> dataset.features
{'audio': Audio(sampling_rate=44100, mono=True, id=None)}
>>> dataset = dataset.cast_column("audio", Audio(sampling_rate=16000))
>>> dataset.features
{'audio': Audio(sampling_rate=16000, mono=True, id=None)}
```
### Flatten
Sometimes a column can be a nested structure of several types. Take a look at the nested structure below from the SQuAD dataset:
```py
>>> from datasets import load_dataset
>>> dataset = load_dataset("squad", split="train")
>>> dataset.features
{'answers': Sequence(feature={'text': Value(dtype='string', id=None), 'answer_start': Value(dtype='int32', id=None)}, length=-1, id=None),
'context': Value(dtype='string', id=None),
'id': Value(dtype='string', id=None),
'question': Value(dtype='string', id=None),
'title': Value(dtype='string', id=None)}
```
The `answers` field contains two subfields: `text` and `answer_start`. Use the [`~Dataset.flatten`] function to extract the subfields into their own separate columns:
```py
>>> flat_dataset = dataset.flatten()
>>> flat_dataset
Dataset({
features: ['id', 'title', 'context', 'question', 'answers.text', 'answers.answer_start'],
num_rows: 87599
})
```
Notice how the subfields are now their own independent columns: `answers.text` and `answers.answer_start`.
## Map
Some of the more powerful applications of 🤗 Datasets come from using the [`~Dataset.map`] function. The primary purpose of [`~Dataset.map`] is to speed up processing functions. It allows you to apply a processing function to each example in a dataset, independently or in batches. This function can even create new rows and columns.
In the following example, prefix each `sentence1` value in the dataset with `'My sentence: '`.
Start by creating a function that adds `'My sentence: '` to the beginning of each sentence. The function needs to accept and output a `dict`:
```py
>>> def add_prefix(example):
... example["sentence1"] = 'My sentence: ' + example["sentence1"]
... return example
```
Now use [`~Dataset.map`] to apply the `add_prefix` function to the entire dataset:
```py
>>> updated_dataset = small_dataset.map(add_prefix)
>>> updated_dataset["sentence1"][:5]
['My sentence: Amrozi accused his brother , whom he called " the witness " , of deliberately distorting his evidence .',
"My sentence: Yucaipa owned Dominick 's before selling the chain to Safeway in 1998 for $ 2.5 billion .",
'My sentence: They had published an advertisement on the Internet on June 10 , offering the cargo for sale , he added .',
'My sentence: Around 0335 GMT , Tab shares were up 19 cents , or 4.4 % , at A $ 4.56 , having earlier set a record high of A $ 4.57 .',
]
```
Let's take a look at another example, except this time, you'll remove a column with [`~Dataset.map`]. When you remove a column, it is only removed after the example has been provided to the mapped function. This allows the mapped function to use the content of the columns before they are removed.
Specify the column to remove with the `remove_columns` parameter in [`~Dataset.map`]:
```py
>>> updated_dataset = dataset.map(lambda example: {"new_sentence": example["sentence1"]}, remove_columns=["sentence1"])
>>> updated_dataset.column_names
['sentence2', 'label', 'idx', 'new_sentence']
```
<Tip>
🤗 Datasets also has a [`~Dataset.remove_columns`] function which is faster because it doesn't copy the data of the remaining columns.
</Tip>
You can also use [`~Dataset.map`] with indices if you set `with_indices=True`. The example below adds the index to the beginning of each sentence:
```py
>>> updated_dataset = dataset.map(lambda example, idx: {"sentence2": f"{idx}: " + example["sentence2"]}, with_indices=True)
>>> updated_dataset["sentence2"][:5]
['0: Referring to him as only " the witness " , Amrozi accused his brother of deliberately distorting his evidence .',
"1: Yucaipa bought Dominick 's in 1995 for $ 693 million and sold it to Safeway for $ 1.8 billion in 1998 .",
"2: On June 10 , the ship 's owners had published an advertisement on the Internet , offering the explosives for sale .",
'3: Tab shares jumped 20 cents , or 4.6 % , to set a record closing high at A $ 4.57 .',
'4: PG & E Corp. shares jumped $ 1.63 or 8 percent to $ 21.03 on the New York Stock Exchange on Friday .'
]
```
### Multiprocessing
Multiprocessing significantly speeds up processing by parallelizing processes on the CPU. Set the `num_proc` parameter in [`~Dataset.map`] to set the number of processes to use:
```py
>>> updated_dataset = dataset.map(lambda example, idx: {"sentence2": f"{idx}: " + example["sentence2"]}, with_indices=True, num_proc=4)
```
[`~Dataset.map`] also works with the rank of the process if you set `with_rank=True`. This is analogous to the `with_indices` parameter. The `rank` argument in the mapped function comes after the `index` argument if the latter is already present.
```py
>>> import torch
>>> from multiprocess import set_start_method
>>> from transformers import AutoTokenizer, AutoModelForCausalLM
>>> from datasets import load_dataset
>>>
>>> # Get an example dataset
>>> dataset = load_dataset("fka/awesome-chatgpt-prompts", split="train")
>>>
>>> # Get an example model and its tokenizer
>>> model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen1.5-0.5B-Chat").eval()
>>> tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen1.5-0.5B-Chat")
>>>
>>> def gpu_computation(batch, rank):
... # Move the model on the right GPU if it's not there already
... device = f"cuda:{(rank or 0) % torch.cuda.device_count()}"
... model.to(device)
...
... # Your big GPU call goes here, for example:
... chats = [[
... {"role": "system", "content": "You are a helpful assistant."},
... {"role": "user", "content": prompt}
... ] for prompt in batch["prompt"]]
... texts = [tokenizer.apply_chat_template(
... chat,
... tokenize=False,
... add_generation_prompt=True
... ) for chat in chats]
... model_inputs = tokenizer(texts, padding=True, return_tensors="pt").to(device)
... with torch.no_grad():
... outputs = model.generate(**model_inputs, max_new_tokens=512)
... batch["output"] = tokenizer.batch_decode(outputs, skip_special_tokens=True)
... return batch
>>>
>>> if __name__ == "__main__":
... set_start_method("spawn")
... updated_dataset = dataset.map(
... gpu_computation,
... batched=True,
... batch_size=16,
... with_rank=True,
... num_proc=torch.cuda.device_count(), # one process per GPU
... )
```
The main use-case for rank is to parallelize computation across several GPUs. This requires setting `multiprocess.set_start_method("spawn")`. If you don't, you'll receive the following CUDA error:
```bash
RuntimeError: Cannot re-initialize CUDA in forked subprocess. To use CUDA with multiprocessing, you must use the 'spawn' start method.
```
### Batch processing
The [`~Dataset.map`] function supports working with batches of examples. Operate on batches by setting `batched=True`. The default batch size is 1000, but you can adjust it with the `batch_size` parameter. Batch processing enables interesting applications such as splitting long sentences into shorter chunks and data augmentation.
#### Split long examples
When examples are too long, you may want to split them into several smaller chunks. Begin by creating a function that:
1. Splits the `sentence1` field into chunks of 50 characters.
2. Stacks all the chunks together to create the new dataset.
```py
>>> def chunk_examples(examples):
... chunks = []
... for sentence in examples["sentence1"]:
... chunks += [sentence[i:i + 50] for i in range(0, len(sentence), 50)]
... return {"chunks": chunks}
```
Apply the function with [`~Dataset.map`]:
```py
>>> chunked_dataset = dataset.map(chunk_examples, batched=True, remove_columns=dataset.column_names)
>>> chunked_dataset[:10]
{'chunks': ['Amrozi accused his brother , whom he called " the ',
'witness " , of deliberately distorting his evidenc',
'e .',
"Yucaipa owned Dominick 's before selling the chain",
' to Safeway in 1998 for $ 2.5 billion .',
'They had published an advertisement on the Interne',
't on June 10 , offering the cargo for sale , he ad',
'ded .',
'Around 0335 GMT , Tab shares were up 19 cents , or',
' 4.4 % , at A $ 4.56 , having earlier set a record']}
```
Notice how the sentences are split into shorter chunks now, and there are more rows in the dataset.
```py
>>> dataset
Dataset({
features: ['sentence1', 'sentence2', 'label', 'idx'],
num_rows: 3668
})
>>> chunked_dataset
Dataset({
features: ['chunks'],
num_rows: 10470
})
```
#### Data augmentation
The [`~Dataset.map`] function could also be used for data augmentation. The following example generates additional words for a masked token in a sentence.
Load and use the [RoBERTa](https://huggingface.co/roberta-base) model in 🤗 Transformers' [FillMaskPipeline](https://huggingface.co/transformers/main_classes/pipelines#transformers.FillMaskPipeline):
```py
>>> from random import randint
>>> from transformers import pipeline
>>> fillmask = pipeline("fill-mask", model="roberta-base")
>>> mask_token = fillmask.tokenizer.mask_token
>>> smaller_dataset = dataset.filter(lambda e, i: i<100, with_indices=True)
```
Create a function to randomly select a word to mask in the sentence. The function should also return the original sentence and the top three replacements generated by RoBERTa.
```py
>>> def augment_data(examples):
... outputs = []
... for sentence in examples["sentence1"]:
... words = sentence.split(' ')
... K = randint(1, len(words)-1)
... masked_sentence = " ".join(words[:K] + [mask_token] + words[K+1:])
... predictions = fillmask(masked_sentence)
... augmented_sequences = [predictions[i]["sequence"] for i in range(3)]
... outputs += [sentence] + augmented_sequences
...
... return {"data": outputs}
```
Use [`~Dataset.map`] to apply the function over the whole dataset:
```py
>>> augmented_dataset = smaller_dataset.map(augment_data, batched=True, remove_columns=dataset.column_names, batch_size=8)
>>> augmented_dataset[:9]["data"]
['Amrozi accused his brother , whom he called " the witness " , of deliberately distorting his evidence .',
'Amrozi accused his brother, whom he called " the witness ", of deliberately withholding his evidence.',
'Amrozi accused his brother, whom he called " the witness ", of deliberately suppressing his evidence.',
'Amrozi accused his brother, whom he called " the witness ", of deliberately destroying his evidence.',
"Yucaipa owned Dominick 's before selling the chain to Safeway in 1998 for $ 2.5 billion .",
'Yucaipa owned Dominick Stores before selling the chain to Safeway in 1998 for $ 2.5 billion.',
"Yucaipa owned Dominick's before selling the chain to Safeway in 1998 for $ 2.5 billion.",
'Yucaipa owned Dominick Pizza before selling the chain to Safeway in 1998 for $ 2.5 billion.'
]
```
For each original sentence, RoBERTa augmented a random word with three alternatives. The original word `distorting` is supplemented by `withholding`, `suppressing`, and `destroying`.
### Process multiple splits
Many datasets have splits that can be processed simultaneously with [`DatasetDict.map`]. For example, tokenize the `sentence1` field in the train and test split by:
```py
>>> from datasets import load_dataset
# load all the splits
>>> dataset = load_dataset('glue', 'mrpc')
>>> encoded_dataset = dataset.map(lambda examples: tokenizer(examples["sentence1"]), batched=True)
>>> encoded_dataset["train"][0]
{'sentence1': 'Amrozi accused his brother , whom he called " the witness " , of deliberately distorting his evidence .',
'sentence2': 'Referring to him as only " the witness " , Amrozi accused his brother of deliberately distorting his evidence .',
'label': 1,
'idx': 0,
'input_ids': [ 101, 7277, 2180, 5303, 4806, 1117, 1711, 117, 2292, 1119, 1270, 107, 1103, 7737, 107, 117, 1104, 9938, 4267, 12223, 21811, 1117, 2554, 119, 102],
'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
}
```
### Distributed usage
When you use [`~Dataset.map`] in a distributed setting, you should also use [torch.distributed.barrier](https://pytorch.org/docs/stable/distributed?highlight=barrier#torch.distributed.barrier). This ensures the main process performs the mapping, while the other processes load the results, thereby avoiding duplicate work.
The following example shows how you can use `torch.distributed.barrier` to synchronize the processes:
```py
>>> from datasets import Dataset
>>> import torch.distributed
>>> dataset1 = Dataset.from_dict({"a": [0, 1, 2]})
>>> if training_args.local_rank > 0:
... print("Waiting for main process to perform the mapping")
... torch.distributed.barrier()
>>> dataset2 = dataset1.map(lambda x: {"a": x["a"] + 1})
>>> if training_args.local_rank == 0:
... print("Loading results from main process")
... torch.distributed.barrier()
```
## Batch
The [`~Dataset.batch`] method allows you to group samples from the dataset into batches. This is particularly useful when you want to create batches of data for training or evaluation, especially when working with deep learning models.
Here's an example of how to use the `batch()` method:
```python
>>> from datasets import load_dataset
>>> dataset = load_dataset("rotten_tomatoes", split="train")
>>> batched_dataset = dataset.batch(batch_size=4)
>>> batched_dataset[0]
{'text': ['the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .',
'the gorgeously elaborate continuation of " the lord of the rings " trilogy is so huge that a column of words cannot adequately describe co-writer/director peter jackson\'s expanded vision of j . r . r . tolkien\'s middle-earth .',
'effective but too-tepid biopic',
'if you sometimes like to go to the movies to have fun , wasabi is a good place to start .'],
'label': [1, 1, 1, 1]}
```
The `batch()` method accepts the following parameters:
- `batch_size` (`int`): The number of samples in each batch.
- `drop_last_batch` (`bool`, defaults to `False`): Whether to drop the last incomplete batch if the dataset size is not divisible by the batch size.
- `num_proc` (`int`, optional, defaults to `None`): The number of processes to use for multiprocessing. If None, no multiprocessing is used. This can significantly speed up batching for large datasets.
Note that `Dataset.batch()` returns a new [`Dataset`] where each item is a batch of multiple samples from the original dataset. If you want to process data in batches, you should use a batched [`~Dataset.map`] directly, which applies a function to batches but the output dataset is unbatched.
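As a quick sketch of the `drop_last_batch` behavior on a tiny throwaway dataset:
```python
>>> from datasets import Dataset
>>> ds = Dataset.from_dict({"x": list(range(10))})
>>> len(ds.batch(batch_size=4))                        # 3 batches: 4 + 4 + 2
3
>>> len(ds.batch(batch_size=4, drop_last_batch=True))  # 2 full batches, the remainder is dropped
2
```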
## Concatenate
Separate datasets can be concatenated if they share the same column types. Concatenate datasets with [`concatenate_datasets`]:
```py
>>> from datasets import concatenate_datasets, load_dataset
>>> bookcorpus = load_dataset("bookcorpus", split="train")
>>> wiki = load_dataset("wikipedia", "20220301.en", split="train")
>>> wiki = wiki.remove_columns([col for col in wiki.column_names if col != "text"]) # only keep the 'text' column
>>> assert bookcorpus.features.type == wiki.features.type
>>> bert_dataset = concatenate_datasets([bookcorpus, wiki])
```
You can also concatenate two datasets horizontally by setting `axis=1` as long as the datasets have the same number of rows:
```py
>>> from datasets import Dataset
>>> bookcorpus_ids = Dataset.from_dict({"ids": list(range(len(bookcorpus)))})
>>> bookcorpus_with_ids = concatenate_datasets([bookcorpus, bookcorpus_ids], axis=1)
```
### Interleave
You can also mix several datasets together by taking alternating examples from each one to create a new dataset. This is known as *interleaving*, which is enabled by the [`interleave_datasets`] function. Both [`interleave_datasets`] and [`concatenate_datasets`] work with regular [`Dataset`] and [`IterableDataset`] objects.
Refer to the [Stream](./stream#interleave) guide for an example of how to interleave [`IterableDataset`] objects.
You can define sampling probabilities for each of the original datasets to specify how to interleave the datasets.
In this case, the new dataset is constructed by getting examples one by one from a random dataset until one of the datasets runs out of samples.
```py
>>> from datasets import Dataset, interleave_datasets
>>> seed = 42
>>> probabilities = [0.3, 0.5, 0.2]
>>> d1 = Dataset.from_dict({"a": [0, 1, 2]})
>>> d2 = Dataset.from_dict({"a": [10, 11, 12, 13]})
>>> d3 = Dataset.from_dict({"a": [20, 21, 22]})
>>> dataset = interleave_datasets([d1, d2, d3], probabilities=probabilities, seed=seed)
>>> dataset["a"]
[10, 11, 20, 12, 0, 21, 13]
```
You can also specify the `stopping_strategy`. The default strategy, `first_exhausted`, is a subsampling strategy, i.e. the dataset construction is stopped as soon as one of the datasets runs out of samples.
You can specify `stopping_strategy=all_exhausted` to execute an oversampling strategy. In this case, the dataset construction is stopped as soon as every sample in every dataset has been added at least once. In practice, it means that if a dataset is exhausted, it will return to the beginning of this dataset until the stop criterion has been reached.
Note that if no sampling probabilities are specified, the new dataset will have `max_length_datasets * nb_dataset` samples.
```py
>>> d1 = Dataset.from_dict({"a": [0, 1, 2]})
>>> d2 = Dataset.from_dict({"a": [10, 11, 12, 13]})
>>> d3 = Dataset.from_dict({"a": [20, 21, 22]})
>>> dataset = interleave_datasets([d1, d2, d3], stopping_strategy="all_exhausted")
>>> dataset["a"]
[0, 10, 20, 1, 11, 21, 2, 12, 22, 0, 13, 20]
```
## Format
The [`~Dataset.with_format`] function changes the format of a column to be compatible with some common data formats. Specify the output you'd like in the `type` parameter. You can also choose which columns you want to format using `columns=`. Formatting is applied on-the-fly.
For example, create PyTorch tensors by setting `type="torch"`:
```py
>>> dataset = dataset.with_format(type="torch")
```
The [`~Dataset.set_format`] function also changes the format of a column, except it runs in-place:
```py
>>> dataset.set_format(type="torch")
```
If you need to reset the dataset to its original format, set the format to `None` (or use [`~Dataset.reset_format`]):
```py
>>> dataset.format
{'type': 'torch', 'format_kwargs': {}, 'columns': [...], 'output_all_columns': False}
>>> dataset = dataset.with_format(None)
>>> dataset.format
{'type': None, 'format_kwargs': {}, 'columns': [...], 'output_all_columns': False}
```
### Tensors formats
Several tensor and array formats are supported. It is generally recommended to use these formats instead of manually converting the outputs of a dataset to tensors or arrays, to avoid unnecessary data copies and speed up data loading.
Here is the list of supported tensor and array formats:
- NumPy: format name is "numpy", for more information see [Using Datasets with NumPy](use_with_numpy)
- PyTorch: format name is "torch", for more information see [Using Datasets with PyTorch](use_with_pytorch)
- TensorFlow: format name is "tensorflow", for more information see [Using Datasets with TensorFlow](use_with_tensorflow)
- JAX: format name is "jax", for more information see [Using Datasets with JAX](use_with_jax)
<Tip>
Check out the [Using Datasets with TensorFlow](use_with_tensorflow#using-totfdataset) guide for more details on how to efficiently create a TensorFlow dataset.
</Tip>
When a dataset is formatted in a tensor or array format, all the data are formatted as tensors or arrays (except for unsupported types such as strings, in the case of PyTorch):
```python
>>> ds = Dataset.from_dict({"text": ["foo", "bar"], "tokens": [[0, 1, 2], [3, 4, 5]]})
>>> ds = ds.with_format("torch")
>>> ds[0]
{'text': 'foo', 'tokens': tensor([0, 1, 2])}
>>> ds[:2]
{'text': ['foo', 'bar'],
'tokens': tensor([[0, 1, 2],
[3, 4, 5]])}
```
### Tabular formats
You can use a dataframe or table format to optimize data loading and data processing, since these formats generally offer zero-copy operations and transforms written in low-level languages.
Here is the list of supported dataframe and table formats:
- Pandas: format name is "pandas", for more information see [Using Datasets with Pandas](use_with_pandas)
- Polars: format name is "polars", for more information see [Using Datasets with Polars](use_with_polars)
- PyArrow: format name is "arrow", for more information see [Using Datasets with PyArrow](use_with_pyarrow)
When a dataset is formatted in a dataframe or table format, every dataset row or batch of rows is formatted as a dataframe or table, and dataset columns are formatted as a series or array:
```python
>>> ds = Dataset.from_dict({"text": ["foo", "bar"], "label": [0, 1]})
>>> ds = ds.with_format("pandas")
>>> ds[:2]
text label
0 foo 0
1 bar 1
```
Those formats make it possible to iterate on the data faster by avoiding data copies, and also enable faster data processing in [`~Dataset.map`] or [`~Dataset.filter`]:
```python
>>> ds = ds.map(lambda df: df.assign(upper_text=df.text.str.upper()), batched=True)
>>> ds[:2]
text label upper_text
0 foo 0 FOO
1 bar 1 BAR
```
### Custom format transform
The [`~Dataset.with_transform`] function applies a custom formatting transform on-the-fly. This function replaces any previously specified format. For example, you can use this function to tokenize and pad tokens on-the-fly. Tokenization is only applied when examples are accessed:
```py
>>> from transformers import AutoTokenizer
>>> tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
>>> def encode(batch):
... return tokenizer(batch["sentence1"], batch["sentence2"], padding="longest", truncation=True, max_length=512, return_tensors="pt")
>>> dataset = dataset.with_transform(encode)
>>> dataset.format
{'type': 'custom', 'format_kwargs': {'transform': <function __main__.encode(batch)>}, 'columns': ['idx', 'label', 'sentence1', 'sentence2'], 'output_all_columns': False}
```
There is also [`~Dataset.set_transform`] which does the same but runs in-place.
You can also use the [`~Dataset.with_transform`] function to decode formats not supported by [`Features`]. For example, the [`Audio`] feature uses [`soundfile`](https://python-soundfile.readthedocs.io/en/0.11.0/) - a fast and simple library to install - but it does not provide support for less common audio formats. Here is where you can use [`~Dataset.set_transform`] to apply a custom decoding transform on the fly. You're free to use any library you like to decode the audio files.
The example below uses the [`pydub`](http://pydub.com/) package to open an audio format not supported by `soundfile`:
```py
>>> import numpy as np
>>> from pydub import AudioSegment
>>> audio_dataset_amr = Dataset.from_dict({"audio": ["audio_samples/audio.amr"]})
>>> def decode_audio_with_pydub(batch, sampling_rate=16_000):
... def pydub_decode_file(audio_path):
... sound = AudioSegment.from_file(audio_path)
... if sound.frame_rate != sampling_rate:
... sound = sound.set_frame_rate(sampling_rate)
... channel_sounds = sound.split_to_mono()
... samples = [s.get_array_of_samples() for s in channel_sounds]
... fp_arr = np.array(samples).T.astype(np.float32)
... fp_arr /= np.iinfo(samples[0].typecode).max
... return fp_arr
...
... batch["audio"] = [pydub_decode_file(audio_path) for audio_path in batch["audio"]]
... return batch
>>> audio_dataset_amr.set_transform(decode_audio_with_pydub)
```
## Save
Once you are done processing your dataset, you can save and reuse it later with [`~Dataset.save_to_disk`].
Save your dataset by providing the path to the directory you wish to save it to:
```py
>>> encoded_dataset.save_to_disk("path/of/my/dataset/directory")
```
Use the [`load_from_disk`] function to reload the dataset:
```py
>>> from datasets import load_from_disk
>>> reloaded_dataset = load_from_disk("path/of/my/dataset/directory")
```
<Tip>
Want to save your dataset to a cloud storage provider? Read our [Cloud Storage](./filesystems) guide to learn how to save your dataset to AWS or Google Cloud Storage.
</Tip>
## Export
🤗 Datasets supports exporting as well so you can work with your dataset in other applications. The following table shows currently supported file formats you can export to:
| File type | Export method |
|-------------------------|----------------------------------------------------------------|
| CSV | [`Dataset.to_csv`] |
| JSON | [`Dataset.to_json`] |
| Parquet | [`Dataset.to_parquet`] |
| SQL | [`Dataset.to_sql`] |
| In-memory Python object | [`Dataset.to_pandas`], [`Dataset.to_polars`] or [`Dataset.to_dict`] |
For example, export your dataset to a CSV file like this:
```py
>>> encoded_dataset.to_csv("path/of/my/dataset.csv")
```
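The other exporters follow the same pattern, for example:
```py
>>> encoded_dataset.to_json("path/of/my/dataset.jsonl")
>>> encoded_dataset.to_parquet("path/of/my/dataset.parquet")
```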
| datasets/docs/source/process.mdx/0 | {
"file_path": "datasets/docs/source/process.mdx",
"repo_id": "datasets",
"token_count": 11695
} |
# Use with PyTorch
This document is a quick introduction to using `datasets` with PyTorch, with a particular focus on how to get
`torch.Tensor` objects out of our datasets, and how to use a PyTorch `DataLoader` and a Hugging Face `Dataset`
with the best performance.
## Dataset format
By default, datasets return regular python objects: integers, floats, strings, lists, etc.
To get PyTorch tensors instead, you can set the format of the dataset to `pytorch` using [`Dataset.with_format`]:
```py
>>> from datasets import Dataset
>>> data = [[1, 2],[3, 4]]
>>> ds = Dataset.from_dict({"data": data})
>>> ds = ds.with_format("torch")
>>> ds[0]
{'data': tensor([1, 2])}
>>> ds[:2]
{'data': tensor([[1, 2],
[3, 4]])}
```
<Tip>
A [`Dataset`] object is a wrapper of an Arrow table, which allows fast zero-copy reads from arrays in the dataset to PyTorch tensors.
</Tip>
To load the data as tensors on a GPU, specify the `device` argument:
```py
>>> import torch
>>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
>>> ds = ds.with_format("torch", device=device)
>>> ds[0]
{'data': tensor([1, 2], device='cuda:0')}
```
### N-dimensional arrays
If your dataset consists of N-dimensional arrays, you will see that by default they are combined into a single tensor when their shape is fixed:
```py
>>> from datasets import Dataset
>>> data = [[[1, 2],[3, 4]],[[5, 6],[7, 8]]] # fixed shape
>>> ds = Dataset.from_dict({"data": data})
>>> ds = ds.with_format("torch")
>>> ds[0]
{'data': tensor([[1, 2],
[3, 4]])}
```
```py
>>> from datasets import Dataset
>>> data = [[[1, 2],[3]],[[4, 5, 6],[7, 8]]] # varying shape
>>> ds = Dataset.from_dict({"data": data})
>>> ds = ds.with_format("torch")
>>> ds[0]
{'data': [tensor([1, 2]), tensor([3])]}
```
However, this logic often requires slow shape comparisons and data copies.
To avoid this, you must explicitly use the [`Array`] feature type and specify the shape of your tensors:
```py
>>> from datasets import Dataset, Features, Array2D
>>> data = [[[1, 2],[3, 4]],[[5, 6],[7, 8]]]
>>> features = Features({"data": Array2D(shape=(2, 2), dtype='int32')})
>>> ds = Dataset.from_dict({"data": data}, features=features)
>>> ds = ds.with_format("torch")
>>> ds[0]
{'data': tensor([[1, 2],
[3, 4]])}
>>> ds[:2]
{'data': tensor([[[1, 2],
[3, 4]],
[[5, 6],
[7, 8]]])}
```
### Other feature types
[`ClassLabel`] data are properly converted to tensors:
```py
>>> from datasets import Dataset, Features, ClassLabel
>>> labels = [0, 0, 1]
>>> features = Features({"label": ClassLabel(names=["negative", "positive"])})
>>> ds = Dataset.from_dict({"label": labels}, features=features)
>>> ds = ds.with_format("torch")
>>> ds[:3]
{'label': tensor([0, 0, 1])}
```
String and binary objects are unchanged, since PyTorch only supports numbers.
The [`Image`] and [`Audio`] feature types are also supported.
<Tip>
To use the [`Image`] feature type, you'll need to install the `vision` extra as
`pip install datasets[vision]`.
</Tip>
```py
>>> from datasets import Dataset, Features, Audio, Image
>>> images = ["path/to/image.png"] * 10
>>> features = Features({"image": Image()})
>>> ds = Dataset.from_dict({"image": images}, features=features)
>>> ds = ds.with_format("torch")
>>> ds[0]["image"].shape
torch.Size([512, 512, 4])
>>> ds[0]
{'image': tensor([[[255, 215, 106, 255],
[255, 215, 106, 255],
...,
[255, 255, 255, 255],
[255, 255, 255, 255]]], dtype=torch.uint8)}
>>> ds[:2]["image"].shape
torch.Size([2, 512, 512, 4])
>>> ds[:2]
{'image': tensor([[[[255, 215, 106, 255],
[255, 215, 106, 255],
...,
[255, 255, 255, 255],
[255, 255, 255, 255]]]], dtype=torch.uint8)}
```
<Tip>
To use the [`Audio`] feature type, you'll need to install the `audio` extra as
`pip install datasets[audio]`.
</Tip>
```py
>>> from datasets import Dataset, Features, Audio, Image
>>> audio = ["path/to/audio.wav"] * 10
>>> features = Features({"audio": Audio()})
>>> ds = Dataset.from_dict({"audio": audio}, features=features)
>>> ds = ds.with_format("torch")
>>> ds[0]["audio"]["array"]
tensor([ 6.1035e-05, 1.5259e-05, 1.6785e-04, ..., -1.5259e-05,
-1.5259e-05, 1.5259e-05])
>>> ds[0]["audio"]["sampling_rate"]
tensor(44100)
```
## Data loading
Like `torch.utils.data.Dataset` objects, a [`Dataset`] can be passed directly to a PyTorch `DataLoader`:
```py
>>> import numpy as np
>>> from datasets import Dataset
>>> from torch.utils.data import DataLoader
>>> data = np.random.rand(16)
>>> label = np.random.randint(0, 2, size=16)
>>> ds = Dataset.from_dict({"data": data, "label": label}).with_format("torch")
>>> dataloader = DataLoader(ds, batch_size=4)
>>> for batch in dataloader:
... print(batch)
{'data': tensor([0.0047, 0.4979, 0.6726, 0.8105]), 'label': tensor([0, 1, 0, 1])}
{'data': tensor([0.4832, 0.2723, 0.4259, 0.2224]), 'label': tensor([0, 0, 0, 0])}
{'data': tensor([0.5837, 0.3444, 0.4658, 0.6417]), 'label': tensor([0, 1, 0, 0])}
{'data': tensor([0.7022, 0.1225, 0.7228, 0.8259]), 'label': tensor([1, 1, 1, 1])}
```
### Optimize data loading
There are several ways you can increase the speed at which your data is loaded, which can save you time, especially if you are working with large datasets.
PyTorch offers parallelized data loading, retrieving batches of indices instead of individual examples (see the `BatchSampler` sketch below), and streaming to iterate over the dataset without downloading it to disk.
#### Use multiple workers
You can parallelize data loading with the `num_workers` argument of a PyTorch `DataLoader` and get a higher throughput.
Under the hood, the `DataLoader` starts `num_workers` processes.
Each process reloads the dataset passed to the `DataLoader` and is used to query examples.
Reloading the dataset inside a worker doesn't fill up your RAM, since it simply memory-maps the dataset again from your disk.
```py
>>> import numpy as np
>>> from datasets import Dataset, load_from_disk
>>> from torch.utils.data import DataLoader
>>> data = np.random.rand(10_000)
>>> Dataset.from_dict({"data": data}).save_to_disk("my_dataset")
>>> ds = load_from_disk("my_dataset").with_format("torch")
>>> dataloader = DataLoader(ds, batch_size=32, num_workers=4)
```
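#### Use a BatchSampler
As mentioned above, you can also reduce the number of dataset queries by retrieving batches of indices instead of individual ones. A minimal sketch, assuming `ds` is the dataset reloaded in the previous example (already in the `"torch"` format): pass a `BatchSampler` as the `sampler` and disable automatic batching, so the dataset is queried once per batch rather than once per example:
```py
>>> from torch.utils.data import BatchSampler, DataLoader, RandomSampler
>>> sampler = BatchSampler(RandomSampler(ds), batch_size=32, drop_last=False)
>>> dataloader = DataLoader(ds, sampler=sampler, batch_size=None)
```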
### Stream data
Stream a dataset by loading it as an [`IterableDataset`]. This allows you to progressively iterate over a remote dataset without downloading it to disk, or to iterate over local data files.
Learn more about which type of dataset is best for your use case in the [choosing between a regular dataset or an iterable dataset](./about_mapstyle_vs_iterable) guide.
An iterable dataset from `datasets` inherits from `torch.utils.data.IterableDataset` so you can pass it to a `torch.utils.data.DataLoader`:
```py
>>> import numpy as np
>>> from datasets import Dataset, load_dataset
>>> from torch.utils.data import DataLoader
>>> data = np.random.rand(10_000)
>>> Dataset.from_dict({"data": data}).push_to_hub("<username>/my_dataset") # Upload to the Hugging Face Hub
>>> my_iterable_dataset = load_dataset("<username>/my_dataset", streaming=True, split="train")
>>> dataloader = DataLoader(my_iterable_dataset, batch_size=32)
```
If the dataset is split into several shards (i.e. if the dataset consists of multiple data files), then you can stream in parallel using `num_workers`:
```py
>>> my_iterable_dataset = load_dataset("deepmind/code_contests", streaming=True, split="train")
>>> my_iterable_dataset.num_shards
39
>>> dataloader = DataLoader(my_iterable_dataset, batch_size=32, num_workers=4)
```
In this case each worker is given a subset of the list of shards to stream from.
### Checkpoint and resume
If you need a DataLoader that you can checkpoint and resume in the middle of training, you can use the `StatefulDataLoader` from [torchdata](https://github.com/pytorch/data):
```py
>>> from torchdata.stateful_dataloader import StatefulDataLoader
>>> my_iterable_dataset = load_dataset("deepmind/code_contests", streaming=True, split="train")
>>> dataloader = StatefulDataLoader(my_iterable_dataset, batch_size=32, num_workers=4)
>>> # save in the middle of training
>>> state_dict = dataloader.state_dict()
>>> # and resume later
>>> dataloader.load_state_dict(state_dict)
```
This is possible thanks to [`IterableDataset.state_dict`] and [`IterableDataset.load_state_dict`].
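Since this state lives on the dataset itself, you can also checkpoint and resume without a `StatefulDataLoader`. Here is a minimal sketch, using a small in-memory dataset purely for illustration:
```py
>>> from datasets import Dataset
>>> ds = Dataset.from_dict({"a": range(6)}).to_iterable_dataset(num_shards=3)
>>> for idx, example in enumerate(ds):
...     if idx == 2:
...         state_dict = ds.state_dict()  # save the current position
...         break
>>> ds.load_state_dict(state_dict)  # later, resume iteration from the saved position
```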
### Distributed
To split your dataset across your training nodes, you can use [`datasets.distributed.split_dataset_by_node`]:
```python
import os
from datasets.distributed import split_dataset_by_node
ds = split_dataset_by_node(ds, rank=int(os.environ["RANK"]), world_size=int(os.environ["WORLD_SIZE"]))
```
This works for both map-style datasets and iterable datasets.
The dataset is split for the node at rank `rank` in a pool of nodes of size `world_size`.
For map-style datasets:
Each node is assigned a chunk of data, e.g. rank 0 is given the first chunk of the dataset.
For iterable datasets:
If the dataset has a number of shards that is a multiple of `world_size` (i.e. if `dataset.num_shards % world_size == 0`),
then the shards are evenly assigned across the nodes, which is the most efficient.
Otherwise, each node keeps 1 example out of `world_size`, skipping the other examples.
This can also be combined with a `torch.utils.data.DataLoader` if you want each node to use multiple workers to load the data.
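For example, here is a rough sketch combining the two (the `RANK` and `WORLD_SIZE` environment variables are assumed to be set by your launcher, and the dataset is the one used above):
```python
import os
from datasets import load_dataset
from datasets.distributed import split_dataset_by_node
from torch.utils.data import DataLoader
ds = load_dataset("deepmind/code_contests", streaming=True, split="train")
ds = split_dataset_by_node(ds, rank=int(os.environ["RANK"]), world_size=int(os.environ["WORLD_SIZE"]))
dataloader = DataLoader(ds, batch_size=32, num_workers=4)  # each node streams its own shards with 4 workers
```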
| datasets/docs/source/use_with_pytorch.mdx/0 | {
"file_path": "datasets/docs/source/use_with_pytorch.mdx",
"repo_id": "datasets",
"token_count": 3446
} |
from argparse import ArgumentParser
from typing import Optional
from datasets.commands import BaseDatasetsCLICommand
from datasets.hub import convert_to_parquet
def _command_factory(args):
return ConvertToParquetCommand(
args.dataset_id,
args.token,
args.revision,
args.trust_remote_code,
)
class ConvertToParquetCommand(BaseDatasetsCLICommand):
@staticmethod
def register_subcommand(parser):
parser: ArgumentParser = parser.add_parser("convert_to_parquet", help="Convert dataset to Parquet")
parser.add_argument(
"dataset_id", help="source dataset ID, e.g. USERNAME/DATASET_NAME or ORGANIZATION/DATASET_NAME"
)
parser.add_argument("--token", help="access token to the Hugging Face Hub (defaults to logged-in user's one)")
parser.add_argument("--revision", help="source revision")
parser.add_argument(
"--trust_remote_code", action="store_true", help="whether to trust the code execution of the load script"
)
parser.set_defaults(func=_command_factory)
def __init__(
self,
dataset_id: str,
token: Optional[str],
revision: Optional[str],
trust_remote_code: bool,
):
self._dataset_id = dataset_id
self._token = token
self._revision = revision
self._trust_remote_code = trust_remote_code
def run(self) -> None:
_ = convert_to_parquet(
self._dataset_id, revision=self._revision, token=self._token, trust_remote_code=self._trust_remote_code
)
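# Illustrative usage of this subcommand via the `datasets-cli` entry point (the dataset ID is a placeholder):
#
#   datasets-cli convert_to_parquet USERNAME/DATASET_NAME --trust_remote_code
#
# This is intended to convert a script-based Hub dataset to Parquet by opening pull request(s) on the dataset repository.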
| datasets/src/datasets/commands/convert_to_parquet.py/0 | {
"file_path": "datasets/src/datasets/commands/convert_to_parquet.py",
"repo_id": "datasets",
"token_count": 652
} |
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""This class handle features definition in datasets and some utilities to display table type."""
import copy
import json
import re
import sys
from collections.abc import Iterable, Mapping
from collections.abc import Sequence as SequenceABC
from dataclasses import InitVar, dataclass, field, fields
from functools import reduce, wraps
from operator import mul
from typing import Any, Callable, ClassVar, Dict, List, Optional, Tuple, Union
from typing import Sequence as Sequence_
import numpy as np
import pandas as pd
import pyarrow as pa
import pyarrow.compute as pc
import pyarrow.types
from pandas.api.extensions import ExtensionArray as PandasExtensionArray
from pandas.api.extensions import ExtensionDtype as PandasExtensionDtype
from .. import config
from ..naming import camelcase_to_snakecase, snakecase_to_camelcase
from ..table import array_cast
from ..utils import experimental, logging
from ..utils.py_utils import asdict, first_non_null_value, zip_dict
from .audio import Audio
from .image import Image, encode_pil_image
from .translation import Translation, TranslationVariableLanguages
from .video import Video
logger = logging.get_logger(__name__)
def _arrow_to_datasets_dtype(arrow_type: pa.DataType) -> str:
"""
_arrow_to_datasets_dtype takes a pyarrow.DataType and converts it to a datasets string dtype.
In effect, `dt == string_to_arrow(_arrow_to_datasets_dtype(dt))`
"""
if pyarrow.types.is_null(arrow_type):
return "null"
elif pyarrow.types.is_boolean(arrow_type):
return "bool"
elif pyarrow.types.is_int8(arrow_type):
return "int8"
elif pyarrow.types.is_int16(arrow_type):
return "int16"
elif pyarrow.types.is_int32(arrow_type):
return "int32"
elif pyarrow.types.is_int64(arrow_type):
return "int64"
elif pyarrow.types.is_uint8(arrow_type):
return "uint8"
elif pyarrow.types.is_uint16(arrow_type):
return "uint16"
elif pyarrow.types.is_uint32(arrow_type):
return "uint32"
elif pyarrow.types.is_uint64(arrow_type):
return "uint64"
elif pyarrow.types.is_float16(arrow_type):
return "float16" # pyarrow dtype is "halffloat"
elif pyarrow.types.is_float32(arrow_type):
return "float32" # pyarrow dtype is "float"
elif pyarrow.types.is_float64(arrow_type):
return "float64" # pyarrow dtype is "double"
elif pyarrow.types.is_time32(arrow_type):
return f"time32[{pa.type_for_alias(str(arrow_type)).unit}]"
elif pyarrow.types.is_time64(arrow_type):
return f"time64[{pa.type_for_alias(str(arrow_type)).unit}]"
elif pyarrow.types.is_timestamp(arrow_type):
if arrow_type.tz is None:
return f"timestamp[{arrow_type.unit}]"
elif arrow_type.tz:
return f"timestamp[{arrow_type.unit}, tz={arrow_type.tz}]"
else:
raise ValueError(f"Unexpected timestamp object {arrow_type}.")
elif pyarrow.types.is_date32(arrow_type):
return "date32" # pyarrow dtype is "date32[day]"
elif pyarrow.types.is_date64(arrow_type):
return "date64" # pyarrow dtype is "date64[ms]"
elif pyarrow.types.is_duration(arrow_type):
return f"duration[{arrow_type.unit}]"
elif pyarrow.types.is_decimal128(arrow_type):
return f"decimal128({arrow_type.precision}, {arrow_type.scale})"
elif pyarrow.types.is_decimal256(arrow_type):
return f"decimal256({arrow_type.precision}, {arrow_type.scale})"
elif pyarrow.types.is_binary(arrow_type):
return "binary"
elif pyarrow.types.is_large_binary(arrow_type):
return "large_binary"
elif pyarrow.types.is_string(arrow_type):
return "string"
elif pyarrow.types.is_large_string(arrow_type):
return "large_string"
elif pyarrow.types.is_dictionary(arrow_type):
return _arrow_to_datasets_dtype(arrow_type.value_type)
else:
raise ValueError(f"Arrow type {arrow_type} does not have a datasets dtype equivalent.")
def string_to_arrow(datasets_dtype: str) -> pa.DataType:
"""
string_to_arrow takes a datasets string dtype and converts it to a pyarrow.DataType.
In effect, `dt == string_to_arrow(_arrow_to_datasets_dtype(dt))`
This is necessary because the datasets.Value() primitive type is constructed using a string dtype
Value(dtype=str)
    But Features.type (via `get_nested_type()`) expects to resolve Features into a pyarrow Schema,
which means that each Value() must be able to resolve into a corresponding pyarrow.DataType, which is the
purpose of this function.
"""
def _dtype_error_msg(dtype, pa_dtype, examples=None, urls=None):
msg = f"{dtype} is not a validly formatted string representation of the pyarrow {pa_dtype} type."
if examples:
examples = ", ".join(examples[:-1]) + " or " + examples[-1] if len(examples) > 1 else examples[0]
msg += f"\nValid examples include: {examples}."
if urls:
urls = ", ".join(urls[:-1]) + " and " + urls[-1] if len(urls) > 1 else urls[0]
msg += f"\nFor more insformation, see: {urls}."
return msg
if datasets_dtype in pa.__dict__:
return pa.__dict__[datasets_dtype]()
if (datasets_dtype + "_") in pa.__dict__:
return pa.__dict__[datasets_dtype + "_"]()
timestamp_matches = re.search(r"^timestamp\[(.*)\]$", datasets_dtype)
if timestamp_matches:
timestamp_internals = timestamp_matches.group(1)
internals_matches = re.search(r"^(s|ms|us|ns),\s*tz=([a-zA-Z0-9/_+\-:]*)$", timestamp_internals)
if timestamp_internals in ["s", "ms", "us", "ns"]:
return pa.timestamp(timestamp_internals)
elif internals_matches:
return pa.timestamp(internals_matches.group(1), internals_matches.group(2))
else:
raise ValueError(
_dtype_error_msg(
datasets_dtype,
"timestamp",
examples=["timestamp[us]", "timestamp[us, tz=America/New_York"],
urls=["https://arrow.apache.org/docs/python/generated/pyarrow.timestamp.html"],
)
)
duration_matches = re.search(r"^duration\[(.*)\]$", datasets_dtype)
if duration_matches:
duration_internals = duration_matches.group(1)
if duration_internals in ["s", "ms", "us", "ns"]:
return pa.duration(duration_internals)
else:
raise ValueError(
_dtype_error_msg(
datasets_dtype,
"duration",
examples=["duration[s]", "duration[us]"],
urls=["https://arrow.apache.org/docs/python/generated/pyarrow.duration.html"],
)
)
time_matches = re.search(r"^time(.*)\[(.*)\]$", datasets_dtype)
if time_matches:
time_internals_bits = time_matches.group(1)
if time_internals_bits == "32":
time_internals_unit = time_matches.group(2)
if time_internals_unit in ["s", "ms"]:
return pa.time32(time_internals_unit)
else:
raise ValueError(
f"{time_internals_unit} is not a valid unit for the pyarrow time32 type. Supported units: s (second) and ms (millisecond)."
)
elif time_internals_bits == "64":
time_internals_unit = time_matches.group(2)
if time_internals_unit in ["us", "ns"]:
return pa.time64(time_internals_unit)
else:
raise ValueError(
f"{time_internals_unit} is not a valid unit for the pyarrow time64 type. Supported units: us (microsecond) and ns (nanosecond)."
)
else:
raise ValueError(
_dtype_error_msg(
datasets_dtype,
"time",
examples=["time32[s]", "time64[us]"],
urls=[
"https://arrow.apache.org/docs/python/generated/pyarrow.time32.html",
"https://arrow.apache.org/docs/python/generated/pyarrow.time64.html",
],
)
)
decimal_matches = re.search(r"^decimal(.*)\((.*)\)$", datasets_dtype)
if decimal_matches:
decimal_internals_bits = decimal_matches.group(1)
if decimal_internals_bits == "128":
decimal_internals_precision_and_scale = re.search(r"^(\d+),\s*(-?\d+)$", decimal_matches.group(2))
if decimal_internals_precision_and_scale:
precision = decimal_internals_precision_and_scale.group(1)
scale = decimal_internals_precision_and_scale.group(2)
return pa.decimal128(int(precision), int(scale))
else:
raise ValueError(
_dtype_error_msg(
datasets_dtype,
"decimal128",
examples=["decimal128(10, 2)", "decimal128(4, -2)"],
urls=["https://arrow.apache.org/docs/python/generated/pyarrow.decimal128.html"],
)
)
elif decimal_internals_bits == "256":
decimal_internals_precision_and_scale = re.search(r"^(\d+),\s*(-?\d+)$", decimal_matches.group(2))
if decimal_internals_precision_and_scale:
precision = decimal_internals_precision_and_scale.group(1)
scale = decimal_internals_precision_and_scale.group(2)
return pa.decimal256(int(precision), int(scale))
else:
raise ValueError(
_dtype_error_msg(
datasets_dtype,
"decimal256",
examples=["decimal256(30, 2)", "decimal256(38, -4)"],
urls=["https://arrow.apache.org/docs/python/generated/pyarrow.decimal256.html"],
)
)
else:
raise ValueError(
_dtype_error_msg(
datasets_dtype,
"decimal",
examples=["decimal128(12, 3)", "decimal256(40, 6)"],
urls=[
"https://arrow.apache.org/docs/python/generated/pyarrow.decimal128.html",
"https://arrow.apache.org/docs/python/generated/pyarrow.decimal256.html",
],
)
)
raise ValueError(
f"Neither {datasets_dtype} nor {datasets_dtype + '_'} seems to be a pyarrow data type. "
f"Please make sure to use a correct data type, see: "
f"https://arrow.apache.org/docs/python/api/datatypes.html#factory-functions"
)
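# For illustration, a few conversions performed by string_to_arrow:
#   string_to_arrow("int64")                 -> pa.int64()
#   string_to_arrow("timestamp[us, tz=UTC]") -> pa.timestamp("us", tz="UTC")
#   string_to_arrow("decimal128(10, 2)")     -> pa.decimal128(10, 2)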
def _cast_to_python_objects(obj: Any, only_1d_for_numpy: bool, optimize_list_casting: bool) -> Tuple[Any, bool]:
"""
Cast pytorch/tensorflow/pandas objects to python numpy array/lists.
It works recursively.
If `optimize_list_casting` is True, to avoid iterating over possibly long lists, it first checks (recursively) if the first element that is not None or empty (if it is a sequence) has to be casted.
If the first element needs to be casted, then all the elements of the list will be casted, otherwise they'll stay the same.
    This trick makes it possible to cast objects that contain tokenizer outputs, for example, without iterating over every single token.
Args:
obj: the object (nested struct) to cast.
only_1d_for_numpy (bool): whether to keep the full multi-dim tensors as multi-dim numpy arrays, or convert them to
nested lists of 1-dimensional numpy arrays. This can be useful to keep only 1-d arrays to instantiate Arrow arrays.
            Indeed Arrow only supports converting 1-dimensional array values.
optimize_list_casting (bool): whether to optimize list casting by checking the first non-null element to see if it needs to be casted
and if it doesn't, not checking the rest of the list elements.
Returns:
casted_obj: the casted object
has_changed (bool): True if the object has been changed, False if it is identical
"""
if config.TF_AVAILABLE and "tensorflow" in sys.modules:
import tensorflow as tf
if config.TORCH_AVAILABLE and "torch" in sys.modules:
import torch
if config.JAX_AVAILABLE and "jax" in sys.modules:
import jax.numpy as jnp
if config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(obj, np.ndarray):
if obj.ndim == 0:
return obj[()], True
elif not only_1d_for_numpy or obj.ndim == 1:
return obj, False
else:
return (
[
_cast_to_python_objects(
x, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
)[0]
for x in obj
],
True,
)
elif config.TORCH_AVAILABLE and "torch" in sys.modules and isinstance(obj, torch.Tensor):
if obj.dtype == torch.bfloat16:
return _cast_to_python_objects(
obj.detach().to(torch.float).cpu().numpy(),
only_1d_for_numpy=only_1d_for_numpy,
optimize_list_casting=optimize_list_casting,
)[0], True
if obj.ndim == 0:
return obj.detach().cpu().numpy()[()], True
elif not only_1d_for_numpy or obj.ndim == 1:
return obj.detach().cpu().numpy(), True
else:
return (
[
_cast_to_python_objects(
x, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
)[0]
for x in obj.detach().cpu().numpy()
],
True,
)
elif config.TF_AVAILABLE and "tensorflow" in sys.modules and isinstance(obj, tf.Tensor):
if obj.ndim == 0:
return obj.numpy()[()], True
elif not only_1d_for_numpy or obj.ndim == 1:
return obj.numpy(), True
else:
return (
[
_cast_to_python_objects(
x, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
)[0]
for x in obj.numpy()
],
True,
)
elif config.JAX_AVAILABLE and "jax" in sys.modules and isinstance(obj, jnp.ndarray):
if obj.ndim == 0:
return np.asarray(obj)[()], True
elif not only_1d_for_numpy or obj.ndim == 1:
return np.asarray(obj), True
else:
return (
[
_cast_to_python_objects(
x, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
)[0]
for x in np.asarray(obj)
],
True,
)
elif config.PIL_AVAILABLE and "PIL" in sys.modules and isinstance(obj, PIL.Image.Image):
return encode_pil_image(obj), True
elif isinstance(obj, pd.Series):
return (
_cast_to_python_objects(
obj.tolist(), only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
)[0],
True,
)
elif isinstance(obj, pd.DataFrame):
return (
{
key: _cast_to_python_objects(
value, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
)[0]
for key, value in obj.to_dict("series").items()
},
True,
)
elif isinstance(obj, pd.Timestamp):
return obj.to_pydatetime(), True
elif isinstance(obj, pd.Timedelta):
return obj.to_pytimedelta(), True
elif isinstance(obj, Mapping):
has_changed = not isinstance(obj, dict)
output = {}
for k, v in obj.items():
casted_v, has_changed_v = _cast_to_python_objects(
v, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
)
has_changed |= has_changed_v
output[k] = casted_v
return output if has_changed else obj, has_changed
elif hasattr(obj, "__array__"):
return (
_cast_to_python_objects(
obj.__array__(), only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
)[0],
True,
)
elif isinstance(obj, (list, tuple)):
if len(obj) > 0:
for first_elmt in obj:
if _check_non_null_non_empty_recursive(first_elmt):
break
casted_first_elmt, has_changed_first_elmt = _cast_to_python_objects(
first_elmt, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
)
if has_changed_first_elmt or not optimize_list_casting:
return (
[
_cast_to_python_objects(
elmt, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
)[0]
for elmt in obj
],
True,
)
else:
if isinstance(obj, (list, tuple)):
return obj, False
else:
return list(obj), True
else:
return obj, False
else:
return obj, False
def cast_to_python_objects(obj: Any, only_1d_for_numpy=False, optimize_list_casting=True) -> Any:
"""
Cast numpy/pytorch/tensorflow/pandas objects to python lists.
It works recursively.
    If `optimize_list_casting` is True, to avoid iterating over possibly long lists, it first checks (recursively) if the first element that is not None or empty (if it is a sequence) has to be casted.
    If the first element needs to be casted, then all the elements of the list will be casted, otherwise they'll stay the same.
    This trick makes it possible to cast objects that contain tokenizer outputs, for example, without iterating over every single token.
Args:
obj: the object (nested struct) to cast
only_1d_for_numpy (bool, default ``False``): whether to keep the full multi-dim tensors as multi-dim numpy arrays, or convert them to
nested lists of 1-dimensional numpy arrays. This can be useful to keep only 1-d arrays to instantiate Arrow arrays.
            Indeed Arrow only supports converting 1-dimensional array values.
optimize_list_casting (bool, default ``True``): whether to optimize list casting by checking the first non-null element to see if it needs to be casted
and if it doesn't, not checking the rest of the list elements.
Returns:
casted_obj: the casted object
"""
return _cast_to_python_objects(
obj, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
)[0]
@dataclass
class Value:
"""
Scalar feature value of a particular data type.
The possible dtypes of `Value` are as follows:
- `null`
- `bool`
- `int8`
- `int16`
- `int32`
- `int64`
- `uint8`
- `uint16`
- `uint32`
- `uint64`
- `float16`
- `float32` (alias float)
- `float64` (alias double)
- `time32[(s|ms)]`
- `time64[(us|ns)]`
- `timestamp[(s|ms|us|ns)]`
- `timestamp[(s|ms|us|ns), tz=(tzstring)]`
- `date32`
- `date64`
- `duration[(s|ms|us|ns)]`
- `decimal128(precision, scale)`
- `decimal256(precision, scale)`
- `binary`
- `large_binary`
- `string`
- `large_string`
Args:
dtype (`str`):
Name of the data type.
Example:
```py
>>> from datasets import Features
>>> features = Features({'stars': Value(dtype='int32')})
>>> features
{'stars': Value(dtype='int32', id=None)}
```
"""
dtype: str
id: Optional[str] = None
# Automatically constructed
pa_type: ClassVar[Any] = None
_type: str = field(default="Value", init=False, repr=False)
def __post_init__(self):
if self.dtype == "double": # fix inferred type
self.dtype = "float64"
if self.dtype == "float": # fix inferred type
self.dtype = "float32"
self.pa_type = string_to_arrow(self.dtype)
def __call__(self):
return self.pa_type
def encode_example(self, value):
if pa.types.is_boolean(self.pa_type):
return bool(value)
elif pa.types.is_integer(self.pa_type):
return int(value)
elif pa.types.is_floating(self.pa_type):
return float(value)
elif pa.types.is_string(self.pa_type):
return str(value)
else:
return value
class _ArrayXD:
def __post_init__(self):
self.shape = tuple(self.shape)
def __call__(self):
pa_type = globals()[self.__class__.__name__ + "ExtensionType"](self.shape, self.dtype)
return pa_type
def encode_example(self, value):
return value
@dataclass
class Array2D(_ArrayXD):
"""Create a two-dimensional array.
Args:
shape (`tuple`):
Size of each dimension.
dtype (`str`):
Name of the data type.
Example:
```py
>>> from datasets import Features
>>> features = Features({'x': Array2D(shape=(1, 3), dtype='int32')})
```
"""
shape: tuple
dtype: str
id: Optional[str] = None
# Automatically constructed
_type: str = field(default="Array2D", init=False, repr=False)
@dataclass
class Array3D(_ArrayXD):
"""Create a three-dimensional array.
Args:
shape (`tuple`):
Size of each dimension.
dtype (`str`):
Name of the data type.
Example:
```py
>>> from datasets import Features
>>> features = Features({'x': Array3D(shape=(1, 2, 3), dtype='int32')})
```
"""
shape: tuple
dtype: str
id: Optional[str] = None
# Automatically constructed
_type: str = field(default="Array3D", init=False, repr=False)
@dataclass
class Array4D(_ArrayXD):
"""Create a four-dimensional array.
Args:
shape (`tuple`):
Size of each dimension.
dtype (`str`):
Name of the data type.
Example:
```py
>>> from datasets import Features
>>> features = Features({'x': Array4D(shape=(1, 2, 2, 3), dtype='int32')})
```
"""
shape: tuple
dtype: str
id: Optional[str] = None
# Automatically constructed
_type: str = field(default="Array4D", init=False, repr=False)
@dataclass
class Array5D(_ArrayXD):
"""Create a five-dimensional array.
Args:
shape (`tuple`):
Size of each dimension.
dtype (`str`):
Name of the data type.
Example:
```py
>>> from datasets import Features
>>> features = Features({'x': Array5D(shape=(1, 2, 2, 3, 3), dtype='int32')})
```
"""
shape: tuple
dtype: str
id: Optional[str] = None
# Automatically constructed
_type: str = field(default="Array5D", init=False, repr=False)
class _ArrayXDExtensionType(pa.ExtensionType):
ndims: Optional[int] = None
def __init__(self, shape: tuple, dtype: str):
if self.ndims is None or self.ndims <= 1:
raise ValueError("You must instantiate an array type with a value for dim that is > 1")
if len(shape) != self.ndims:
raise ValueError(f"shape={shape} and ndims={self.ndims} don't match")
for dim in range(1, self.ndims):
if shape[dim] is None:
raise ValueError(f"Support only dynamic size on first dimension. Got: {shape}")
self.shape = tuple(shape)
self.value_type = dtype
self.storage_dtype = self._generate_dtype(self.value_type)
pa.ExtensionType.__init__(self, self.storage_dtype, f"{self.__class__.__module__}.{self.__class__.__name__}")
def __arrow_ext_serialize__(self):
return json.dumps((self.shape, self.value_type)).encode()
@classmethod
def __arrow_ext_deserialize__(cls, storage_type, serialized):
args = json.loads(serialized)
return cls(*args)
# This was added to pa.ExtensionType in pyarrow >= 13.0.0
def __reduce__(self):
return self.__arrow_ext_deserialize__, (self.storage_type, self.__arrow_ext_serialize__())
def __hash__(self):
return hash((self.__class__, self.shape, self.value_type))
def __arrow_ext_class__(self):
return ArrayExtensionArray
def _generate_dtype(self, dtype):
dtype = string_to_arrow(dtype)
for d in reversed(self.shape):
dtype = pa.list_(dtype)
# Don't specify the size of the list, since fixed length list arrays have issues
# being validated after slicing in pyarrow 0.17.1
return dtype
def to_pandas_dtype(self):
return PandasArrayExtensionDtype(self.value_type)
class Array2DExtensionType(_ArrayXDExtensionType):
ndims = 2
class Array3DExtensionType(_ArrayXDExtensionType):
ndims = 3
class Array4DExtensionType(_ArrayXDExtensionType):
ndims = 4
class Array5DExtensionType(_ArrayXDExtensionType):
ndims = 5
# Register the extension types for deserialization
pa.register_extension_type(Array2DExtensionType((1, 2), "int64"))
pa.register_extension_type(Array3DExtensionType((1, 2, 3), "int64"))
pa.register_extension_type(Array4DExtensionType((1, 2, 3, 4), "int64"))
pa.register_extension_type(Array5DExtensionType((1, 2, 3, 4, 5), "int64"))
def _is_zero_copy_only(pa_type: pa.DataType, unnest: bool = False) -> bool:
"""
When converting a pyarrow array to a numpy array, we must know whether this could be done in zero-copy or not.
This function returns the value of the ``zero_copy_only`` parameter to pass to ``.to_numpy()``, given the type of the pyarrow array.
# zero copy is available for all primitive types except booleans and temporal types (date, time, timestamp or duration)
# primitive types are types for which the physical representation in arrow and in numpy
# https://github.com/wesm/arrow/blob/c07b9b48cf3e0bbbab493992a492ae47e5b04cad/python/pyarrow/types.pxi#L821
# see https://arrow.apache.org/docs/python/generated/pyarrow.Array.html#pyarrow.Array.to_numpy
# and https://issues.apache.org/jira/browse/ARROW-2871?jql=text%20~%20%22boolean%20to_numpy%22
"""
def _unnest_pa_type(pa_type: pa.DataType) -> pa.DataType:
if pa.types.is_list(pa_type):
return _unnest_pa_type(pa_type.value_type)
return pa_type
if unnest:
pa_type = _unnest_pa_type(pa_type)
return pa.types.is_primitive(pa_type) and not (pa.types.is_boolean(pa_type) or pa.types.is_temporal(pa_type))
class ArrayExtensionArray(pa.ExtensionArray):
def __array__(self):
zero_copy_only = _is_zero_copy_only(self.storage.type, unnest=True)
return self.to_numpy(zero_copy_only=zero_copy_only)
def __getitem__(self, i):
return self.storage[i]
def to_numpy(self, zero_copy_only=True):
storage: pa.ListArray = self.storage
null_mask = storage.is_null().to_numpy(zero_copy_only=False)
if self.type.shape[0] is not None:
size = 1
null_indices = np.arange(len(storage))[null_mask] - np.arange(np.sum(null_mask))
for i in range(self.type.ndims):
size *= self.type.shape[i]
storage = storage.flatten()
numpy_arr = storage.to_numpy(zero_copy_only=zero_copy_only)
numpy_arr = numpy_arr.reshape(len(self) - len(null_indices), *self.type.shape)
if len(null_indices):
numpy_arr = np.insert(numpy_arr.astype(np.float64), null_indices, np.nan, axis=0)
else:
shape = self.type.shape
ndims = self.type.ndims
arrays = []
first_dim_offsets = np.array([off.as_py() for off in storage.offsets])
for i, is_null in enumerate(null_mask):
if is_null:
arrays.append(np.nan)
else:
storage_el = storage[i : i + 1]
first_dim = first_dim_offsets[i + 1] - first_dim_offsets[i]
# flatten storage
for _ in range(ndims):
storage_el = storage_el.flatten()
numpy_arr = storage_el.to_numpy(zero_copy_only=zero_copy_only)
arrays.append(numpy_arr.reshape(first_dim, *shape[1:]))
if len(np.unique(np.diff(first_dim_offsets))) > 1:
# ragged
numpy_arr = np.empty(len(arrays), dtype=object)
numpy_arr[:] = arrays
else:
numpy_arr = np.array(arrays)
return numpy_arr
def to_pylist(self):
zero_copy_only = _is_zero_copy_only(self.storage.type, unnest=True)
numpy_arr = self.to_numpy(zero_copy_only=zero_copy_only)
if self.type.shape[0] is None and numpy_arr.dtype == object:
return [arr.tolist() for arr in numpy_arr.tolist()]
else:
return numpy_arr.tolist()
class PandasArrayExtensionDtype(PandasExtensionDtype):
_metadata = "value_type"
def __init__(self, value_type: Union["PandasArrayExtensionDtype", np.dtype]):
self._value_type = value_type
def __from_arrow__(self, array: Union[pa.Array, pa.ChunkedArray]):
if isinstance(array, pa.ChunkedArray):
array = array.type.wrap_array(pa.concat_arrays([chunk.storage for chunk in array.chunks]))
zero_copy_only = _is_zero_copy_only(array.storage.type, unnest=True)
numpy_arr = array.to_numpy(zero_copy_only=zero_copy_only)
return PandasArrayExtensionArray(numpy_arr)
@classmethod
def construct_array_type(cls):
return PandasArrayExtensionArray
@property
def type(self) -> type:
return np.ndarray
@property
def kind(self) -> str:
return "O"
@property
def name(self) -> str:
return f"array[{self.value_type}]"
@property
def value_type(self) -> np.dtype:
return self._value_type
class PandasArrayExtensionArray(PandasExtensionArray):
def __init__(self, data: np.ndarray, copy: bool = False):
self._data = data if not copy else np.array(data)
self._dtype = PandasArrayExtensionDtype(data.dtype)
def __array__(self, dtype=None):
"""
Convert to NumPy Array.
Note that Pandas expects a 1D array when dtype is set to object.
But for other dtypes, the returned shape is the same as the one of ``data``.
More info about pandas 1D requirement for PandasExtensionArray here:
https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.api.extensions.ExtensionArray.html#pandas.api.extensions.ExtensionArray
"""
if dtype == np.dtype(object):
out = np.empty(len(self._data), dtype=object)
for i in range(len(self._data)):
out[i] = self._data[i]
return out
if dtype is None:
return self._data
else:
return self._data.astype(dtype)
def copy(self, deep: bool = False) -> "PandasArrayExtensionArray":
return PandasArrayExtensionArray(self._data, copy=True)
@classmethod
def _from_sequence(
cls, scalars, dtype: Optional[PandasArrayExtensionDtype] = None, copy: bool = False
) -> "PandasArrayExtensionArray":
if len(scalars) > 1 and all(
isinstance(x, np.ndarray) and x.shape == scalars[0].shape and x.dtype == scalars[0].dtype for x in scalars
):
data = np.array(scalars, dtype=dtype if dtype is None else dtype.value_type, copy=copy)
else:
data = np.empty(len(scalars), dtype=object)
data[:] = scalars
return cls(data, copy=copy)
@classmethod
def _concat_same_type(cls, to_concat: Sequence_["PandasArrayExtensionArray"]) -> "PandasArrayExtensionArray":
if len(to_concat) > 1 and all(
va._data.shape == to_concat[0]._data.shape and va._data.dtype == to_concat[0]._data.dtype
for va in to_concat
):
data = np.vstack([va._data for va in to_concat])
else:
data = np.empty(len(to_concat), dtype=object)
data[:] = [va._data for va in to_concat]
return cls(data, copy=False)
@property
def dtype(self) -> PandasArrayExtensionDtype:
return self._dtype
@property
def nbytes(self) -> int:
return self._data.nbytes
def isna(self) -> np.ndarray:
return np.array([pd.isna(arr).any() for arr in self._data])
def __setitem__(self, key: Union[int, slice, np.ndarray], value: Any) -> None:
raise NotImplementedError()
def __getitem__(self, item: Union[int, slice, np.ndarray]) -> Union[np.ndarray, "PandasArrayExtensionArray"]:
if isinstance(item, int):
return self._data[item]
return PandasArrayExtensionArray(self._data[item], copy=False)
def take(
self, indices: Sequence_[int], allow_fill: bool = False, fill_value: bool = None
) -> "PandasArrayExtensionArray":
indices: np.ndarray = np.asarray(indices, dtype=int)
if allow_fill:
fill_value = (
self.dtype.na_value if fill_value is None else np.asarray(fill_value, dtype=self.dtype.value_type)
)
mask = indices == -1
if (indices < -1).any():
raise ValueError("Invalid value in `indices`, must be all >= -1 for `allow_fill` is True")
elif len(self) > 0:
pass
elif not np.all(mask):
raise IndexError("Invalid take for empty PandasArrayExtensionArray, must be all -1.")
else:
data = np.array([fill_value] * len(indices), dtype=self.dtype.value_type)
return PandasArrayExtensionArray(data, copy=False)
took = self._data.take(indices, axis=0)
if allow_fill and mask.any():
took[mask] = [fill_value] * np.sum(mask)
return PandasArrayExtensionArray(took, copy=False)
def __len__(self) -> int:
return len(self._data)
def __eq__(self, other) -> np.ndarray:
if not isinstance(other, PandasArrayExtensionArray):
raise NotImplementedError(f"Invalid type to compare to: {type(other)}")
return (self._data == other._data).all()
def pandas_types_mapper(dtype):
if isinstance(dtype, _ArrayXDExtensionType):
return PandasArrayExtensionDtype(dtype.value_type)
@dataclass
class ClassLabel:
"""Feature type for integer class labels.
There are 3 ways to define a `ClassLabel`, which correspond to the 3 arguments:
* `num_classes`: Create 0 to (num_classes-1) labels.
* `names`: List of label strings.
* `names_file`: File containing the list of labels.
Under the hood the labels are stored as integers.
You can use negative integers to represent unknown/missing labels.
Args:
num_classes (`int`, *optional*):
Number of classes. All labels must be < `num_classes`.
names (`list` of `str`, *optional*):
String names for the integer classes.
The order in which the names are provided is kept.
names_file (`str`, *optional*):
Path to a file with names for the integer classes, one per line.
Example:
```py
>>> from datasets import Features, ClassLabel
>>> features = Features({'label': ClassLabel(num_classes=3, names=['bad', 'ok', 'good'])})
>>> features
{'label': ClassLabel(names=['bad', 'ok', 'good'], id=None)}
```
"""
num_classes: InitVar[Optional[int]] = None # Pseudo-field: ignored by asdict/fields when converting to/from dict
names: List[str] = None
names_file: InitVar[Optional[str]] = None # Pseudo-field: ignored by asdict/fields when converting to/from dict
id: Optional[str] = None
# Automatically constructed
dtype: ClassVar[str] = "int64"
pa_type: ClassVar[Any] = pa.int64()
_str2int: ClassVar[Dict[str, int]] = None
_int2str: ClassVar[Dict[int, int]] = None
_type: str = field(default="ClassLabel", init=False, repr=False)
def __post_init__(self, num_classes, names_file):
self.num_classes = num_classes
self.names_file = names_file
if self.names_file is not None and self.names is not None:
raise ValueError("Please provide either names or names_file but not both.")
# Set self.names
if self.names is None:
if self.names_file is not None:
self.names = self._load_names_from_file(self.names_file)
elif self.num_classes is not None:
self.names = [str(i) for i in range(self.num_classes)]
else:
raise ValueError("Please provide either num_classes, names or names_file.")
elif not isinstance(self.names, SequenceABC):
raise TypeError(f"Please provide names as a list, is {type(self.names)}")
# Set self.num_classes
if self.num_classes is None:
self.num_classes = len(self.names)
elif self.num_classes != len(self.names):
raise ValueError(
"ClassLabel number of names do not match the defined num_classes. "
f"Got {len(self.names)} names VS {self.num_classes} num_classes"
)
# Prepare mappings
self._int2str = [str(name) for name in self.names]
self._str2int = {name: i for i, name in enumerate(self._int2str)}
if len(self._int2str) != len(self._str2int):
raise ValueError("Some label names are duplicated. Each label name should be unique.")
def __call__(self):
return self.pa_type
def str2int(self, values: Union[str, Iterable]) -> Union[int, Iterable]:
"""Conversion class name `string` => `integer`.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="train")
>>> ds.features["label"].str2int('neg')
0
```
"""
if not isinstance(values, str) and not isinstance(values, Iterable):
raise ValueError(
f"Values {values} should be a string or an Iterable (list, numpy array, pytorch, tensorflow tensors)"
)
return_list = True
if isinstance(values, str):
values = [values]
return_list = False
output = [self._strval2int(value) for value in values]
return output if return_list else output[0]
def _strval2int(self, value: str) -> int:
failed_parse = False
value = str(value)
# first attempt - raw string value
int_value = self._str2int.get(value)
if int_value is None:
# second attempt - strip whitespace
int_value = self._str2int.get(value.strip())
if int_value is None:
# third attempt - convert str to int
try:
int_value = int(value)
except ValueError:
failed_parse = True
else:
if int_value < -1 or int_value >= self.num_classes:
failed_parse = True
if failed_parse:
raise ValueError(f"Invalid string class label {value}")
return int_value
def int2str(self, values: Union[int, Iterable]) -> Union[str, Iterable]:
"""Conversion `integer` => class name `string`.
Regarding unknown/missing labels: passing negative integers raises `ValueError`.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="train")
>>> ds.features["label"].int2str(0)
'neg'
```
"""
if not isinstance(values, int) and not isinstance(values, Iterable):
raise ValueError(
f"Values {values} should be an integer or an Iterable (list, numpy array, pytorch, tensorflow tensors)"
)
return_list = True
if isinstance(values, int):
values = [values]
return_list = False
for v in values:
if not 0 <= v < self.num_classes:
raise ValueError(f"Invalid integer class label {v:d}")
output = [self._int2str[int(v)] for v in values]
return output if return_list else output[0]
def encode_example(self, example_data):
if self.num_classes is None:
raise ValueError(
"Trying to use ClassLabel feature with undefined number of class. "
"Please set ClassLabel.names or num_classes."
)
# If a string is given, convert to associated integer
if isinstance(example_data, str):
example_data = self.str2int(example_data)
# Allowing -1 to mean no label.
if not -1 <= example_data < self.num_classes:
raise ValueError(f"Class label {example_data:d} greater than configured num_classes {self.num_classes}")
return example_data
def cast_storage(self, storage: Union[pa.StringArray, pa.IntegerArray]) -> pa.Int64Array:
"""Cast an Arrow array to the `ClassLabel` arrow storage type.
The Arrow types that can be converted to the `ClassLabel` pyarrow storage type are:
- `pa.string()`
- `pa.int()`
Args:
storage (`Union[pa.StringArray, pa.IntegerArray]`):
PyArrow array to cast.
Returns:
`pa.Int64Array`: Array in the `ClassLabel` arrow storage type.
"""
if isinstance(storage, pa.IntegerArray) and len(storage) > 0:
min_max = pc.min_max(storage).as_py()
if min_max["max"] is not None and min_max["max"] >= self.num_classes:
raise ValueError(
f"Class label {min_max['max']} greater than configured num_classes {self.num_classes}"
)
elif isinstance(storage, pa.StringArray):
storage = pa.array(
[self._strval2int(label) if label is not None else None for label in storage.to_pylist()]
)
return array_cast(storage, self.pa_type)
@staticmethod
def _load_names_from_file(names_filepath):
with open(names_filepath, encoding="utf-8") as f:
return [name.strip() for name in f.read().split("\n") if name.strip()] # Filter empty names
@dataclass
class Sequence:
"""Construct a list of feature from a single type or a dict of types.
    Mostly here for compatibility with tfds.
Args:
feature ([`FeatureType`]):
A list of features of a single type or a dictionary of types.
length (`int`):
Length of the sequence.
Example:
```py
>>> from datasets import Features, Sequence, Value, ClassLabel
>>> features = Features({'post': Sequence(feature={'text': Value(dtype='string'), 'upvotes': Value(dtype='int32'), 'label': ClassLabel(num_classes=2, names=['hot', 'cold'])})})
>>> features
{'post': Sequence(feature={'text': Value(dtype='string', id=None), 'upvotes': Value(dtype='int32', id=None), 'label': ClassLabel(names=['hot', 'cold'], id=None)}, length=-1, id=None)}
```
"""
feature: Any
length: int = -1
id: Optional[str] = None
# Automatically constructed
dtype: ClassVar[str] = "list"
pa_type: ClassVar[Any] = None
_type: str = field(default="Sequence", init=False, repr=False)
@dataclass
class LargeList:
"""Feature type for large list data composed of child feature data type.
It is backed by `pyarrow.LargeListType`, which is like `pyarrow.ListType` but with 64-bit rather than 32-bit offsets.
Args:
feature ([`FeatureType`]):
Child feature data type of each item within the large list.
"""
feature: Any
id: Optional[str] = None
# Automatically constructed
pa_type: ClassVar[Any] = None
_type: str = field(default="LargeList", init=False, repr=False)
FeatureType = Union[
dict,
list,
tuple,
Value,
ClassLabel,
Translation,
TranslationVariableLanguages,
LargeList,
Sequence,
Array2D,
Array3D,
Array4D,
Array5D,
Audio,
Image,
Video,
]
def _check_non_null_non_empty_recursive(obj, schema: Optional[FeatureType] = None) -> bool:
"""
Check if the object is not None.
If the object is a list or a tuple, recursively check the first element of the sequence and stop if at any point the first element is not a sequence or is an empty sequence.
"""
if obj is None:
return False
elif isinstance(obj, (list, tuple)) and (schema is None or isinstance(schema, (list, tuple, LargeList, Sequence))):
if len(obj) > 0:
if schema is None:
pass
elif isinstance(schema, (list, tuple)):
schema = schema[0]
else:
schema = schema.feature
return _check_non_null_non_empty_recursive(obj[0], schema)
else:
return False
else:
return True
def get_nested_type(schema: FeatureType) -> pa.DataType:
"""
get_nested_type() converts a datasets.FeatureType into a pyarrow.DataType, and acts as the inverse of
generate_from_arrow_type().
It performs double-duty as the implementation of Features.type and handles the conversion of
datasets.Feature->pa.struct
"""
# Nested structures: we allow dict, list/tuples, sequences
if isinstance(schema, Features):
return pa.struct(
{key: get_nested_type(schema[key]) for key in schema}
) # Features is subclass of dict, and dict order is deterministic since Python 3.6
elif isinstance(schema, dict):
return pa.struct(
{key: get_nested_type(schema[key]) for key in schema}
) # however don't sort on struct types since the order matters
elif isinstance(schema, (list, tuple)):
if len(schema) != 1:
raise ValueError("When defining list feature, you should just provide one example of the inner type")
value_type = get_nested_type(schema[0])
return pa.list_(value_type)
elif isinstance(schema, LargeList):
value_type = get_nested_type(schema.feature)
return pa.large_list(value_type)
elif isinstance(schema, Sequence):
value_type = get_nested_type(schema.feature)
# We allow to reverse list of dict => dict of list for compatibility with tfds
if isinstance(schema.feature, dict):
data_type = pa.struct({f.name: pa.list_(f.type, schema.length) for f in value_type})
else:
data_type = pa.list_(value_type, schema.length)
return data_type
# Other objects are callable which returns their data type (ClassLabel, Array2D, Translation, Arrow datatype creation methods)
return schema()
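# For illustration, a simple feature mapping resolves to a pyarrow struct, e.g.:
#   get_nested_type({"tokens": [Value("string")], "score": Value("float32")})
#   == pa.struct({"tokens": pa.list_(pa.string()), "score": pa.float32()})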
def encode_nested_example(schema, obj, level=0):
"""Encode a nested example.
This is used since some features (in particular ClassLabel) have some logic during encoding.
To avoid iterating over possibly long lists, it first checks (recursively) if the first element that is not None or empty (if it is a sequence) has to be encoded.
If the first element needs to be encoded, then all the elements of the list will be encoded, otherwise they'll stay the same.
"""
# Nested structures: we allow dict, list/tuples, sequences
if isinstance(schema, dict):
if level == 0 and obj is None:
raise ValueError("Got None but expected a dictionary instead")
return (
{k: encode_nested_example(schema[k], obj.get(k), level=level + 1) for k in schema}
if obj is not None
else None
)
elif isinstance(schema, (list, tuple)):
sub_schema = schema[0]
if obj is None:
return None
elif isinstance(obj, np.ndarray):
return encode_nested_example(schema, obj.tolist())
else:
if len(obj) > 0:
for first_elmt in obj:
if _check_non_null_non_empty_recursive(first_elmt, sub_schema):
break
if encode_nested_example(sub_schema, first_elmt, level=level + 1) != first_elmt:
return [encode_nested_example(sub_schema, o, level=level + 1) for o in obj]
return list(obj)
elif isinstance(schema, LargeList):
if obj is None:
return None
else:
if len(obj) > 0:
sub_schema = schema.feature
for first_elmt in obj:
if _check_non_null_non_empty_recursive(first_elmt, sub_schema):
break
if encode_nested_example(sub_schema, first_elmt, level=level + 1) != first_elmt:
return [encode_nested_example(sub_schema, o, level=level + 1) for o in obj]
return list(obj)
elif isinstance(schema, Sequence):
if obj is None:
return None
# We allow to reverse list of dict => dict of list for compatibility with tfds
if isinstance(schema.feature, dict):
# dict of list to fill
list_dict = {}
if isinstance(obj, (list, tuple)):
# obj is a list of dict
for k in schema.feature:
list_dict[k] = [encode_nested_example(schema.feature[k], o.get(k), level=level + 1) for o in obj]
return list_dict
else:
# obj is a single dict
for k in schema.feature:
list_dict[k] = (
[encode_nested_example(schema.feature[k], o, level=level + 1) for o in obj[k]]
if k in obj
else None
)
return list_dict
# schema.feature is not a dict
if isinstance(obj, str): # don't interpret a string as a list
raise ValueError(f"Got a string but expected a list instead: '{obj}'")
else:
if len(obj) > 0:
for first_elmt in obj:
if _check_non_null_non_empty_recursive(first_elmt, schema.feature):
break
# be careful when comparing tensors here
if (
not isinstance(first_elmt, list)
or encode_nested_example(schema.feature, first_elmt, level=level + 1) != first_elmt
):
return [encode_nested_example(schema.feature, o, level=level + 1) for o in obj]
return list(obj)
# Object with special encoding:
# ClassLabel will convert from string to int, TranslationVariableLanguages does some checks
elif hasattr(schema, "encode_example"):
return schema.encode_example(obj) if obj is not None else None
    # Other objects should be directly convertible to a native Arrow type (like Translation)
return obj
def decode_nested_example(schema, obj, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None):
"""Decode a nested example.
This is used since some features (in particular Audio and Image) have some logic during decoding.
To avoid iterating over possibly long lists, it first checks (recursively) if the first element that is not None or empty (if it is a sequence) has to be decoded.
If the first element needs to be decoded, then all the elements of the list will be decoded, otherwise they'll stay the same.
"""
# Nested structures: we allow dict, list/tuples, sequences
if isinstance(schema, dict):
return (
{k: decode_nested_example(sub_schema, sub_obj) for k, (sub_schema, sub_obj) in zip_dict(schema, obj)}
if obj is not None
else None
)
elif isinstance(schema, (list, tuple)):
sub_schema = schema[0]
if obj is None:
return None
else:
if len(obj) > 0:
for first_elmt in obj:
if _check_non_null_non_empty_recursive(first_elmt, sub_schema):
break
if decode_nested_example(sub_schema, first_elmt) != first_elmt:
return [decode_nested_example(sub_schema, o) for o in obj]
return list(obj)
elif isinstance(schema, LargeList):
if obj is None:
return None
else:
sub_schema = schema.feature
if len(obj) > 0:
for first_elmt in obj:
if _check_non_null_non_empty_recursive(first_elmt, sub_schema):
break
if decode_nested_example(sub_schema, first_elmt) != first_elmt:
return [decode_nested_example(sub_schema, o) for o in obj]
return list(obj)
elif isinstance(schema, Sequence):
# We allow to reverse list of dict => dict of list for compatibility with tfds
if isinstance(schema.feature, dict):
return {k: decode_nested_example([schema.feature[k]], obj[k]) for k in schema.feature}
else:
return decode_nested_example([schema.feature], obj)
# Object with special decoding:
elif hasattr(schema, "decode_example") and getattr(schema, "decode", True):
# we pass the token to read and decode files from private repositories in streaming mode
return schema.decode_example(obj, token_per_repo_id=token_per_repo_id) if obj is not None else None
return obj
_FEATURE_TYPES: Dict[str, FeatureType] = {
Value.__name__: Value,
ClassLabel.__name__: ClassLabel,
Translation.__name__: Translation,
TranslationVariableLanguages.__name__: TranslationVariableLanguages,
LargeList.__name__: LargeList,
Sequence.__name__: Sequence,
Array2D.__name__: Array2D,
Array3D.__name__: Array3D,
Array4D.__name__: Array4D,
Array5D.__name__: Array5D,
Audio.__name__: Audio,
Image.__name__: Image,
Video.__name__: Video,
}
@experimental
def register_feature(
feature_cls: type,
feature_type: str,
):
"""
Register a Feature object using a name and class.
This function must be used on a Feature class.
"""
if feature_type in _FEATURE_TYPES:
logger.warning(
f"Overwriting feature type '{feature_type}' ({_FEATURE_TYPES[feature_type].__name__} -> {feature_cls.__name__})"
)
_FEATURE_TYPES[feature_type] = feature_cls
def generate_from_dict(obj: Any):
"""Regenerate the nested feature object from a deserialized dict.
We use the '_type' fields to get the dataclass name to load.
generate_from_dict is the recursive helper for Features.from_dict, and allows for a convenient constructor syntax
to define features from deserialized JSON dictionaries. This function is used in particular when deserializing
a :class:`DatasetInfo` that was dumped to a JSON object. This acts as an analogue to
:meth:`Features.from_arrow_schema` and handles the recursive field-by-field instantiation, but doesn't require any
mapping to/from pyarrow, except for the fact that it takes advantage of the mapping of pyarrow primitive dtypes
that :class:`Value` automatically performs.
"""
# Nested structures: we allow dict, list/tuples, sequences
if isinstance(obj, list):
return [generate_from_dict(value) for value in obj]
# Otherwise we have a dict or a dataclass
if "_type" not in obj or isinstance(obj["_type"], dict):
return {key: generate_from_dict(value) for key, value in obj.items()}
obj = dict(obj)
_type = obj.pop("_type")
class_type = _FEATURE_TYPES.get(_type, None) or globals().get(_type, None)
if class_type is None:
raise ValueError(f"Feature type '{_type}' not found. Available feature types: {list(_FEATURE_TYPES.keys())}")
if class_type == LargeList:
feature = obj.pop("feature")
return LargeList(feature=generate_from_dict(feature), **obj)
if class_type == Sequence:
feature = obj.pop("feature")
return Sequence(feature=generate_from_dict(feature), **obj)
field_names = {f.name for f in fields(class_type)}
return class_type(**{k: v for k, v in obj.items() if k in field_names})
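# Illustrative examples (derived from the branches above): a serialized feature dict is
# rebuilt from its "_type" field, recursing into nested structures.
#
#     generate_from_dict({"dtype": "string", "_type": "Value"})
#     # -> Value(dtype='string')
#     generate_from_dict({"feature": {"dtype": "int64", "_type": "Value"}, "_type": "Sequence"})
#     # -> Sequence(feature=Value(dtype='int64'), length=-1)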
def generate_from_arrow_type(pa_type: pa.DataType) -> FeatureType:
"""
generate_from_arrow_type accepts an arrow DataType and returns a datasets FeatureType to be used as the type for
a single field.
This is the high-level arrow->datasets type conversion and is inverted by get_nested_type().
    This operates at the individual *field* level, whereas Features.from_arrow_schema() operates at the
    full schema level and holds the methods that represent the bijection between Features and pyarrow.Schema.
"""
if isinstance(pa_type, pa.StructType):
return {field.name: generate_from_arrow_type(field.type) for field in pa_type}
elif isinstance(pa_type, pa.FixedSizeListType):
return Sequence(feature=generate_from_arrow_type(pa_type.value_type), length=pa_type.list_size)
elif isinstance(pa_type, pa.ListType):
feature = generate_from_arrow_type(pa_type.value_type)
if isinstance(feature, (dict, tuple, list)):
return [feature]
return Sequence(feature=feature)
elif isinstance(pa_type, pa.LargeListType):
feature = generate_from_arrow_type(pa_type.value_type)
return LargeList(feature=feature)
elif isinstance(pa_type, _ArrayXDExtensionType):
array_feature = [None, None, Array2D, Array3D, Array4D, Array5D][pa_type.ndims]
return array_feature(shape=pa_type.shape, dtype=pa_type.value_type)
elif isinstance(pa_type, pa.DataType):
return Value(dtype=_arrow_to_datasets_dtype(pa_type))
else:
raise ValueError(f"Cannot convert {pa_type} to a Feature type.")
def numpy_to_pyarrow_listarray(arr: np.ndarray, type: pa.DataType = None) -> pa.ListArray:
"""Build a PyArrow ListArray from a multidimensional NumPy array"""
arr = np.array(arr)
values = pa.array(arr.flatten(), type=type)
for i in range(arr.ndim - 1):
n_offsets = reduce(mul, arr.shape[: arr.ndim - i - 1], 1)
step_offsets = arr.shape[arr.ndim - i - 1]
offsets = pa.array(np.arange(n_offsets + 1) * step_offsets, type=pa.int32())
values = pa.ListArray.from_arrays(offsets, values)
return values
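# Illustrative example (derived from the code above): a (2, 3) array is flattened and then
# wrapped with one offsets layer per extra dimension.
#
#     arr = np.arange(6).reshape(2, 3)
#     numpy_to_pyarrow_listarray(arr).to_pylist()
#     # -> [[0, 1, 2], [3, 4, 5]]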
def list_of_pa_arrays_to_pyarrow_listarray(l_arr: List[Optional[pa.Array]]) -> pa.ListArray:
null_mask = np.array([arr is None for arr in l_arr])
null_indices = np.arange(len(null_mask))[null_mask] - np.arange(np.sum(null_mask))
l_arr = [arr for arr in l_arr if arr is not None]
offsets = np.cumsum(
[0] + [len(arr) for arr in l_arr], dtype=object
) # convert to dtype object to allow None insertion
offsets = np.insert(offsets, null_indices, None)
offsets = pa.array(offsets, type=pa.int32())
values = pa.concat_arrays(l_arr)
return pa.ListArray.from_arrays(offsets, values)
def list_of_np_array_to_pyarrow_listarray(l_arr: List[np.ndarray], type: pa.DataType = None) -> pa.ListArray:
"""Build a PyArrow ListArray from a possibly nested list of NumPy arrays"""
if len(l_arr) > 0:
return list_of_pa_arrays_to_pyarrow_listarray(
[numpy_to_pyarrow_listarray(arr, type=type) if arr is not None else None for arr in l_arr]
)
else:
return pa.array([], type=type)
def contains_any_np_array(data: Any):
"""Return `True` if data is a NumPy ndarray or (recursively) if first non-null value in list is a NumPy ndarray.
Args:
data (Any): Data.
Returns:
bool
"""
if isinstance(data, np.ndarray):
return True
elif isinstance(data, list):
return contains_any_np_array(first_non_null_value(data)[1])
else:
return False
def any_np_array_to_pyarrow_listarray(data: Union[np.ndarray, List], type: pa.DataType = None) -> pa.ListArray:
"""Convert to PyArrow ListArray either a NumPy ndarray or (recursively) a list that may contain any NumPy ndarray.
Args:
data (Union[np.ndarray, List]): Data.
type (pa.DataType): Explicit PyArrow DataType passed to coerce the ListArray data type.
Returns:
pa.ListArray
"""
if isinstance(data, np.ndarray):
return numpy_to_pyarrow_listarray(data, type=type)
elif isinstance(data, list):
return list_of_pa_arrays_to_pyarrow_listarray([any_np_array_to_pyarrow_listarray(i, type=type) for i in data])
def to_pyarrow_listarray(data: Any, pa_type: _ArrayXDExtensionType) -> pa.Array:
"""Convert to PyArrow ListArray.
Args:
data (Any): Sequence, iterable, np.ndarray or pd.Series.
pa_type (_ArrayXDExtensionType): Any of the ArrayNDExtensionType.
Returns:
pyarrow.Array
"""
if contains_any_np_array(data):
return any_np_array_to_pyarrow_listarray(data, type=pa_type.value_type)
else:
return pa.array(data, pa_type.storage_dtype)
def _visit(feature: FeatureType, func: Callable[[FeatureType], Optional[FeatureType]]) -> FeatureType:
"""Visit a (possibly nested) feature.
Args:
        feature (FeatureType): the feature type to be checked
        func (Callable[[FeatureType], Optional[FeatureType]]): the function applied to each nested feature;
            if it returns ``None``, the original feature is kept
Returns:
visited feature (FeatureType)
"""
if isinstance(feature, dict):
out = func({k: _visit(f, func) for k, f in feature.items()})
elif isinstance(feature, (list, tuple)):
out = func([_visit(feature[0], func)])
elif isinstance(feature, LargeList):
out = func(LargeList(_visit(feature.feature, func)))
elif isinstance(feature, Sequence):
out = func(Sequence(_visit(feature.feature, func), length=feature.length))
else:
out = func(feature)
return feature if out is None else out
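# Illustrative example (not part of the original source): using `_visit` to downcast every
# Value("int64") in a nested feature to Value("int32"); returning None from the callback
# keeps the original (sub-)feature.
#
#     feature = {"a": Value("int64"), "b": Sequence(Value("int64"))}
#     _visit(feature, lambda f: Value("int32") if isinstance(f, Value) and f.dtype == "int64" else None)
#     # -> {"a": Value(dtype='int32'), "b": Sequence(feature=Value(dtype='int32'), length=-1)}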
def require_decoding(feature: FeatureType, ignore_decode_attribute: bool = False) -> bool:
"""Check if a (possibly nested) feature requires decoding.
Args:
feature (FeatureType): the feature type to be checked
ignore_decode_attribute (:obj:`bool`, default ``False``): Whether to ignore the current value
of the `decode` attribute of the decodable feature types.
Returns:
:obj:`bool`
"""
if isinstance(feature, dict):
return any(require_decoding(f) for f in feature.values())
elif isinstance(feature, (list, tuple)):
return require_decoding(feature[0])
elif isinstance(feature, LargeList):
return require_decoding(feature.feature)
elif isinstance(feature, Sequence):
return require_decoding(feature.feature)
else:
return hasattr(feature, "decode_example") and (
getattr(feature, "decode", True) if not ignore_decode_attribute else True
)
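# Illustrative examples (using the decodable Image feature imported in this module):
#
#     require_decoding(Value("string"))                                     # -> False (no decode_example)
#     require_decoding({"img": Image()})                                    # -> True
#     require_decoding(Image(decode=False))                                 # -> False
#     require_decoding(Image(decode=False), ignore_decode_attribute=True)   # -> True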
def require_storage_cast(feature: FeatureType) -> bool:
"""Check if a (possibly nested) feature requires storage casting.
Args:
feature (FeatureType): the feature type to be checked
Returns:
:obj:`bool`
"""
if isinstance(feature, dict):
return any(require_storage_cast(f) for f in feature.values())
elif isinstance(feature, (list, tuple)):
return require_storage_cast(feature[0])
elif isinstance(feature, LargeList):
return require_storage_cast(feature.feature)
elif isinstance(feature, Sequence):
return require_storage_cast(feature.feature)
else:
return hasattr(feature, "cast_storage")
def require_storage_embed(feature: FeatureType) -> bool:
"""Check if a (possibly nested) feature requires embedding data into storage.
Args:
feature (FeatureType): the feature type to be checked
Returns:
:obj:`bool`
"""
if isinstance(feature, dict):
return any(require_storage_cast(f) for f in feature.values())
elif isinstance(feature, (list, tuple)):
return require_storage_cast(feature[0])
elif isinstance(feature, LargeList):
return require_storage_cast(feature.feature)
elif isinstance(feature, Sequence):
return require_storage_cast(feature.feature)
else:
return hasattr(feature, "embed_storage")
def keep_features_dicts_synced(func):
"""
    Wrapper to keep the secondary dictionary of a :class:`datasets.Features` object (the one that tracks
    which columns require decoding) in sync with the main dictionary whenever the object is mutated.
"""
@wraps(func)
def wrapper(*args, **kwargs):
if args:
self: "Features" = args[0]
args = args[1:]
else:
self: "Features" = kwargs.pop("self")
out = func(self, *args, **kwargs)
assert hasattr(self, "_column_requires_decoding")
self._column_requires_decoding = {col: require_decoding(feature) for col, feature in self.items()}
return out
wrapper._decorator_name_ = "_keep_dicts_synced"
return wrapper
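# Illustrative note (not part of the original source): thanks to this wrapper, mutating a
# Features object (defined below) keeps its decoding bookkeeping up to date.
#
#     features = Features({"text": Value("string")})
#     features["image"] = Image()
#     features._column_requires_decoding  # -> {"text": False, "image": True}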
class Features(dict):
"""A special dictionary that defines the internal structure of a dataset.
Instantiated with a dictionary of type `dict[str, FieldType]`, where keys are the desired column names,
and values are the type of that column.
`FieldType` can be one of the following:
- [`Value`] feature specifies a single data type value, e.g. `int64` or `string`.
- [`ClassLabel`] feature specifies a predefined set of classes which can have labels associated to them and
will be stored as integers in the dataset.
- Python `dict` specifies a composite feature containing a mapping of sub-fields to sub-features.
It's possible to have nested fields of nested fields in an arbitrary manner.
- Python `list`, [`LargeList`] or [`Sequence`] specifies a composite feature containing a sequence of
sub-features, all of the same feature type.
<Tip>
A [`Sequence`] with an internal dictionary feature will be automatically converted into a dictionary of
lists. This behavior is implemented to have a compatibility layer with the TensorFlow Datasets library but may be
    unwanted in some cases. If you don't want this behavior, you can use a Python `list` or a [`LargeList`]
instead of the [`Sequence`].
</Tip>
- [`Array2D`], [`Array3D`], [`Array4D`] or [`Array5D`] feature for multidimensional arrays.
- [`Audio`] feature to store the absolute path to an audio file or a dictionary with the relative path
to an audio file ("path" key) and its bytes content ("bytes" key). This feature extracts the audio data.
- [`Image`] feature to store the absolute path to an image file, an `np.ndarray` object, a `PIL.Image.Image` object
or a dictionary with the relative path to an image file ("path" key) and its bytes content ("bytes" key).
This feature extracts the image data.
- [`Translation`] or [`TranslationVariableLanguages`] feature specific to Machine Translation.
"""
def __init__(*args, **kwargs):
# self not in the signature to allow passing self as a kwarg
if not args:
raise TypeError("descriptor '__init__' of 'Features' object needs an argument")
self, *args = args
super(Features, self).__init__(*args, **kwargs)
self._column_requires_decoding: Dict[str, bool] = {
col: require_decoding(feature) for col, feature in self.items()
}
__setitem__ = keep_features_dicts_synced(dict.__setitem__)
__delitem__ = keep_features_dicts_synced(dict.__delitem__)
update = keep_features_dicts_synced(dict.update)
setdefault = keep_features_dicts_synced(dict.setdefault)
pop = keep_features_dicts_synced(dict.pop)
popitem = keep_features_dicts_synced(dict.popitem)
clear = keep_features_dicts_synced(dict.clear)
def __reduce__(self):
return Features, (dict(self),)
@property
def type(self):
"""
Features field types.
Returns:
:obj:`pyarrow.DataType`
"""
return get_nested_type(self)
@property
def arrow_schema(self):
"""
Features schema.
Returns:
:obj:`pyarrow.Schema`
"""
hf_metadata = {"info": {"features": self.to_dict()}}
return pa.schema(self.type).with_metadata({"huggingface": json.dumps(hf_metadata)})
@classmethod
def from_arrow_schema(cls, pa_schema: pa.Schema) -> "Features":
"""
Construct [`Features`] from Arrow Schema.
It also checks the schema metadata for Hugging Face Datasets features.
        Non-nullable fields are not supported and are set to nullable.
        Also, pa.dictionary is not supported and its underlying type is used instead.
        Therefore datasets converts DictionaryArray objects to their actual values.
Args:
pa_schema (`pyarrow.Schema`):
Arrow Schema.
Returns:
[`Features`]
"""
# try to load features from the arrow schema metadata
metadata_features = Features()
if pa_schema.metadata is not None and "huggingface".encode("utf-8") in pa_schema.metadata:
metadata = json.loads(pa_schema.metadata["huggingface".encode("utf-8")].decode())
if "info" in metadata and "features" in metadata["info"] and metadata["info"]["features"] is not None:
metadata_features = Features.from_dict(metadata["info"]["features"])
metadata_features_schema = metadata_features.arrow_schema
obj = {
field.name: (
metadata_features[field.name]
if field.name in metadata_features and metadata_features_schema.field(field.name) == field
else generate_from_arrow_type(field.type)
)
for field in pa_schema
}
return cls(**obj)
@classmethod
def from_dict(cls, dic) -> "Features":
"""
Construct [`Features`] from dict.
Regenerate the nested feature object from a deserialized dict.
We use the `_type` key to infer the dataclass name of the feature `FieldType`.
It allows for a convenient constructor syntax
to define features from deserialized JSON dictionaries. This function is used in particular when deserializing
a [`DatasetInfo`] that was dumped to a JSON object. This acts as an analogue to
[`Features.from_arrow_schema`] and handles the recursive field-by-field instantiation, but doesn't require
any mapping to/from pyarrow, except for the fact that it takes advantage of the mapping of pyarrow primitive
dtypes that [`Value`] automatically performs.
Args:
dic (`dict[str, Any]`):
Python dictionary.
Returns:
`Features`
Example::
>>> Features.from_dict({'_type': {'dtype': 'string', 'id': None, '_type': 'Value'}})
{'_type': Value(dtype='string', id=None)}
"""
obj = generate_from_dict(dic)
return cls(**obj)
def to_dict(self):
return asdict(self)
def _to_yaml_list(self) -> list:
# we compute the YAML list from the dict representation that is used for JSON dump
yaml_data = self.to_dict()
def simplify(feature: dict) -> dict:
if not isinstance(feature, dict):
raise TypeError(f"Expected a dict but got a {type(feature)}: {feature}")
for list_type in ["large_list", "list", "sequence"]:
#
# list_type: -> list_type: int32
# dtype: int32 ->
#
if isinstance(feature.get(list_type), dict) and list(feature[list_type]) == ["dtype"]:
feature[list_type] = feature[list_type]["dtype"]
#
# list_type: -> list_type:
# struct: -> - name: foo
# - name: foo -> dtype: int32
# dtype: int32 ->
#
if isinstance(feature.get(list_type), dict) and list(feature[list_type]) == ["struct"]:
feature[list_type] = feature[list_type]["struct"]
#
# class_label: -> class_label:
# names: -> names:
# - negative -> '0': negative
# - positive -> '1': positive
#
if isinstance(feature.get("class_label"), dict) and isinstance(feature["class_label"].get("names"), list):
# server-side requirement: keys must be strings
feature["class_label"]["names"] = {
str(label_id): label_name for label_id, label_name in enumerate(feature["class_label"]["names"])
}
return feature
def to_yaml_inner(obj: Union[dict, list]) -> dict:
if isinstance(obj, dict):
_type = obj.pop("_type", None)
if _type == "LargeList":
_feature = obj.pop("feature")
return simplify({"large_list": to_yaml_inner(_feature), **obj})
elif _type == "Sequence":
_feature = obj.pop("feature")
return simplify({"sequence": to_yaml_inner(_feature), **obj})
elif _type == "Value":
return obj
elif _type and not obj:
return {"dtype": camelcase_to_snakecase(_type)}
elif _type:
return {"dtype": simplify({camelcase_to_snakecase(_type): obj})}
else:
return {"struct": [{"name": name, **to_yaml_inner(_feature)} for name, _feature in obj.items()]}
elif isinstance(obj, list):
return simplify({"list": simplify(to_yaml_inner(obj[0]))})
elif isinstance(obj, tuple):
return to_yaml_inner(list(obj))
else:
raise TypeError(f"Expected a dict or a list but got {type(obj)}: {obj}")
def to_yaml_types(obj: dict) -> dict:
if isinstance(obj, dict):
return {k: to_yaml_types(v) for k, v in obj.items()}
elif isinstance(obj, list):
return [to_yaml_types(v) for v in obj]
elif isinstance(obj, tuple):
return to_yaml_types(list(obj))
else:
return obj
return to_yaml_types(to_yaml_inner(yaml_data)["struct"])
@classmethod
def _from_yaml_list(cls, yaml_data: list) -> "Features":
yaml_data = copy.deepcopy(yaml_data)
# we convert the list obtained from YAML data into the dict representation that is used for JSON dump
def unsimplify(feature: dict) -> dict:
if not isinstance(feature, dict):
raise TypeError(f"Expected a dict but got a {type(feature)}: {feature}")
for list_type in ["large_list", "list", "sequence"]:
#
# list_type: int32 -> list_type:
# -> dtype: int32
#
if isinstance(feature.get(list_type), str):
feature[list_type] = {"dtype": feature[list_type]}
#
# class_label: -> class_label:
# names: -> names:
# '0': negative -> - negative
# '1': positive -> - positive
#
if isinstance(feature.get("class_label"), dict) and isinstance(feature["class_label"].get("names"), dict):
label_ids = sorted(feature["class_label"]["names"], key=int)
if label_ids and [int(label_id) for label_id in label_ids] != list(range(int(label_ids[-1]) + 1)):
raise ValueError(
f"ClassLabel expected a value for all label ids [0:{int(label_ids[-1]) + 1}] but some ids are missing."
)
feature["class_label"]["names"] = [feature["class_label"]["names"][label_id] for label_id in label_ids]
return feature
def from_yaml_inner(obj: Union[dict, list]) -> Union[dict, list]:
if isinstance(obj, dict):
if not obj:
return {}
_type = next(iter(obj))
if _type == "large_list":
_feature = unsimplify(obj).pop(_type)
return {"feature": from_yaml_inner(_feature), **obj, "_type": "LargeList"}
if _type == "sequence":
_feature = unsimplify(obj).pop(_type)
return {"feature": from_yaml_inner(_feature), **obj, "_type": "Sequence"}
if _type == "list":
return [from_yaml_inner(unsimplify(obj)[_type])]
if _type == "struct":
return from_yaml_inner(obj["struct"])
elif _type == "dtype":
if isinstance(obj["dtype"], str):
# e.g. int32, float64, string, audio, image
try:
Value(obj["dtype"])
return {**obj, "_type": "Value"}
except ValueError:
# e.g. Audio, Image, ArrayXD
return {"_type": snakecase_to_camelcase(obj["dtype"])}
else:
return from_yaml_inner(obj["dtype"])
else:
return {"_type": snakecase_to_camelcase(_type), **unsimplify(obj)[_type]}
elif isinstance(obj, list):
names = [_feature.pop("name") for _feature in obj]
return {name: from_yaml_inner(_feature) for name, _feature in zip(names, obj)}
else:
raise TypeError(f"Expected a dict or a list but got {type(obj)}: {obj}")
return cls.from_dict(from_yaml_inner(yaml_data))
def encode_example(self, example):
"""
Encode example into a format for Arrow.
Args:
example (`dict[str, Any]`):
Data in a Dataset row.
Returns:
`dict[str, Any]`
"""
example = cast_to_python_objects(example)
return encode_nested_example(self, example)
def encode_column(self, column, column_name: str):
"""
Encode column into a format for Arrow.
Args:
column (`list[Any]`):
Data in a Dataset column.
column_name (`str`):
Dataset column name.
Returns:
`list[Any]`
"""
column = cast_to_python_objects(column)
return [encode_nested_example(self[column_name], obj, level=1) for obj in column]
def encode_batch(self, batch):
"""
Encode batch into a format for Arrow.
Args:
batch (`dict[str, list[Any]]`):
Data in a Dataset batch.
Returns:
`dict[str, list[Any]]`
"""
encoded_batch = {}
if set(batch) != set(self):
raise ValueError(f"Column mismatch between batch {set(batch)} and features {set(self)}")
for key, column in batch.items():
column = cast_to_python_objects(column)
encoded_batch[key] = [encode_nested_example(self[key], obj, level=1) for obj in column]
return encoded_batch
def decode_example(self, example: dict, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None):
"""Decode example with custom feature decoding.
Args:
example (`dict[str, Any]`):
Dataset row data.
token_per_repo_id (`dict`, *optional*):
To access and decode audio or image files from private repositories on the Hub, you can pass
a dictionary `repo_id (str) -> token (bool or str)`.
Returns:
`dict[str, Any]`
"""
return {
column_name: decode_nested_example(feature, value, token_per_repo_id=token_per_repo_id)
if self._column_requires_decoding[column_name]
else value
for column_name, (feature, value) in zip_dict(
{key: value for key, value in self.items() if key in example}, example
)
}
def decode_column(self, column: list, column_name: str):
"""Decode column with custom feature decoding.
Args:
column (`list[Any]`):
Dataset column data.
column_name (`str`):
Dataset column name.
Returns:
`list[Any]`
"""
return (
[decode_nested_example(self[column_name], value) if value is not None else None for value in column]
if self._column_requires_decoding[column_name]
else column
)
def decode_batch(self, batch: dict, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None):
"""Decode batch with custom feature decoding.
Args:
batch (`dict[str, list[Any]]`):
Dataset batch data.
token_per_repo_id (`dict`, *optional*):
To access and decode audio or image files from private repositories on the Hub, you can pass
a dictionary repo_id (str) -> token (bool or str)
Returns:
`dict[str, list[Any]]`
"""
decoded_batch = {}
for column_name, column in batch.items():
decoded_batch[column_name] = (
[
decode_nested_example(self[column_name], value, token_per_repo_id=token_per_repo_id)
if value is not None
else None
for value in column
]
if self._column_requires_decoding[column_name]
else column
)
return decoded_batch
def copy(self) -> "Features":
"""
Make a deep copy of [`Features`].
Returns:
[`Features`]
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="train")
>>> copy_of_features = ds.features.copy()
>>> copy_of_features
{'label': ClassLabel(names=['neg', 'pos'], id=None),
'text': Value(dtype='string', id=None)}
```
"""
return copy.deepcopy(self)
def reorder_fields_as(self, other: "Features") -> "Features":
"""
Reorder Features fields to match the field order of other [`Features`].
The order of the fields is important since it matters for the underlying arrow data.
        Re-ordering the fields makes the underlying arrow data types match.
Args:
other ([`Features`]):
The other [`Features`] to align with.
Returns:
[`Features`]
Example::
>>> from datasets import Features, Sequence, Value
>>> # let's say we have two features with a different order of nested fields (for a and b for example)
>>> f1 = Features({"root": Sequence({"a": Value("string"), "b": Value("string")})})
>>> f2 = Features({"root": {"b": Sequence(Value("string")), "a": Sequence(Value("string"))}})
>>> assert f1.type != f2.type
>>> # re-ordering keeps the base structure (here Sequence is defined at the root level), but makes the fields order match
>>> f1.reorder_fields_as(f2)
{'root': Sequence(feature={'b': Value(dtype='string', id=None), 'a': Value(dtype='string', id=None)}, length=-1, id=None)}
>>> assert f1.reorder_fields_as(f2).type == f2.type
"""
def recursive_reorder(source, target, stack=""):
stack_position = " at " + stack[1:] if stack else ""
if isinstance(target, Sequence):
target = target.feature
if isinstance(target, dict):
target = {k: [v] for k, v in target.items()}
else:
target = [target]
if isinstance(source, Sequence):
sequence_kwargs = vars(source).copy()
source = sequence_kwargs.pop("feature")
if isinstance(source, dict):
source = {k: [v] for k, v in source.items()}
reordered = recursive_reorder(source, target, stack)
return Sequence({k: v[0] for k, v in reordered.items()}, **sequence_kwargs)
else:
source = [source]
reordered = recursive_reorder(source, target, stack)
return Sequence(reordered[0], **sequence_kwargs)
elif isinstance(source, dict):
if not isinstance(target, dict):
raise ValueError(f"Type mismatch: between {source} and {target}" + stack_position)
if sorted(source) != sorted(target):
message = (
f"Keys mismatch: between {source} (source) and {target} (target).\n"
f"{source.keys() - target.keys()} are missing from target "
f"and {target.keys() - source.keys()} are missing from source" + stack_position
)
raise ValueError(message)
return {key: recursive_reorder(source[key], target[key], stack + f".{key}") for key in target}
elif isinstance(source, list):
if not isinstance(target, list):
raise ValueError(f"Type mismatch: between {source} and {target}" + stack_position)
if len(source) != len(target):
raise ValueError(f"Length mismatch: between {source} and {target}" + stack_position)
return [recursive_reorder(source[i], target[i], stack + ".<list>") for i in range(len(target))]
elif isinstance(source, LargeList):
if not isinstance(target, LargeList):
raise ValueError(f"Type mismatch: between {source} and {target}" + stack_position)
return LargeList(recursive_reorder(source.feature, target.feature, stack))
else:
return source
return Features(recursive_reorder(self, other))
def flatten(self, max_depth=16) -> "Features":
"""Flatten the features. Every dictionary column is removed and is replaced by
all the subfields it contains. The new fields are named by concatenating the
name of the original column and the subfield name like this: `<original>.<subfield>`.
If a column contains nested dictionaries, then all the lower-level subfields names are
also concatenated to form new columns: `<original>.<subfield>.<subsubfield>`, etc.
Returns:
[`Features`]:
The flattened features.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("squad", split="train")
>>> ds.features.flatten()
{'answers.answer_start': Sequence(feature=Value(dtype='int32', id=None), length=-1, id=None),
'answers.text': Sequence(feature=Value(dtype='string', id=None), length=-1, id=None),
'context': Value(dtype='string', id=None),
'id': Value(dtype='string', id=None),
'question': Value(dtype='string', id=None),
'title': Value(dtype='string', id=None)}
```
"""
for depth in range(1, max_depth):
no_change = True
flattened = self.copy()
for column_name, subfeature in self.items():
if isinstance(subfeature, dict):
no_change = False
flattened.update({f"{column_name}.{k}": v for k, v in subfeature.items()})
del flattened[column_name]
elif isinstance(subfeature, Sequence) and isinstance(subfeature.feature, dict):
no_change = False
flattened.update(
{
f"{column_name}.{k}": Sequence(v) if not isinstance(v, dict) else [v]
for k, v in subfeature.feature.items()
}
)
del flattened[column_name]
elif hasattr(subfeature, "flatten") and subfeature.flatten() != subfeature:
no_change = False
flattened.update({f"{column_name}.{k}": v for k, v in subfeature.flatten().items()})
del flattened[column_name]
self = flattened
if no_change:
break
return self
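# Illustrative example (not part of the original source): building a Features object and
# encoding a raw example into its Arrow-friendly representation.
#
#     features = Features({"text": Value("string"), "label": ClassLabel(names=["neg", "pos"])})
#     features.encode_example({"text": "great movie", "label": "pos"})
#     # -> {"text": "great movie", "label": 1}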
def _align_features(features_list: List[Features]) -> List[Features]:
"""Align dictionaries of features so that the keys that are found in multiple dictionaries share the same feature."""
name2feature = {}
for features in features_list:
for k, v in features.items():
if k in name2feature and isinstance(v, dict):
# Recursively align features.
name2feature[k] = _align_features([name2feature[k], v])[0]
elif k not in name2feature or (isinstance(name2feature[k], Value) and name2feature[k].dtype == "null"):
name2feature[k] = v
return [Features({k: name2feature[k] for k in features.keys()}) for features in features_list]
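# Illustrative example (derived from the code above): a column typed Value("null") in one
# set of features is aligned to the concrete type found in another.
#
#     f1 = Features({"col": Value("null")})
#     f2 = Features({"col": Value("string")})
#     _align_features([f1, f2])
#     # -> [Features({"col": Value(dtype='string')}), Features({"col": Value(dtype='string')})]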
def _check_if_features_can_be_aligned(features_list: List[Features]):
"""Check if the dictionaries of features can be aligned.
    Two dictionaries of features can be aligned if the keys they share have the same type, or if some of them are of type `Value("null")`.
"""
name2feature = {}
for features in features_list:
for k, v in features.items():
if k not in name2feature or (isinstance(name2feature[k], Value) and name2feature[k].dtype == "null"):
name2feature[k] = v
for features in features_list:
for k, v in features.items():
if isinstance(v, dict) and isinstance(name2feature[k], dict):
# Deep checks for structure.
_check_if_features_can_be_aligned([name2feature[k], v])
elif not (isinstance(v, Value) and v.dtype == "null") and name2feature[k] != v:
raise ValueError(
                    f'The features can\'t be aligned because the key {k} of features {features} has unexpected type - {v} (expected either {name2feature[k]} or Value("null")).'
)
| datasets/src/datasets/features/features.py/0 | {
"file_path": "datasets/src/datasets/features/features.py",
"repo_id": "datasets",
"token_count": 41357
} |
# Copyright 2020 The HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""List and inspect datasets."""
import os
from typing import Dict, List, Mapping, Optional, Sequence, Union
from .download.download_config import DownloadConfig
from .download.download_manager import DownloadMode
from .download.streaming_download_manager import StreamingDownloadManager
from .info import DatasetInfo
from .load import (
dataset_module_factory,
get_dataset_builder_class,
load_dataset_builder,
)
from .utils.logging import get_logger
from .utils.version import Version
logger = get_logger(__name__)
class SplitsNotFoundError(ValueError):
pass
def get_dataset_infos(
path: str,
data_files: Optional[Union[Dict, List, str]] = None,
download_config: Optional[DownloadConfig] = None,
download_mode: Optional[Union[DownloadMode, str]] = None,
revision: Optional[Union[str, Version]] = None,
token: Optional[Union[bool, str]] = None,
**config_kwargs,
):
"""Get the meta information about a dataset, returned as a dict mapping config name to DatasetInfoDict.
Args:
path (`str`): path to the dataset processing script with the dataset builder. Can be either:
- a local path to processing script or the directory containing the script (if the script has the same name as the directory),
e.g. `'./dataset/squad'` or `'./dataset/squad/squad.py'`
- a dataset identifier on the Hugging Face Hub (list all available datasets and ids with [`huggingface_hub.list_datasets`]),
                e.g. `'squad'`, `'glue'` or `'openai/webtext'`
revision (`Union[str, datasets.Version]`, *optional*):
If specified, the dataset module will be loaded from the datasets repository at this version.
By default:
- it is set to the local version of the lib.
- it will also try to load it from the main branch if it's not available at the local version of the lib.
Specifying a version that is different from your local version of the lib might cause compatibility issues.
download_config ([`DownloadConfig`], *optional*):
Specific download configuration parameters.
download_mode ([`DownloadMode`] or `str`, defaults to `REUSE_DATASET_IF_EXISTS`):
Download/generate mode.
data_files (`Union[Dict, List, str]`, *optional*):
Defining the data_files of the dataset configuration.
token (`str` or `bool`, *optional*):
Optional string or boolean to use as Bearer token for remote files on the Datasets Hub.
If `True`, or not specified, will get token from `"~/.huggingface"`.
**config_kwargs (additional keyword arguments):
Optional attributes for builder class which will override the attributes if supplied.
Example:
```py
>>> from datasets import get_dataset_infos
>>> get_dataset_infos('rotten_tomatoes')
{'default': DatasetInfo(description="Movie Review Dataset.\nThis is a dataset of containing 5,331 positive and 5,331 negative processed\nsentences from Rotten Tomatoes movie reviews...), ...}
```
"""
config_names = get_dataset_config_names(
path=path,
revision=revision,
download_config=download_config,
download_mode=download_mode,
data_files=data_files,
token=token,
)
return {
config_name: get_dataset_config_info(
path=path,
config_name=config_name,
data_files=data_files,
download_config=download_config,
download_mode=download_mode,
revision=revision,
token=token,
**config_kwargs,
)
for config_name in config_names
}
def get_dataset_config_names(
path: str,
revision: Optional[Union[str, Version]] = None,
download_config: Optional[DownloadConfig] = None,
download_mode: Optional[Union[DownloadMode, str]] = None,
dynamic_modules_path: Optional[str] = None,
data_files: Optional[Union[Dict, List, str]] = None,
**download_kwargs,
):
"""Get the list of available config names for a particular dataset.
Args:
path (`str`): path to the dataset processing script with the dataset builder. Can be either:
- a local path to processing script or the directory containing the script (if the script has the same name as the directory),
e.g. `'./dataset/squad'` or `'./dataset/squad/squad.py'`
- a dataset identifier on the Hugging Face Hub (list all available datasets and ids with [`huggingface_hub.list_datasets`]),
e.g. `'squad'`, `'glue'` or `'openai/webtext'`
revision (`Union[str, datasets.Version]`, *optional*):
If specified, the dataset module will be loaded from the datasets repository at this version.
By default:
- it is set to the local version of the lib.
- it will also try to load it from the main branch if it's not available at the local version of the lib.
Specifying a version that is different from your local version of the lib might cause compatibility issues.
download_config ([`DownloadConfig`], *optional*):
Specific download configuration parameters.
download_mode ([`DownloadMode`] or `str`, defaults to `REUSE_DATASET_IF_EXISTS`):
Download/generate mode.
dynamic_modules_path (`str`, defaults to `~/.cache/huggingface/modules/datasets_modules`):
Optional path to the directory in which the dynamic modules are saved. It must have been initialized with `init_dynamic_modules`.
By default the datasets are stored inside the `datasets_modules` module.
data_files (`Union[Dict, List, str]`, *optional*):
Defining the data_files of the dataset configuration.
**download_kwargs (additional keyword arguments):
Optional attributes for [`DownloadConfig`] which will override the attributes in `download_config` if supplied,
for example `token`.
Example:
```py
>>> from datasets import get_dataset_config_names
>>> get_dataset_config_names("glue")
['cola',
'sst2',
'mrpc',
'qqp',
'stsb',
'mnli',
'mnli_mismatched',
'mnli_matched',
'qnli',
'rte',
'wnli',
'ax']
```
"""
dataset_module = dataset_module_factory(
path,
revision=revision,
download_config=download_config,
download_mode=download_mode,
dynamic_modules_path=dynamic_modules_path,
data_files=data_files,
**download_kwargs,
)
builder_cls = get_dataset_builder_class(dataset_module, dataset_name=os.path.basename(path))
return list(builder_cls.builder_configs.keys()) or [
dataset_module.builder_kwargs.get("config_name", builder_cls.DEFAULT_CONFIG_NAME or "default")
]
def get_dataset_default_config_name(
path: str,
revision: Optional[Union[str, Version]] = None,
download_config: Optional[DownloadConfig] = None,
download_mode: Optional[Union[DownloadMode, str]] = None,
dynamic_modules_path: Optional[str] = None,
data_files: Optional[Union[Dict, List, str]] = None,
**download_kwargs,
) -> Optional[str]:
"""Get the default config name for a particular dataset.
Can return None only if the dataset has multiple configurations and no default configuration.
Args:
path (`str`): path to the dataset processing script with the dataset builder. Can be either:
- a local path to processing script or the directory containing the script (if the script has the same name as the directory),
e.g. `'./dataset/squad'` or `'./dataset/squad/squad.py'`
- a dataset identifier on the Hugging Face Hub (list all available datasets and ids with [`huggingface_hub.list_datasets`]),
e.g. `'squad'`, `'glue'` or `'openai/webtext'`
revision (`Union[str, datasets.Version]`, *optional*):
If specified, the dataset module will be loaded from the datasets repository at this version.
By default:
- it is set to the local version of the lib.
- it will also try to load it from the main branch if it's not available at the local version of the lib.
Specifying a version that is different from your local version of the lib might cause compatibility issues.
download_config ([`DownloadConfig`], *optional*):
Specific download configuration parameters.
download_mode ([`DownloadMode`] or `str`, defaults to `REUSE_DATASET_IF_EXISTS`):
Download/generate mode.
dynamic_modules_path (`str`, defaults to `~/.cache/huggingface/modules/datasets_modules`):
Optional path to the directory in which the dynamic modules are saved. It must have been initialized with `init_dynamic_modules`.
By default the datasets are stored inside the `datasets_modules` module.
data_files (`Union[Dict, List, str]`, *optional*):
Defining the data_files of the dataset configuration.
**download_kwargs (additional keyword arguments):
Optional attributes for [`DownloadConfig`] which will override the attributes in `download_config` if supplied,
for example `token`.
Returns:
Optional[str]: the default config name if there is one
Example:
```py
>>> from datasets import get_dataset_default_config_name
>>> get_dataset_default_config_name("openbookqa")
'main'
```
"""
dataset_module = dataset_module_factory(
path,
revision=revision,
download_config=download_config,
download_mode=download_mode,
dynamic_modules_path=dynamic_modules_path,
data_files=data_files,
**download_kwargs,
)
builder_cls = get_dataset_builder_class(dataset_module, dataset_name=os.path.basename(path))
builder_configs = list(builder_cls.builder_configs.keys())
if builder_configs:
default_config_name = builder_configs[0] if len(builder_configs) == 1 else None
else:
default_config_name = "default"
return builder_cls.DEFAULT_CONFIG_NAME or default_config_name
def get_dataset_config_info(
path: str,
config_name: Optional[str] = None,
data_files: Optional[Union[str, Sequence[str], Mapping[str, Union[str, Sequence[str]]]]] = None,
download_config: Optional[DownloadConfig] = None,
download_mode: Optional[Union[DownloadMode, str]] = None,
revision: Optional[Union[str, Version]] = None,
token: Optional[Union[bool, str]] = None,
**config_kwargs,
) -> DatasetInfo:
"""Get the meta information (DatasetInfo) about a dataset for a particular config
Args:
path (``str``): path to the dataset processing script with the dataset builder. Can be either:
- a local path to processing script or the directory containing the script (if the script has the same name as the directory),
e.g. ``'./dataset/squad'`` or ``'./dataset/squad/squad.py'``
- a dataset identifier on the Hugging Face Hub (list all available datasets and ids with [`huggingface_hub.list_datasets`]),
e.g. ``'squad'``, ``'glue'`` or ``'openai/webtext'``
config_name (:obj:`str`, optional): Defining the name of the dataset configuration.
data_files (:obj:`str` or :obj:`Sequence` or :obj:`Mapping`, optional): Path(s) to source data file(s).
download_config (:class:`~download.DownloadConfig`, optional): Specific download configuration parameters.
download_mode (:class:`DownloadMode` or :obj:`str`, default ``REUSE_DATASET_IF_EXISTS``): Download/generate mode.
revision (:class:`~utils.Version` or :obj:`str`, optional): Version of the dataset script to load.
As datasets have their own git repository on the Datasets Hub, the default version "main" corresponds to their "main" branch.
You can specify a different version than the default "main" by using a commit SHA or a git tag of the dataset repository.
token (``str`` or :obj:`bool`, optional): Optional string or boolean to use as Bearer token for remote files on the Datasets Hub.
If True, or not specified, will get token from `"~/.huggingface"`.
**config_kwargs (additional keyword arguments): optional attributes for builder class which will override the attributes if supplied.
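    Example:
    ```py
    >>> from datasets import get_dataset_config_info
    >>> info = get_dataset_config_info('rotten_tomatoes')
    >>> list(info.splits)
    ['train', 'validation', 'test']
    ```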
"""
builder = load_dataset_builder(
path,
name=config_name,
data_files=data_files,
download_config=download_config,
download_mode=download_mode,
revision=revision,
token=token,
**config_kwargs,
)
info = builder.info
if info.splits is None:
download_config = download_config.copy() if download_config else DownloadConfig()
if token is not None:
download_config.token = token
builder._check_manual_download(
StreamingDownloadManager(base_path=builder.base_path, download_config=download_config)
)
try:
info.splits = {
split_generator.name: {"name": split_generator.name, "dataset_name": path}
for split_generator in builder._split_generators(
StreamingDownloadManager(base_path=builder.base_path, download_config=download_config)
)
}
except Exception as err:
raise SplitsNotFoundError("The split names could not be parsed from the dataset config.") from err
return info
def get_dataset_split_names(
path: str,
config_name: Optional[str] = None,
data_files: Optional[Union[str, Sequence[str], Mapping[str, Union[str, Sequence[str]]]]] = None,
download_config: Optional[DownloadConfig] = None,
download_mode: Optional[Union[DownloadMode, str]] = None,
revision: Optional[Union[str, Version]] = None,
token: Optional[Union[bool, str]] = None,
**config_kwargs,
):
"""Get the list of available splits for a particular config and dataset.
Args:
path (`str`): path to the dataset processing script with the dataset builder. Can be either:
- a local path to processing script or the directory containing the script (if the script has the same name as the directory),
e.g. `'./dataset/squad'` or `'./dataset/squad/squad.py'`
- a dataset identifier on the Hugging Face Hub (list all available datasets and ids with [`huggingface_hub.list_datasets`]),
e.g. `'squad'`, `'glue'` or `'openai/webtext'`
config_name (`str`, *optional*):
Defining the name of the dataset configuration.
data_files (`str` or `Sequence` or `Mapping`, *optional*):
Path(s) to source data file(s).
download_config ([`DownloadConfig`], *optional*):
Specific download configuration parameters.
download_mode ([`DownloadMode`] or `str`, defaults to `REUSE_DATASET_IF_EXISTS`):
Download/generate mode.
revision ([`Version`] or `str`, *optional*):
Version of the dataset script to load.
As datasets have their own git repository on the Datasets Hub, the default version "main" corresponds to their "main" branch.
You can specify a different version than the default "main" by using a commit SHA or a git tag of the dataset repository.
token (`str` or `bool`, *optional*):
Optional string or boolean to use as Bearer token for remote files on the Datasets Hub.
If `True`, or not specified, will get token from `"~/.huggingface"`.
**config_kwargs (additional keyword arguments):
Optional attributes for builder class which will override the attributes if supplied.
Example:
```py
>>> from datasets import get_dataset_split_names
>>> get_dataset_split_names('rotten_tomatoes')
['train', 'validation', 'test']
```
"""
info = get_dataset_config_info(
path,
config_name=config_name,
data_files=data_files,
download_config=download_config,
download_mode=download_mode,
revision=revision,
token=token,
**config_kwargs,
)
return list(info.splits.keys())
| datasets/src/datasets/inspect.py/0 | {
"file_path": "datasets/src/datasets/inspect.py",
"repo_id": "datasets",
"token_count": 6338
} |
import itertools
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import datasets
from datasets.table import table_cast
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class ArrowConfig(datasets.BuilderConfig):
"""BuilderConfig for Arrow."""
features: Optional[datasets.Features] = None
def __post_init__(self):
super().__post_init__()
class Arrow(datasets.ArrowBasedBuilder):
BUILDER_CONFIG_CLASS = ArrowConfig
def _info(self):
return datasets.DatasetInfo(features=self.config.features)
def _split_generators(self, dl_manager):
"""We handle string, list and dicts in datafiles"""
if not self.config.data_files:
raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
dl_manager.download_config.extract_on_the_fly = True
data_files = dl_manager.download_and_extract(self.config.data_files)
splits = []
for split_name, files in data_files.items():
if isinstance(files, str):
files = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
files = [dl_manager.iter_files(file) for file in files]
# Infer features if they are stored in the arrow schema
if self.info.features is None:
for file in itertools.chain.from_iterable(files):
with open(file, "rb") as f:
try:
reader = pa.ipc.open_stream(f)
except (OSError, pa.lib.ArrowInvalid):
reader = pa.ipc.open_file(f)
self.info.features = datasets.Features.from_arrow_schema(reader.schema)
break
splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
return splits
def _cast_table(self, pa_table: pa.Table) -> pa.Table:
if self.info.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
pa_table = table_cast(pa_table, self.info.features.arrow_schema)
return pa_table
def _generate_tables(self, files):
for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
with open(file, "rb") as f:
try:
try:
batches = pa.ipc.open_stream(f)
except (OSError, pa.lib.ArrowInvalid):
reader = pa.ipc.open_file(f)
batches = (reader.get_batch(i) for i in range(reader.num_record_batches))
for batch_idx, record_batch in enumerate(batches):
pa_table = pa.Table.from_batches([record_batch])
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield f"{file_idx}_{batch_idx}", self._cast_table(pa_table)
except ValueError as e:
logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
raise
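# Illustrative usage (not part of the original source): this packaged builder is what backs
# `load_dataset("arrow", ...)` for local Arrow IPC files. The file path is a placeholder.
#
#     from datasets import load_dataset
#     ds = load_dataset("arrow", data_files={"train": "path/to/data.arrow"}, split="train")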
| datasets/src/datasets/packaged_modules/arrow/arrow.py/0 | {
"file_path": "datasets/src/datasets/packaged_modules/arrow/arrow.py",
"repo_id": "datasets",
"token_count": 1641
} |
import itertools
import warnings
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class PandasConfig(datasets.BuilderConfig):
"""BuilderConfig for Pandas."""
features: Optional[datasets.Features] = None
def __post_init__(self):
super().__post_init__()
class Pandas(datasets.ArrowBasedBuilder):
BUILDER_CONFIG_CLASS = PandasConfig
def _info(self):
warnings.warn(
"The Pandas builder is deprecated and will be removed in the next major version of datasets.",
FutureWarning,
)
return datasets.DatasetInfo(features=self.config.features)
def _split_generators(self, dl_manager):
"""We handle string, list and dicts in datafiles"""
if not self.config.data_files:
raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
data_files = dl_manager.download_and_extract(self.config.data_files)
if isinstance(data_files, (str, list, tuple)):
files = data_files
if isinstance(files, str):
files = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
files = [dl_manager.iter_files(file) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
splits = []
for split_name, files in data_files.items():
if isinstance(files, str):
files = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
files = [dl_manager.iter_files(file) for file in files]
splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
return splits
def _cast_table(self, pa_table: pa.Table) -> pa.Table:
if self.config.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
pa_table = table_cast(pa_table, self.config.features.arrow_schema)
return pa_table
def _generate_tables(self, files):
for i, file in enumerate(itertools.chain.from_iterable(files)):
with open(file, "rb") as f:
pa_table = pa.Table.from_pandas(pd.read_pickle(f))
yield i, self._cast_table(pa_table)
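# Illustrative usage (not part of the original source): this deprecated packaged builder backs
# `load_dataset("pandas", ...)` and reads pickled DataFrames. The file path is a placeholder.
#
#     from datasets import load_dataset
#     ds = load_dataset("pandas", data_files={"train": "path/to/data.pkl"}, split="train")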
| datasets/src/datasets/packaged_modules/pandas/pandas.py/0 | {
"file_path": "datasets/src/datasets/packaged_modules/pandas/pandas.py",
"repo_id": "datasets",
"token_count": 1040
} |
from functools import partial
from huggingface_hub import hf_hub_url
from huggingface_hub.utils import get_session, hf_raise_for_status
hf_dataset_url = partial(hf_hub_url, repo_type="dataset")
def check_auth(hf_api, repo_id, token=None):
headers = hf_api._build_hf_headers(token=token)
path = f"{hf_api.endpoint}/api/datasets/{repo_id}/auth-check"
r = get_session().get(path, headers=headers)
hf_raise_for_status(r)
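# Illustrative usage (not part of the original source): `check_auth` raises an HTTP error via
# `hf_raise_for_status` if the token cannot access the dataset repo, and returns None otherwise.
# The repo id and token below are placeholders.
#
#     from huggingface_hub import HfApi
#     check_auth(HfApi(), repo_id="username/private-dataset", token="hf_xxx")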
| datasets/src/datasets/utils/hub.py/0 | {
"file_path": "datasets/src/datasets/utils/hub.py",
"repo_id": "datasets",
"token_count": 180
} |
from collections.abc import Iterator
from typing import Iterable
class tracked_str(str):
origins = {}
def set_origin(self, origin: str):
if super().__repr__() not in self.origins:
self.origins[super().__repr__()] = origin
def get_origin(self):
return self.origins.get(super().__repr__(), str(self))
def __repr__(self) -> str:
if super().__repr__() not in self.origins or self.origins[super().__repr__()] == self:
return super().__repr__()
else:
return f"{str(self)} (origin={self.origins[super().__repr__()]})"
class tracked_list(list):
def __init__(self, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
self.last_item = None
def __iter__(self) -> Iterator:
for x in super().__iter__():
self.last_item = x
yield x
self.last_item = None
def __repr__(self) -> str:
if self.last_item is None:
return super().__repr__()
else:
return f"{self.__class__.__name__}(current={self.last_item})"
class TrackedIterableFromGenerator(Iterable):
"""Utility class to create an iterable from a generator function, in order to reset the generator when needed."""
def __init__(self, generator, *args):
super().__init__()
self.generator = generator
self.args = args
self.last_item = None
def __iter__(self):
for x in self.generator(*self.args):
self.last_item = x
yield x
self.last_item = None
def __repr__(self) -> str:
if self.last_item is None:
return super().__repr__()
else:
return f"{self.__class__.__name__}(current={self.last_item})"
def __reduce__(self):
return (self.__class__, (self.generator, *self.args))
| datasets/src/datasets/utils/track.py/0 | {
"file_path": "datasets/src/datasets/utils/track.py",
"repo_id": "datasets",
"token_count": 827
} |
import textwrap
import pyarrow as pa
import pytest
from datasets import Features, Value
from datasets.builder import InvalidConfigName
from datasets.data_files import DataFilesList
from datasets.packaged_modules.json.json import Json, JsonConfig
@pytest.fixture
def jsonl_file(tmp_path):
filename = tmp_path / "file.jsonl"
data = textwrap.dedent(
"""\
{"col_1": -1}
{"col_1": 1, "col_2": 2}
{"col_1": 10, "col_2": 20}
"""
)
with open(filename, "w") as f:
f.write(data)
return str(filename)
# ndjson format is no longer maintained (see: https://github.com/ndjson/ndjson-spec/issues/35#issuecomment-1285673417)
@pytest.fixture
def ndjson_file(tmp_path):
filename = tmp_path / "file.ndjson"
data = textwrap.dedent(
"""\
{"col_1": -1}
{"col_1": 1, "col_2": 2}
{"col_1": 10, "col_2": 20}
"""
)
with open(filename, "w") as f:
f.write(data)
return str(filename)
@pytest.fixture
def jsonl_file_utf16_encoded(tmp_path):
filename = tmp_path / "file_utf16_encoded.jsonl"
data = textwrap.dedent(
"""\
{"col_1": -1}
{"col_1": 1, "col_2": 2}
{"col_1": 10, "col_2": 20}
"""
)
with open(filename, "w", encoding="utf-16") as f:
f.write(data)
return str(filename)
@pytest.fixture
def json_file_with_list_of_dicts(tmp_path):
filename = tmp_path / "file_with_list_of_dicts.json"
data = textwrap.dedent(
"""\
[
{"col_1": -1},
{"col_1": 1, "col_2": 2},
{"col_1": 10, "col_2": 20}
]
"""
)
with open(filename, "w") as f:
f.write(data)
return str(filename)
@pytest.fixture
def json_file_with_list_of_strings(tmp_path):
filename = tmp_path / "file_with_list_of_strings.json"
data = textwrap.dedent(
"""\
[
"First text.",
"Second text.",
"Third text."
]
"""
)
with open(filename, "w") as f:
f.write(data)
return str(filename)
@pytest.fixture
def json_file_with_list_of_dicts_field(tmp_path):
filename = tmp_path / "file_with_list_of_dicts_field.json"
data = textwrap.dedent(
"""\
{
"field1": 1,
"field2": "aabb",
"field3": [
{"col_1": -1},
{"col_1": 1, "col_2": 2},
{"col_1": 10, "col_2": 20}
]
}
"""
)
with open(filename, "w") as f:
f.write(data)
return str(filename)
@pytest.fixture
def json_file_with_list_of_strings_field(tmp_path):
path = tmp_path / "file.json"
data = textwrap.dedent(
"""\
{
"field1": 1,
"field2": "aabb",
"field3": [
"First text.",
"Second text.",
"Third text."
]
}
"""
)
with open(path, "w") as f:
f.write(data)
return str(path)
@pytest.fixture
def json_file_with_dict_of_lists_field(tmp_path):
path = tmp_path / "file.json"
data = textwrap.dedent(
"""\
{
"field1": 1,
"field2": "aabb",
"field3": {
"col_1": [-1, 1, 10],
"col_2": [null, 2, 20]
}
}
"""
)
with open(path, "w") as f:
f.write(data)
return str(path)
@pytest.fixture
def json_file_with_list_of_dicts_with_sorted_columns(tmp_path):
path = tmp_path / "file.json"
data = textwrap.dedent(
"""\
[
{"ID": 0, "Language": "Language-0", "Topic": "Topic-0"},
{"ID": 1, "Language": "Language-1", "Topic": "Topic-1"},
{"ID": 2, "Language": "Language-2", "Topic": "Topic-2"}
]
"""
)
with open(path, "w") as f:
f.write(data)
return str(path)
@pytest.fixture
def json_file_with_list_of_dicts_with_sorted_columns_field(tmp_path):
path = tmp_path / "file.json"
data = textwrap.dedent(
"""\
{
"field1": 1,
"field2": "aabb",
"field3": [
{"ID": 0, "Language": "Language-0", "Topic": "Topic-0"},
{"ID": 1, "Language": "Language-1", "Topic": "Topic-1"},
{"ID": 2, "Language": "Language-2", "Topic": "Topic-2"}
]
}
"""
)
with open(path, "w") as f:
f.write(data)
return str(path)
def test_config_raises_when_invalid_name() -> None:
with pytest.raises(InvalidConfigName, match="Bad characters"):
_ = JsonConfig(name="name-with-*-invalid-character")
@pytest.mark.parametrize("data_files", ["str_path", ["str_path"], DataFilesList(["str_path"], [()])])
def test_config_raises_when_invalid_data_files(data_files) -> None:
with pytest.raises(ValueError, match="Expected a DataFilesDict"):
_ = JsonConfig(name="name", data_files=data_files)
@pytest.mark.parametrize(
"file_fixture, config_kwargs",
[
("jsonl_file", {}),
("ndjson_file", {}),
("jsonl_file_utf16_encoded", {"encoding": "utf-16"}),
("json_file_with_list_of_dicts", {}),
("json_file_with_list_of_dicts_field", {"field": "field3"}),
("json_file_with_list_of_strings", {}),
("json_file_with_list_of_strings_field", {"field": "field3"}),
("json_file_with_dict_of_lists_field", {"field": "field3"}),
],
)
def test_json_generate_tables(file_fixture, config_kwargs, request):
json = Json(**config_kwargs)
generator = json._generate_tables([[request.getfixturevalue(file_fixture)]])
pa_table = pa.concat_tables([table for _, table in generator])
if "list_of_strings" in file_fixture:
expected = {"text": ["First text.", "Second text.", "Third text."]}
else:
expected = {"col_1": [-1, 1, 10], "col_2": [None, 2, 20]}
assert pa_table.to_pydict() == expected
@pytest.mark.parametrize(
"file_fixture, config_kwargs",
[
(
"jsonl_file",
{"features": Features({"col_1": Value("int64"), "col_2": Value("int64"), "missing_col": Value("string")})},
),
(
"json_file_with_list_of_dicts",
{"features": Features({"col_1": Value("int64"), "col_2": Value("int64"), "missing_col": Value("string")})},
),
(
"json_file_with_list_of_dicts_field",
{
"field": "field3",
"features": Features(
{"col_1": Value("int64"), "col_2": Value("int64"), "missing_col": Value("string")}
),
},
),
],
)
def test_json_generate_tables_with_missing_features(file_fixture, config_kwargs, request):
json = Json(**config_kwargs)
generator = json._generate_tables([[request.getfixturevalue(file_fixture)]])
pa_table = pa.concat_tables([table for _, table in generator])
assert pa_table.to_pydict() == {"col_1": [-1, 1, 10], "col_2": [None, 2, 20], "missing_col": [None, None, None]}
@pytest.mark.parametrize(
"file_fixture, config_kwargs",
[
("json_file_with_list_of_dicts_with_sorted_columns", {}),
("json_file_with_list_of_dicts_with_sorted_columns_field", {"field": "field3"}),
],
)
def test_json_generate_tables_with_sorted_columns(file_fixture, config_kwargs, request):
builder = Json(**config_kwargs)
generator = builder._generate_tables([[request.getfixturevalue(file_fixture)]])
pa_table = pa.concat_tables([table for _, table in generator])
assert pa_table.column_names == ["ID", "Language", "Topic"]
| datasets/tests/packaged_modules/test_json.py/0 | {
"file_path": "datasets/tests/packaged_modules/test_json.py",
"repo_id": "datasets",
"token_count": 3820
} |
import warnings
import pytest
import datasets.utils.deprecation_utils
from datasets.exceptions import (
ChecksumVerificationError,
ExpectedMoreDownloadedFilesError,
ExpectedMoreSplitsError,
NonMatchingChecksumError,
NonMatchingSplitsSizesError,
SplitsVerificationError,
UnexpectedDownloadedFileError,
UnexpectedSplitsError,
)
@pytest.mark.parametrize(
"error",
[
ChecksumVerificationError,
UnexpectedDownloadedFileError,
ExpectedMoreDownloadedFilesError,
NonMatchingChecksumError,
SplitsVerificationError,
UnexpectedSplitsError,
ExpectedMoreSplitsError,
NonMatchingSplitsSizesError,
],
)
def test_error_not_deprecated(error, monkeypatch):
monkeypatch.setattr(datasets.utils.deprecation_utils, "_emitted_deprecation_warnings", set())
with warnings.catch_warnings():
warnings.simplefilter("error")
error()
| datasets/tests/test_exceptions.py/0 | {
"file_path": "datasets/tests/test_exceptions.py",
"repo_id": "datasets",
"token_count": 360
} |
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def add_one(i): # picklable for multiprocessing
return i + 1
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def test_parallel_backend_input():
with parallel_backend("spark"):
assert ParallelBackendConfig.backend_name == "spark"
lst = [1, 2, 3]
with pytest.raises(ValueError):
with parallel_backend("unsupported backend"):
map_nested(add_one, lst, num_proc=2)
with pytest.raises(ValueError):
with parallel_backend("unsupported backend"):
map_nested(add_one, lst, num_proc=-1)
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("num_proc", [2, -1])
def test_parallel_backend_map_nested(num_proc):
s1 = [1, 2]
s2 = {"a": 1, "b": 2}
s3 = {"a": [1, 2], "b": [3, 4]}
s4 = {"a": {"1": 1}, "b": 2}
s5 = {"a": 1, "b": 2, "c": 3, "d": 4}
expected_map_nested_s1 = [2, 3]
expected_map_nested_s2 = {"a": 2, "b": 3}
expected_map_nested_s3 = {"a": [2, 3], "b": [4, 5]}
expected_map_nested_s4 = {"a": {"1": 2}, "b": 3}
expected_map_nested_s5 = {"a": 2, "b": 3, "c": 4, "d": 5}
with parallel_backend("spark"):
assert map_nested(add_one, s1, num_proc=num_proc) == expected_map_nested_s1
assert map_nested(add_one, s2, num_proc=num_proc) == expected_map_nested_s2
assert map_nested(add_one, s3, num_proc=num_proc) == expected_map_nested_s3
assert map_nested(add_one, s4, num_proc=num_proc) == expected_map_nested_s4
assert map_nested(add_one, s5, num_proc=num_proc) == expected_map_nested_s5
| datasets/tests/test_parallel.py/0 | {
"file_path": "datasets/tests/test_parallel.py",
"repo_id": "datasets",
"token_count": 825
} |
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->
# Configuration
Schedulers from [`~schedulers.scheduling_utils.SchedulerMixin`] and models from [`ModelMixin`] inherit from [`ConfigMixin`], which stores all the parameters passed to their respective `__init__` methods in a JSON configuration file.
<Tip>
To use private or [gated](https://huggingface.co/docs/hub/models-gated#gated-models) models, log in with `huggingface-cli login`.
</Tip>
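As a minimal sketch of how this works (the scheduler class and parameter values below are only illustrative choices), every argument passed to `__init__` ends up in the config and can be round-tripped through JSON:
```py
from diffusers import DDIMScheduler
# Arguments passed to __init__ are recorded on the `config` attribute.
scheduler = DDIMScheduler(num_train_timesteps=1000, beta_schedule="scaled_linear")
print(scheduler.config.num_train_timesteps)  # 1000
# Write the config to disk as JSON, then rebuild an equivalent scheduler from it.
scheduler.save_config("ddim-config")
reloaded = DDIMScheduler.from_config(DDIMScheduler.load_config("ddim-config"))
```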
## ConfigMixin
[[autodoc]] ConfigMixin
- load_config
- from_config
- save_config
- to_json_file
- to_json_string
| diffusers/docs/source/en/api/configuration.md/0 | {
"file_path": "diffusers/docs/source/en/api/configuration.md",
"repo_id": "diffusers",
"token_count": 325
} |
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->
# AutoencoderOobleck
The Oobleck variational autoencoder (VAE) model with KL loss was introduced in [Stability-AI/stable-audio-tools](https://github.com/Stability-AI/stable-audio-tools) and [Stable Audio Open](https://huggingface.co/papers/2407.14358) by Stability AI. The model is used in 🤗 Diffusers to encode audio waveforms into latents and to decode latent representations into audio waveforms.
The abstract from the paper is:
*Open generative models are vitally important for the community, allowing for fine-tunes and serving as baselines when presenting new models. However, most current text-to-audio models are private and not accessible for artists and researchers to build upon. Here we describe the architecture and training process of a new open-weights text-to-audio model trained with Creative Commons data. Our evaluation shows that the model's performance is competitive with the state-of-the-art across various metrics. Notably, the reported FDopenl3 results (measuring the realism of the generations) showcase its potential for high-quality stereo sound synthesis at 44.1kHz.*
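As a rough usage sketch (the repository id, subfolder name, and tensor shapes below are assumptions for illustration; the Stable Audio Open weights are gated and require accepting the license), the autoencoder maps stereo waveforms to latents and back:
```py
import torch
from diffusers import AutoencoderOobleck
vae = AutoencoderOobleck.from_pretrained("stabilityai/stable-audio-open-1.0", subfolder="vae")
# Roughly one second of stereo audio at 44.1 kHz: (batch, channels, samples).
waveform = torch.randn(1, 2, 44100)
with torch.no_grad():
    latents = vae.encode(waveform).latent_dist.sample()
    reconstruction = vae.decode(latents).sample  # back to a waveform
```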
## AutoencoderOobleck
[[autodoc]] AutoencoderOobleck
- decode
- encode
- all
## OobleckDecoderOutput
[[autodoc]] models.autoencoders.autoencoder_oobleck.OobleckDecoderOutput
## AutoencoderOobleckOutput
[[autodoc]] models.autoencoders.autoencoder_oobleck.AutoencoderOobleckOutput
| diffusers/docs/source/en/api/models/autoencoder_oobleck.md/0 | {
"file_path": "diffusers/docs/source/en/api/models/autoencoder_oobleck.md",
"repo_id": "diffusers",
"token_count": 565
} |
<!--Copyright 2024 The HuggingFace Team and The InstantX Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->
# ControlNetUnionModel
ControlNetUnionModel is an implementation of ControlNet for Stable Diffusion XL.
The ControlNet model was introduced in [ControlNetPlus](https://github.com/xinsir6/ControlNetPlus) by xinsir6. It supports multiple conditioning inputs without increasing computation.
*We design a new architecture that can support 10+ control types in condition text-to-image generation and can generate high resolution images visually comparable with midjourney. The network is based on the original ControlNet architecture, we propose two new modules to: 1 Extend the original ControlNet to support different image conditions using the same network parameter. 2 Support multiple conditions input without increasing computation offload, which is especially important for designers who want to edit image in detail, different conditions use the same condition encoder, without adding extra computations or parameters.*
## Loading
By default, [`ControlNetUnionModel`] should be loaded with [`~ModelMixin.from_pretrained`].
```py
from diffusers import StableDiffusionXLControlNetUnionPipeline, ControlNetUnionModel
controlnet = ControlNetUnionModel.from_pretrained("xinsir/controlnet-union-sdxl-1.0")
pipe = StableDiffusionXLControlNetUnionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0", controlnet=controlnet)
```
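In practice the checkpoint is usually loaded in half precision and moved to the GPU. The sketch below assumes a CUDA device is available and otherwise mirrors the snippet above; the conditioning image and control type are supplied when calling the pipeline, as described in the pipeline documentation.
```py
import torch
from diffusers import StableDiffusionXLControlNetUnionPipeline, ControlNetUnionModel
controlnet = ControlNetUnionModel.from_pretrained(
    "xinsir/controlnet-union-sdxl-1.0", torch_dtype=torch.float16
)
pipe = StableDiffusionXLControlNetUnionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", controlnet=controlnet, torch_dtype=torch.float16
).to("cuda")
```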
## ControlNetUnionModel
[[autodoc]] ControlNetUnionModel
| diffusers/docs/source/en/api/models/controlnet_union.md/0 | {
"file_path": "diffusers/docs/source/en/api/models/controlnet_union.md",
"repo_id": "diffusers",
"token_count": 486
} |
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->
# Transformer2DModel
A Transformer model for image-like data from [CompVis](https://huggingface.co/CompVis) that is based on the [Vision Transformer](https://huggingface.co/papers/2010.11929) introduced by Dosovitskiy et al. The [`Transformer2DModel`] accepts discrete (classes of vector embeddings) or continuous (actual embeddings) inputs.
When the input is **continuous**:
1. Project the input and reshape it to `(batch_size, sequence_length, feature_dimension)`.
2. Apply the Transformer blocks in the standard way.
3. Reshape to image.
When the input is **discrete**:
<Tip>
It is assumed that one of the input classes is the masked latent pixel. The predicted classes of the unnoised image do not include a prediction for the masked pixel because the unnoised image cannot be masked.
</Tip>
1. Convert input (classes of latent pixels) to embeddings and apply positional embeddings.
2. Apply the Transformer blocks in the standard way.
3. Predict classes of unnoised image.
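As a rough sketch of the continuous path (the tiny sizes below are illustrative assumptions, far smaller than any real checkpoint), the model projects image-like latents to a token sequence, runs the Transformer blocks, and reshapes back:
```py
import torch
from diffusers import Transformer2DModel
# in_channels must be divisible by norm_num_groups (32 by default).
model = Transformer2DModel(num_attention_heads=2, attention_head_dim=16, in_channels=32, num_layers=1)
latents = torch.randn(1, 32, 8, 8)  # (batch, channels, height, width)
with torch.no_grad():
    sample = model(latents).sample  # projected back to the input shape
print(sample.shape)  # torch.Size([1, 32, 8, 8])
```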
## Transformer2DModel
[[autodoc]] Transformer2DModel
## Transformer2DModelOutput
[[autodoc]] models.modeling_outputs.Transformer2DModelOutput
| diffusers/docs/source/en/api/models/transformer2d.md/0 | {
"file_path": "diffusers/docs/source/en/api/models/transformer2d.md",
"repo_id": "diffusers",
"token_count": 465
} |