Commit f715b9e

Merge branch 'main' into android-0.6-docs
2 parents: 5087e37 + 2972388

26 files changed: +155 −175 lines


.ci/scripts/utils.sh

Lines changed: 1 addition & 1 deletion
@@ -32,7 +32,7 @@ install_executorch() {
   which pip
   # Install executorch, this assumes that Executorch is checked out in the
   # current directory.
-  ./install_executorch.sh --pybind xnnpack "$@"
+  ./install_executorch.sh "$@"
   # Just print out the list of packages for debugging
   pip list
 }

.github/workflows/apple.yml

Lines changed: 1 addition & 1 deletion
@@ -37,7 +37,7 @@ jobs:
       id: set_version
       shell: bash
       run: |
-        VERSION="0.5.0.$(TZ='PST8PDT' date +%Y%m%d)"
+        VERSION="0.7.0.$(TZ='PST8PDT' date +%Y%m%d)"
         echo "version=$VERSION" >> "$GITHUB_OUTPUT"

   build-demo-ios:

backends/xnnpack/operators/node_visitor.py

Lines changed: 9 additions & 11 deletions
@@ -210,7 +210,7 @@ def get_serialized_dtype(
         self,
         quant_params: Optional[QuantParams],
         node: torch.fx.Node,
-        fp32_static_weight: bool = False,
+        force_fp32: bool = False,
     ) -> XNNDatatype:
         # Default initialization
         dtype = XNNDatatype.xnn_datatype_fp32
@@ -267,7 +267,7 @@ def get_per_channel_dtype(
         if node_dtype is not None and node_dtype == torch.float16:
             dtype = (
                 XNNDatatype.xnn_datatype_fp32
-                if fp32_static_weight
+                if force_fp32
                 else XNNDatatype.xnn_datatype_fp16
             )

@@ -348,7 +348,7 @@ def define_tensor( # noqa: C901
         convert_to_nhwc: bool = False,
         swap_in_out_for_weights: bool = False,
         quant_params: Optional[QuantParams] = None,
-        fp32_static_weights: bool = False,
+        force_fp32: bool = False,
         groups: int = 1,
     ) -> None:
         """
@@ -368,7 +368,7 @@ def define_tensor( # noqa: C901
                 constant data. If used along with convert_to_nhwc, this
                 swap will happen before converting to nhwc.
             quant_params: Quantization metadata for this tensor, None if it is not quantized
-            fp32_static_weights: XNN_FLAG_FP32_STATIC_WEIGHTS for fp16 conv
+            force_fp32: forces the tensor to be serialized as fp32; used for the bias of dynamically quantized ops
             groups: number of groups for swap_in_out_for_weights
         """

@@ -405,7 +405,7 @@ def define_tensor( # noqa: C901
             convert_to_nhwc,
             swap_in_out_for_weights,
             quant_params,
-            fp32_static_weights,
+            force_fp32,
             groups,
         )

@@ -417,9 +417,7 @@ def define_tensor( # noqa: C901
             check_or_raise(len(dims) == 4, "Converting to nhwc requires 4d tensor")
             dims = [dims[i] for i in PERM_NCHW_TO_NHWC]

-        dtype = self.get_serialized_dtype(
-            quant_params, tensor, fp32_static_weight=fp32_static_weights
-        )
+        dtype = self.get_serialized_dtype(quant_params, tensor, force_fp32=force_fp32)

         tvalue = XNNTensorValue(
             datatype=dtype,
@@ -504,7 +502,7 @@ def get_serialized_buffer_index(
         convert_to_nhwc: bool,
         swap_in_out_for_weights: bool,
         quant_params: Optional[QuantParams],
-        fp32_static_weights: bool = False,
+        force_fp32: bool = False,
         groups: int = 1,
     ) -> int:
         """
@@ -525,7 +523,7 @@ def get_serialized_buffer_index(
                 constant data. If used along with convert_to_nhwc, this
                 swap will happen before converting to nhwc.
             quant_params: Quantization metadata for this tensor, None if it is not quantized
-            fp32_static_weights: bool to indicate whether tensor is fp32 static weights
+            force_fp32: bool to indicate whether the tensor should be serialized as fp32
             groups: groups for swap_in_out_for_weights

         Returns:
@@ -554,7 +552,7 @@
         # Quantize buffer if static data is indeed quantized
         if quant_params is not None and not quant_params.is_dynamic:
             const_val = quant_params.quantize_tensor(const_val).contiguous()
-        elif const_val.dtype != torch.float16 or fp32_static_weights:
+        elif const_val.dtype != torch.float16 or force_fp32:
             # ensure that the const is fp32
             const_val = const_val.to(dtype=torch.float32).contiguous()
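The net effect of the rename: the serialized dtype now depends on a generic `force_fp32` override rather than a conv-specific flag. A standalone sketch of the fp16/fp32 branch shown in the second hunk (simplified; the real method also handles quantized dtypes):

```python
import torch

def serialized_dtype(node_dtype: torch.dtype, force_fp32: bool) -> str:
    # Simplified from get_serialized_dtype: fp16 tensors stay fp16
    # unless force_fp32 promotes them to fp32 at serialization time.
    if node_dtype == torch.float16 and not force_fp32:
        return "xnn_datatype_fp16"
    return "xnn_datatype_fp32"

assert serialized_dtype(torch.float16, force_fp32=True) == "xnn_datatype_fp32"
assert serialized_dtype(torch.float16, force_fp32=False) == "xnn_datatype_fp16"
```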

backends/xnnpack/operators/op_conv2d.py

Lines changed: 3 additions & 3 deletions
@@ -82,7 +82,6 @@ def define_node(
         weight_quant_params = QuantParams.from_weights(
             kernel_node, self._exported_program
         )
-        fp32_static_weights = kernel_node.meta["val"].dtype == torch.float16

         if weight_quant_params is not None and weight_quant_params.per_channel:
             if is_transpose:
@@ -102,8 +101,8 @@ def define_node(
             convert_to_nhwc=True,
             swap_in_out_for_weights=is_depthwise_conv or is_transpose,
             quant_params=weight_quant_params,
-            fp32_static_weights=fp32_static_weights,
             groups=groups if is_transpose else 1,
+            force_fp32=True,
         )
         kwargs["filter_id"] = vals_to_ids[get_input_node(node, 1)]

@@ -127,13 +126,14 @@ def define_node(
         bias_quant_params = QuantParams.from_bias(
             bias_node, weight_quant_params, input_quant_params
         )
+
         self.define_tensor(
             get_input_node(node, 2),
             xnn_graph,
             vals_to_ids,
             convert_to_nhwc=False,
             quant_params=bias_quant_params,
-            fp32_static_weights=fp32_static_weights,
+            force_fp32=True,
         )
         kwargs["bias_id"] = vals_to_ids[get_input_node(node, 2)]

backends/xnnpack/operators/op_linear.py

Lines changed: 7 additions & 2 deletions
@@ -59,7 +59,6 @@ def define_node(
             xnn_graph,
             vals_to_ids,
             quant_params=weight_quant_params,
-            fp32_static_weights=True,
         )
         filter_id = vals_to_ids[weight_node]

@@ -69,12 +68,18 @@ def define_node(
             bias_quant_params = QuantParams.from_bias(
                 bias_node, weight_quant_params, input_quant_params
             )
+            # For dynamic quantization, there are no kernels with fp16 bias,
+            # so we need to force the fp16 bias to fp32.
+            force_fp32 = False
+            if input_quant_params is not None and input_quant_params.is_dynamic:
+                force_fp32 = True
+
             self.define_tensor(
                 get_input_node(node, 2),
                 xnn_graph,
                 vals_to_ids,
                 quant_params=bias_quant_params,
-                fp32_static_weights=True,
+                force_fp32=force_fp32,
             )
             bias_id = vals_to_ids[bias_node]
         else:
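The same rule from the data side: a sketch of the buffer promotion in `get_serialized_buffer_index` (using a plain tensor in place of the graph constant; the `is_dynamic` check mirrors the new code above):

```python
import torch

def serialize_bias(const_val: torch.Tensor, force_fp32: bool) -> torch.Tensor:
    # Mirrors the elif branch in get_serialized_buffer_index: anything
    # that is not fp16, or that has force_fp32 set, is stored as fp32.
    if const_val.dtype != torch.float16 or force_fp32:
        const_val = const_val.to(dtype=torch.float32).contiguous()
    return const_val

fp16_bias = torch.randn(8, dtype=torch.float16)
assert serialize_bias(fp16_bias, force_fp32=True).dtype == torch.float32
assert serialize_bias(fp16_bias, force_fp32=False).dtype == torch.float16
```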

backends/xnnpack/test/ops/test_linear.py

Lines changed: 3 additions & 5 deletions
@@ -605,9 +605,7 @@ def _test_qd8_linear_per_tensor_unsupported(self, dtype: torch.dtype = torch.flo

         if legacy_partitioner:
             tester.to_edge()
-            tester.partition(
-                Partition(DynamicallyQuantizedPartitioner)
-            ).dump_artifact()
+            tester.partition(Partition(DynamicallyQuantizedPartitioner))
             # should have [add]mm node
             if uses_bias:
                 tester.check(
@@ -624,7 +622,7 @@ def _test_qd8_linear_per_tensor_unsupported(self, dtype: torch.dtype = torch.flo
         else:
             tester.to_edge_transform_and_lower(
                 ToEdgeTransformAndLower([DynamicallyQuantizedPartitioner])
-            ).dump_artifact()
+            )
             # should not have a delegate node
             tester.check_not(
                 [
@@ -717,7 +715,7 @@ def test_fp16_linear(self):
                 num_batch_dims=num_batch_dims,
                 uses_bias=use_bias,
                 dtype=torch.float16,
-                atol=5e-2,  # TODO(T212995726): Investigate right atol for rand[n] inputs
+                atol=5e-3,  # TODO(T212995726): Investigate right atol for rand[n] inputs
             )

     def test_fp32_linear(self):
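For intuition on the tightened tolerance, a rough standalone check (not the tester's actual comparison, which runs a full linear layer): the fp16 round-trip error for randn-scale values sits well below 5e-3.

```python
import torch

# Per-element fp16 rounding error for values of magnitude ~1 is about
# 2**-11 ≈ 5e-4, so a 5e-3 atol leaves headroom for small accumulations.
x = torch.randn(256, 256)
err = (x.to(torch.float16).to(torch.float32) - x).abs().max().item()
print(f"max fp16 round-trip error: {err:.2e}")
```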

docs/README.md

Lines changed: 11 additions & 12 deletions
@@ -39,33 +39,32 @@ To build the documentation locally:

 1. Clone the ExecuTorch repo to your machine.

-1. If you don't have it already, start a conda environment:
+   ```bash
+   git clone -b viable/strict https://github.com/pytorch/executorch.git && cd executorch
+   ```

-   ```{note}
-   The below command generates a completely new environment and resets
-   any existing dependencies. If you have an environment already, skip
-   the `conda create` command.
+1. If you don't have one already, create either a Python virtual environment:
+
+   ```bash
+   python3 -m venv .venv && source .venv/bin/activate && pip install --upgrade pip
    ```

+   Or a Conda environment:
+
    ```bash
-   conda create -yn executorch python=3.10.0
-   conda activate executorch
+   conda create -yn executorch python=3.10.0 && conda activate executorch
    ```

 1. Install dependencies:

    ```bash
    pip3 install -r ./.ci/docker/requirements-ci.txt
    ```
-1. Update submodules

-   ```bash
-   git submodule sync && git submodule update --init
-   ```
 1. Run:

    ```bash
-   bash install_executorch.sh
+   ./install_executorch.sh
    ```

 1. Go to the `docs/` directory.

docs/source/getting-started.md

Lines changed: 2 additions & 2 deletions
@@ -137,7 +137,7 @@ For a full example of running a model on Android, see the [DeepLabV3AndroidDemo]
 #### Installation
 ExecuTorch supports both iOS and MacOS via C++, as well as hardware backends for CoreML, MPS, and CPU. The iOS runtime library is provided as a collection of .xcframework targets and is made available as a Swift PM package.

-To get started with Xcode, go to File > Add Package Dependencies. Paste the URL of the ExecuTorch repo into the search bar and select it. Make sure to change the branch name to the desired ExecuTorch version in the format “swiftpm-<version>” (e.g. “swiftpm-0.5.0”). The ExecuTorch dependency can also be added to the package file manually. See [Using ExecuTorch on iOS](using-executorch-ios.md) for more information.
+To get started with Xcode, go to File > Add Package Dependencies. Paste the URL of the ExecuTorch repo into the search bar and select it. Make sure to change the branch name to the desired ExecuTorch version in the format “swiftpm-<version>” (e.g. “swiftpm-0.6.0”). The ExecuTorch dependency can also be added to the package file manually. See [Using ExecuTorch on iOS](using-executorch-ios.md) for more information.

 #### Runtime APIs
 Models can be loaded and run from Objective-C using the C++ APIs.
@@ -151,7 +151,7 @@ ExecuTorch provides C++ APIs, which can be used to target embedded or mobile dev
 CMake is the preferred build system for the ExecuTorch C++ runtime. To use with CMake, clone the ExecuTorch repository as a subdirectory of your project, and use CMake's `add_subdirectory("executorch")` to include the dependency. The `executorch` target, as well as kernel and backend targets, will be made available to link against. The runtime can also be built standalone to support diverse toolchains. See [Using ExecuTorch with C++](using-executorch-cpp.md) for a detailed description of build integration, targets, and cross compilation.

 ```
-git clone -b release/0.5 https://github.com/pytorch/executorch.git
+git clone -b viable/strict https://github.com/pytorch/executorch.git
 ```
 ```python
 # CMakeLists.txt
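As an aside to the "Runtime APIs" paragraph above, the same `.pte` file can also be exercised from Python during development. A minimal sketch using ExecuTorch's Python `Runtime` bindings (the `model.pte` path and input shape are placeholders):

```python
import torch
from executorch.runtime import Runtime

# Load a compiled ExecuTorch program and run its "forward" method.
runtime = Runtime.get()
program = runtime.load_program("model.pte")  # placeholder path
method = program.load_method("forward")
outputs = method.execute([torch.randn(1, 3, 224, 224)])  # placeholder input
print(outputs)
```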

docs/source/llm/getting-started.md

Lines changed: 11 additions & 12 deletions
@@ -43,15 +43,17 @@ Instructions on installing miniconda can be [found here](https://docs.anaconda.c
 mkdir et-nanogpt
 cd et-nanogpt

-# Clone the ExecuTorch repository and submodules.
+# Clone the ExecuTorch repository.
 mkdir third-party
-git clone -b release/0.4 https://github.com/pytorch/executorch.git third-party/executorch
-cd third-party/executorch
-git submodule update --init
+git clone -b viable/strict https://github.com/pytorch/executorch.git third-party/executorch && cd third-party/executorch

-# Create a conda environment and install requirements.
-conda create -yn executorch python=3.10.0
-conda activate executorch
+# Create either a Python virtual environment:
+python3 -m venv .venv && source .venv/bin/activate && pip install --upgrade pip
+
+# Or a Conda environment:
+conda create -yn executorch python=3.10.0 && conda activate executorch
+
+# Install requirements.
 ./install_executorch.sh

 cd ../..
@@ -76,11 +78,8 @@ pyenv install -s 3.10
 pyenv virtualenv 3.10 executorch
 pyenv activate executorch

-# Clone the ExecuTorch repository and submodules.
-mkdir third-party
-git clone -b release/0.4 https://github.com/pytorch/executorch.git third-party/executorch
-cd third-party/executorch
-git submodule update --init
+# Clone the ExecuTorch repository.
+git clone -b viable/strict https://github.com/pytorch/executorch.git third-party/executorch && cd third-party/executorch

 # Install requirements.
 PYTHON_EXECUTABLE=python ./install_executorch.sh
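Once the environment is set up, the tutorial proceeds to export a model to a `.pte` file. A minimal sketch of that flow (a toy module stands in for the tutorial's nanoGPT):

```python
import torch
from executorch.exir import to_edge

class ToyModel(torch.nn.Module):  # hypothetical stand-in for nanoGPT
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return torch.nn.functional.relu(x)

# Export to an ExportedProgram, lower to edge dialect, and serialize.
exported = torch.export.export(ToyModel(), (torch.randn(4),))
executorch_program = to_edge(exported).to_executorch()
with open("toy.pte", "wb") as f:
    f.write(executorch_program.buffer)
```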

docs/source/using-executorch-building-from-source.md

Lines changed: 11 additions & 15 deletions
@@ -36,27 +36,23 @@ portability details.

 ## Environment Setup

-### Create a Virtual Environment
+### Clone ExecuTorch

-[Install conda on your machine](https://conda.io/projects/conda/en/latest/user-guide/install/index.html). Then, create a virtual environment to manage our dependencies.
 ```bash
-# Create and activate a conda environment named "executorch"
-conda create -yn executorch python=3.10.0
-conda activate executorch
+# Clone the ExecuTorch repo from GitHub
+git clone -b viable/strict https://github.com/pytorch/executorch.git && cd executorch
 ```

-### Clone ExecuTorch
+### Create a Virtual Environment

+Create and activate a Python virtual environment:
 ```bash
-# Clone the ExecuTorch repo from GitHub
-# 'main' branch is the primary development branch where you see the latest changes.
-# 'viable/strict' contains all of the commits on main that pass all of the necessary CI checks.
-git clone --branch viable/strict https://github.com/pytorch/executorch.git
-cd executorch
-
-# Update and pull submodules
-git submodule sync
-git submodule update --init
+python3 -m venv .venv && source .venv/bin/activate && pip install --upgrade pip
+```
+
+Alternatively, [install conda on your machine](https://conda.io/projects/conda/en/latest/user-guide/install/index.html) and create a Conda environment named "executorch":
+```bash
+conda create -yn executorch python=3.10.0 && conda activate executorch
 ```

 ## Install ExecuTorch pip package from Source

docs/source/using-executorch-ios.md

Lines changed: 8 additions & 7 deletions
@@ -25,7 +25,7 @@ The prebuilt ExecuTorch runtime, backend, and kernels are available as a [Swift

 #### Xcode

-In Xcode, go to `File > Add Package Dependencies`. Paste the URL of the [ExecuTorch repo](https://github.com/pytorch/executorch) into the search bar and select it. Make sure to change the branch name to the desired ExecuTorch version in the format "swiftpm-<version>" (e.g. "swiftpm-0.5.0"), or a branch name in the format "swiftpm-<version>.<year_month_date>" (e.g. "swiftpm-0.5.0-20250228") for a nightly build on a specific date.
+In Xcode, go to `File > Add Package Dependencies`. Paste the URL of the [ExecuTorch repo](https://github.com/pytorch/executorch) into the search bar and select it. Make sure to change the branch name to the desired ExecuTorch version in the format "swiftpm-<version>" (e.g. "swiftpm-0.6.0"), or a branch name in the format "swiftpm-<version>.<year_month_date>" (e.g. "swiftpm-0.7.0-20250401") for a nightly build on a specific date.

 ![](_static/img/swiftpm_xcode1.png)

@@ -58,7 +58,7 @@ let package = Package(
   ],
   dependencies: [
     // Use "swiftpm-<version>.<year_month_day>" branch name for a nightly build.
-    .package(url: "https://github.com/pytorch/executorch.git", branch: "swiftpm-0.5.0")
+    .package(url: "https://github.com/pytorch/executorch.git", branch: "swiftpm-0.6.0")
   ],
   targets: [
     .target(
@@ -97,7 +97,7 @@ xcode-select --install
 2. Clone ExecuTorch:

 ```bash
-git clone https://github.com/pytorch/executorch.git --depth 1 --recurse-submodules --shallow-submodules && cd executorch
+git clone -b viable/strict https://github.com/pytorch/executorch.git && cd executorch
 ```

 3. Set up [Python](https://www.python.org/downloads/macos/) 3.10+ and activate a virtual environment:
@@ -106,15 +106,16 @@ git clone https://github.com/pytorch/executorch.git --depth 1 --recurse-submodul
 python3 -m venv .venv && source .venv/bin/activate && ./install_requirements.sh
 ```

-4. Install the required dependencies, including those needed for the backends like [Core ML](backends-coreml.md) or [MPS](backends-mps.md). Choose one:
+4. Install the required dependencies, including those needed for the backends like [Core ML](backends-coreml.md) or [MPS](backends-mps.md). Choose one, or both:

 ```bash
 # ExecuTorch with xnnpack and CoreML backend
-./install_executorch.sh --pybind xnnpack
+./backends/apple/coreml/scripts/install_requirements.sh
+./install_executorch.sh --pybind coreml xnnpack

-# Optional: ExecuTorch with xnnpack, CoreML, and MPS backend
+# ExecuTorch with xnnpack and MPS backend
 ./backends/apple/mps/install_requirements.sh
-./install_executorch.sh --pybind xnnpack mps
+./install_executorch.sh --pybind mps xnnpack
 ```

 5. Install [CMake](https://cmake.org):
