diff --git a/.github/workflows/paddleocr.yml b/.github/workflows/paddleocr.yml new file mode 100644 index 000000000..220771066 --- /dev/null +++ b/.github/workflows/paddleocr.yml @@ -0,0 +1,80 @@ +# This workflow comes from https://github.com/ofek/hatch-mypyc +# https://github.com/ofek/hatch-mypyc/blob/5a198c0ba8660494d02716cfc9d79ce4adfb1442/.github/workflows/test.yml +name: Test / paddleocr + +on: + schedule: + - cron: "0 0 * * *" + pull_request: + paths: + - "integrations/paddleocr/**" + - "!integrations/paddleocr/*.md" + - ".github/workflows/paddleocr.yml" + +defaults: + run: + working-directory: integrations/paddleocr + +concurrency: + group: paddleocr-${{ github.head_ref }} + cancel-in-progress: true + +env: + PYTHONUNBUFFERED: "1" + FORCE_COLOR: "1" + AISTUDIO_ACCESS_TOKEN: ${{ secrets.AISTUDIO_ACCESS_TOKEN }} + PADDLEOCR_VL_API_URL: ${{ secrets.PADDLEOCR_VL_API_URL }} + +jobs: + run: + name: Python ${{ matrix.python-version }} on ${{ startsWith(matrix.os, 'macos-') && 'macOS' || startsWith(matrix.os, 'windows-') && 'Windows' || 'Linux' }} + runs-on: ${{ matrix.os }} + strategy: + fail-fast: false + matrix: + os: [ubuntu-latest, windows-latest, macos-latest] + python-version: ["3.9", "3.12"] + + steps: + - name: Support longpaths + if: matrix.os == 'windows-latest' + working-directory: . 
+ run: git config --system core.longpaths true + + - uses: actions/checkout@v6 + + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v6 + with: + python-version: ${{ matrix.python-version }} + + - name: Install Hatch + run: pip install --upgrade hatch + + - name: Lint + if: matrix.python-version == '3.9' && runner.os == 'Linux' + run: hatch run fmt-check && hatch run test:types + + - name: Run tests + run: hatch run test:cov-retry + + - name: Run unit tests with lowest direct dependencies + run: | + hatch run uv pip compile pyproject.toml --resolution lowest-direct --output-file requirements_lowest_direct.txt + hatch -e test env run -- uv pip install -r requirements_lowest_direct.txt + hatch run test:unit + + - name: Nightly - run unit tests with Haystack main branch + if: github.event_name == 'schedule' + run: | + hatch env prune + hatch -e test env run -- uv pip install git+https://github.com/deepset-ai/haystack.git@main + hatch run test:unit + + - name: Send event to Datadog for nightly failures + if: failure() && github.event_name == 'schedule' + uses: ./.github/actions/send_failure + with: + title: | + Core integrations nightly tests failure: ${{ github.workflow }} + api-key: ${{ secrets.CORE_DATADOG_API_KEY }} diff --git a/README.md b/README.md index 68dfffe52..3be849ae7 100644 --- a/README.md +++ b/README.md @@ -55,6 +55,7 @@ Please check out our [Contribution Guidelines](CONTRIBUTING.md) for all the deta | [openrouter-haystack](integrations/openrouter/) | Generator | [![PyPI - Version](https://img.shields.io/pypi/v/openrouter-haystack.svg)](https://pypi.org/project/openrouter-haystack) | [![Test / openrouter](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/openrouter.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/openrouter.yml) | | [opensearch-haystack](integrations/opensearch/) | Document Store | [![PyPI - 
Version](https://img.shields.io/pypi/v/opensearch-haystack.svg)](https://pypi.org/project/opensearch-haystack) | [![Test / opensearch](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/opensearch.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/opensearch.yml) | | [optimum-haystack](integrations/optimum/) | Embedder | [![PyPI - Version](https://img.shields.io/pypi/v/optimum-haystack.svg)](https://pypi.org/project/optimum-haystack) | [![Test / optimum](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/optimum.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/optimum.yml) | +| [paddleocr-haystack](integrations/paddleocr/) | Converter | [![PyPI - Version](https://img.shields.io/pypi/v/paddleocr-haystack.svg)](https://pypi.org/project/paddleocr-haystack) | [![Test / paddleocr](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/paddleocr.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/paddleocr.yml) | | [pinecone-haystack](integrations/pinecone/) | Document Store | [![PyPI - Version](https://img.shields.io/pypi/v/pinecone-haystack.svg?color=orange)](https://pypi.org/project/pinecone-haystack) | [![Test / pinecone](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/pinecone.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/pinecone.yml) | | [pgvector-haystack](integrations/pgvector/) | Document Store | [![PyPI - Version](https://img.shields.io/pypi/v/pgvector-haystack.svg?color=orange)](https://pypi.org/project/pgvector-haystack) | [![Test / pgvector](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/pgvector.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/pgvector.yml) | | 
[qdrant-haystack](integrations/qdrant/) | Document Store | [![PyPI - Version](https://img.shields.io/pypi/v/qdrant-haystack.svg?color=orange)](https://pypi.org/project/qdrant-haystack) | [![Test / qdrant](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/qdrant.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/qdrant.yml) | diff --git a/integrations/paddleocr/LICENSE.txt b/integrations/paddleocr/LICENSE.txt new file mode 100644 index 000000000..137069b82 --- /dev/null +++ b/integrations/paddleocr/LICENSE.txt @@ -0,0 +1,73 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. + +"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. 
+ +"Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. + +2. Grant of Copyright License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: + + (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + +To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. 
+ +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/integrations/paddleocr/README.md b/integrations/paddleocr/README.md new file mode 100644 index 000000000..fc0313086 --- /dev/null +++ b/integrations/paddleocr/README.md @@ -0,0 +1,17 @@ +# paddleocr-haystack + +[![PyPI - Version](https://img.shields.io/pypi/v/paddleocr-haystack.svg)](https://pypi.org/project/paddleocr-haystack) +[![PyPI - Python Version](https://img.shields.io/pypi/pyversions/paddleocr-haystack.svg)](https://pypi.org/project/paddleocr-haystack) + +PaddleOCR integration for Haystack, providing components for text recognition and document parsing. + +- [Integration page](https://haystack.deepset.ai/integrations/paddleocr) +- [Changelog](https://github.com/deepset-ai/haystack-core-integrations/blob/main/integrations/paddleocr/CHANGELOG.md) + +--- + +## Contributing + +Refer to the general [Contribution Guidelines](https://github.com/deepset-ai/haystack-core-integrations/blob/main/CONTRIBUTING.md). + +To run integration tests locally, you need to export the `PADDLEOCR_VL_API_URL` and `AISTUDIO_ACCESS_TOKEN` environment variables. 
diff --git a/integrations/paddleocr/pydoc/config.yml b/integrations/paddleocr/pydoc/config.yml new file mode 100644 index 000000000..f8172055c --- /dev/null +++ b/integrations/paddleocr/pydoc/config.yml @@ -0,0 +1,29 @@ +loaders: + - type: haystack_pydoc_tools.loaders.CustomPythonLoader + search_path: [../src] + modules: [ + "haystack_integrations.components.converters.paddleocr.paddleocr_vl_document_converter", + ] + ignore_when_discovered: ["__init__"] +processors: + - type: filter + expression: + documented_only: true + do_not_filter_modules: false + skip_empty_modules: true + - type: smart + - type: crossref +renderer: + type: haystack_pydoc_tools.renderers.ReadmeIntegrationRenderer + excerpt: PaddleOCR integration for Haystack + category_slug: integrations-api + title: PaddleOCR + slug: integrations-paddleocr + order: 180 + markdown: + descriptive_class_title: false + classdef_code_block: false + descriptive_module_title: true + add_method_class_prefix: true + add_member_class_prefix: false + filename: _readme_paddleocr.md diff --git a/integrations/paddleocr/pydoc/config_docusaurus.yml b/integrations/paddleocr/pydoc/config_docusaurus.yml new file mode 100644 index 000000000..75c6c7e28 --- /dev/null +++ b/integrations/paddleocr/pydoc/config_docusaurus.yml @@ -0,0 +1,28 @@ +loaders: +- ignore_when_discovered: + - __init__ + modules: + - haystack_integrations.components.converters.paddleocr.paddleocr_vl_document_converter + search_path: + - ../src + type: haystack_pydoc_tools.loaders.CustomPythonLoader +processors: +- do_not_filter_modules: false + documented_only: true + expression: null + skip_empty_modules: true + type: filter +- type: smart +- type: crossref +renderer: + description: PaddleOCR integration for Haystack + id: integrations-paddleocr + markdown: + add_member_class_prefix: false + add_method_class_prefix: true + classdef_code_block: false + descriptive_class_title: false + descriptive_module_title: true + filename: paddleocr.md + title: PaddleOCR 
+ type: haystack_pydoc_tools.renderers.DocusaurusRenderer diff --git a/integrations/paddleocr/pyproject.toml b/integrations/paddleocr/pyproject.toml new file mode 100644 index 000000000..73322066b --- /dev/null +++ b/integrations/paddleocr/pyproject.toml @@ -0,0 +1,161 @@ +[build-system] +requires = ["hatchling", "hatch-vcs"] +build-backend = "hatchling.build" + +[project] +name = "paddleocr-haystack" +dynamic = ["version"] +description = 'An integration of PaddleOCR with Haystack' +readme = "README.md" +requires-python = ">=3.9" +license = "Apache-2.0" +keywords = [] +authors = [ + { name = "deepset GmbH", email = "info@deepset.ai" }, +] +classifiers = [ + "Development Status :: 4 - Beta", + "Programming Language :: Python", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", + "Programming Language :: Python :: Implementation :: CPython", + "Programming Language :: Python :: Implementation :: PyPy", +] +dependencies = [ + "haystack-ai>=2.19.0", + "paddleocr>=3.3.2", + "paddlex[serving]>=3.3.10", + "requests>=2.25.0", + "typing-extensions", +] + +[project.urls] +Documentation = "https://github.com/deepset-ai/haystack-core-integrations/tree/main/integrations/paddleocr#readme" +Issues = "https://github.com/deepset-ai/haystack-core-integrations/issues" +Source = "https://github.com/deepset-ai/haystack-core-integrations/tree/main/integrations/paddleocr" +
[tool.hatch.build.targets.wheel] +packages = ["src/haystack_integrations"] + +[tool.hatch.version] +source = "vcs" +tag-pattern = 'integrations\/paddleocr-v(?P<version>.*)' + +[tool.hatch.version.raw-options] +root = "../.."
+git_describe_command = 'git describe --tags --match="integrations/paddleocr-v[0-9]*"' + +[tool.hatch.envs.default] +installer = "uv" +dependencies = ["haystack-pydoc-tools", "ruff"] + +[tool.hatch.envs.default.scripts] +docs = ["pydoc-markdown pydoc/config.yml"] +fmt = "ruff check --fix {args} && ruff format {args}" +fmt-check = "ruff check {args} && ruff format --check {args}" + +[tool.hatch.envs.test] +dependencies = [ + "mypy", + "pillow", + "pip", + "pypdfium2", + "pytest", + "pytest-cov", + "pytest-rerunfailures", +] + +[tool.hatch.envs.test.scripts] +unit = 'pytest -m "not integration" {args:tests}' +integration = 'pytest -m "integration" {args:tests}' +all = 'pytest {args:tests}' +cov-retry = 'pytest --cov=haystack_integrations --reruns 3 --reruns-delay 30 -x {args:tests}' +types = "mypy -p haystack_integrations.components.converters.paddleocr {args}" + +[tool.mypy] +install_types = true +non_interactive = true +check_untyped_defs = true +disallow_incomplete_defs = true + +[tool.ruff] +target-version = "py39" +line-length = 120 + +[tool.ruff.lint] +select = [ + "A", + "ARG", + "B", + "C", + "DTZ", + "E", + "EM", + "F", + "I", + "ICN", + "ISC", + "N", + "PLC", + "PLE", + "PLR", + "PLW", + "Q", + "RUF", + "S", + "T", + "TID", + "UP", + "W", + "YTT", +] +ignore = [ + # Allow non-abstract empty methods in abstract base classes + "B027", + # Ignore checks for possible passwords + "S105", + "S106", + "S107", + # Ignore complexity + "C901", + "PLR0911", + "PLR0912", + "PLR0913", + "PLR0915", + # Misc + "B008", + "S101", +] +unfixable = [ + # Don't touch unused imports + "F401", +] + +[tool.ruff.lint.isort] +known-first-party = ["haystack_integrations"] + +[tool.ruff.lint.flake8-tidy-imports] +ban-relative-imports = "parents" + +[tool.ruff.lint.per-file-ignores] +# Tests can use magic values, assertions, and relative imports +"tests/**/*" = ["PLR2004", "S101", "TID252"] + +[tool.coverage.run] +source = ["haystack_integrations"] +branch = true +parallel = false + 
+[tool.coverage.report] +omit = ["*/tests/*", "*/__init__.py"] +show_missing = true +exclude_lines = ["no cov", "if __name__ == .__main__.:", "if TYPE_CHECKING:"] + +[tool.pytest.ini_options] +addopts = "--strict-markers" +markers = [ + "integration: integration tests", +] +log_cli = true diff --git a/integrations/paddleocr/src/haystack_integrations/components/converters/paddleocr/__init__.py b/integrations/paddleocr/src/haystack_integrations/components/converters/paddleocr/__init__.py new file mode 100644 index 000000000..dd4b18282 --- /dev/null +++ b/integrations/paddleocr/src/haystack_integrations/components/converters/paddleocr/__init__.py @@ -0,0 +1,6 @@ +# SPDX-FileCopyrightText: 2025-present deepset GmbH +# +# SPDX-License-Identifier: Apache-2.0 +from .paddleocr_vl_document_converter import PaddleOCRVLDocumentConverter + +__all__ = ["PaddleOCRVLDocumentConverter"] diff --git a/integrations/paddleocr/src/haystack_integrations/components/converters/paddleocr/paddleocr_vl_document_converter.py b/integrations/paddleocr/src/haystack_integrations/components/converters/paddleocr/paddleocr_vl_document_converter.py new file mode 100644 index 000000000..5bf2079a4 --- /dev/null +++ b/integrations/paddleocr/src/haystack_integrations/components/converters/paddleocr/paddleocr_vl_document_converter.py @@ -0,0 +1,491 @@ +# SPDX-FileCopyrightText: 2025-present deepset GmbH +# +# SPDX-License-Identifier: Apache-2.0 +import base64 +from pathlib import Path +from typing import Any, Literal, Optional, Union + +import requests +from haystack import Document, component, default_from_dict, default_to_dict, logging +from haystack.components.converters.utils import ( + get_bytestream_from_source, + normalize_metadata, +) +from haystack.dataclasses import ByteStream +from haystack.utils import Secret, deserialize_secrets_inplace +from paddlex.inference.serving.schemas.paddleocr_vl import InferRequest as PaddleOCRVLInferRequest # type: ignore +from 
paddlex.inference.serving.schemas.paddleocr_vl import InferResult as PaddleOCRVLInferResult # type: ignore +from paddlex.inference.serving.schemas.shared.ocr import FileType # type: ignore +from typing_extensions import Self, TypeAlias + +logger = logging.getLogger(__name__) + + +FileTypeInput: TypeAlias = Union[Literal["pdf", "image", 0, 1], None] + +# Supported image file extensions +_IMAGE_EXTENSIONS = { + ".jpg", + ".jpeg", + ".png", + ".bmp", + ".tiff", + ".tif", + ".webp", +} +# Supported PDF file extensions +_PDF_EXTENSIONS = {".pdf"} + + +def _infer_file_type_from_source(source: Union[str, Path, ByteStream], bytestream: ByteStream) -> Optional[FileType]: + """ + Infer file type from file extension or MIME type. + + :param source: + Original source (file path, Path object, or ByteStream). + :param bytestream: + ByteStream object containing file metadata. + :returns: + Inferred file type: 0 for PDF, 1 for image, or None if cannot be + determined. + """ + # Try to get extension from file path + file_path: Optional[str] = None + + # Check if source is a file path + if isinstance(source, (str, Path)): + file_path = str(source) + # Check if source is ByteStream and has file_path in metadata + elif isinstance(source, ByteStream) and source.meta: + file_path = source.meta.get("file_path") + # Check ByteStream metadata for file_path + if file_path is None and isinstance(bytestream, ByteStream) and bytestream.meta: + file_path = bytestream.meta.get("file_path") + + # Try to infer from file extension + if file_path: + path_obj = Path(file_path) + extension = path_obj.suffix.lower() + + if extension in _PDF_EXTENSIONS: + return 0 + if extension in _IMAGE_EXTENSIONS: + return 1 + + # Try to infer from MIME type if available + if hasattr(bytestream, "mime_type") and bytestream.mime_type: + mime_type = bytestream.mime_type.lower() + if mime_type == "application/pdf": + return 0 + if mime_type.startswith("image/"): + return 1 + + return None + + +def 
_normalize_file_type(file_type: Optional[FileTypeInput]) -> Optional[FileType]: + """ + Normalize file type input to the numeric format expected by the API. + + :param file_type: + File type input. Can be "pdf" or 0 for PDF, "image" or 1 for image, + or `None` for auto-detection. + :returns: + Normalized file type: 0 for PDF, 1 for image, or `None` for + auto-detection. + """ + if file_type is None: + return None + if isinstance(file_type, str): + if file_type.lower() == "pdf": + return 0 + if file_type.lower() == "image": + return 1 + msg = f"Invalid `file_type` string: {file_type}. Must be 'pdf' or 'image'." + raise ValueError(msg) + if file_type in (0, 1): + return file_type + msg = f"Invalid `file_type` value: {file_type}. Must be 0, 1, 'pdf', 'image', or `None`." + raise ValueError(msg) + + +@component +class PaddleOCRVLDocumentConverter: + """ + This component extracts text from documents using PaddleOCR's large model + document parsing API. + + PaddleOCR-VL is used behind the scenes. 
For more information, please + refer to: + https://www.paddleocr.ai/latest/en/version3.x/algorithm/PaddleOCR-VL/PaddleOCR-VL.html + + **Usage Example:** + + ```python + from haystack.utils import Secret + from haystack_integrations.components.converters.paddleocr import ( + PaddleOCRVLDocumentConverter, + ) + + converter = PaddleOCRVLDocumentConverter( + api_url="http://xxxxx.aistudio-app.com/layout-parsing", + access_token=Secret.from_env_var("AISTUDIO_ACCESS_TOKEN"), + ) + + result = converter.run(sources=["sample.pdf"]) + + documents = result["documents"] + raw_responses = result["raw_paddleocr_responses"] + ``` + """ + + def __init__( + self, + api_url: str, + access_token: Secret = Secret.from_env_var("AISTUDIO_ACCESS_TOKEN"), + file_type: Optional[FileTypeInput] = None, + use_doc_orientation_classify: Optional[bool] = None, + use_doc_unwarping: Optional[bool] = None, + use_layout_detection: Optional[bool] = None, + use_chart_recognition: Optional[bool] = None, + layout_threshold: Optional[Union[float, dict]] = None, + layout_nms: Optional[bool] = None, + layout_unclip_ratio: Optional[Union[float, tuple[float, float], dict]] = None, + layout_merge_bboxes_mode: Optional[Union[str, dict]] = None, + prompt_label: Optional[str] = None, + format_block_content: Optional[bool] = None, + repetition_penalty: Optional[float] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + min_pixels: Optional[int] = None, + max_pixels: Optional[int] = None, + prettify_markdown: Optional[bool] = None, + show_formula_number: Optional[bool] = None, + visualize: Optional[bool] = None, + additional_params: Optional[dict[str, Any]] = None, + ): + """ + Create a `PaddleOCRVLDocumentConverter` component. + + :param api_url: + API URL. 
To obtain the API URL, visit the [PaddleOCR official + website](https://aistudio.baidu.com/paddleocr/task), click the + **API** button in the upper-left corner, choose the example code + for **Large Model document parsing(PaddleOCR-VL)**, and copy the + `API_URL`. + :param access_token: + AI Studio access token. You can obtain it from [this + page](https://aistudio.baidu.com/account/accessToken). + :param file_type: + File type. Can be "pdf" or 0 for PDF files, "image" or 1 for + image files, or `None` for auto-detection. If not specified, the + file type will be inferred from the file extension. + :param use_doc_orientation_classify: + Whether to enable the document orientation classification + function. Enabling this feature allows the input image to be + automatically rotated to the correct orientation. + :param use_doc_unwarping: + Whether to enable the text image unwarping function. Enabling + this feature allows automatic correction of distorted text images. + :param use_layout_detection: + Whether to enable the layout detection function. + :param use_chart_recognition: + Whether to enable the chart recognition function. + :param layout_threshold: + Layout detection threshold. Can be a float or a dict with + page-specific thresholds. + :param layout_nms: + Whether to perform NMS (Non-Maximum Suppression) on layout + detection results. + :param layout_unclip_ratio: + Layout unclip ratio. Can be a float, a tuple of (min, max), or a + dict with page-specific values. + :param layout_merge_bboxes_mode: + Layout merge bounding boxes mode. Can be a string or a dict. + :param prompt_label: + Prompt type for the VLM. Possible values are "ocr", "formula", + "table", and "chart". + :param format_block_content: + Whether to format block content. + :param repetition_penalty: + Repetition penalty parameter used in VLM sampling. + :param temperature: + Temperature parameter used in VLM sampling. + :param top_p: + Top-p parameter used in VLM sampling. 
+ :param min_pixels: + Minimum number of pixels allowed during VLM preprocessing. + :param max_pixels: + Maximum number of pixels allowed during VLM preprocessing. + :param prettify_markdown: + Whether to prettify the output Markdown text. + :param show_formula_number: + Whether to include formula numbers in the output markdown text. + :param visualize: + Whether to return visualization results. + :param additional_params: + Additional parameters for calling the PaddleOCR API. + """ + self.api_url = api_url + self.access_token = access_token + self.file_type = _normalize_file_type(file_type) + self.use_doc_orientation_classify = use_doc_orientation_classify + self.use_doc_unwarping = use_doc_unwarping + self.use_layout_detection = use_layout_detection + self.use_chart_recognition = use_chart_recognition + self.layout_threshold = layout_threshold + self.layout_nms = layout_nms + self.layout_unclip_ratio = layout_unclip_ratio + self.layout_merge_bboxes_mode = layout_merge_bboxes_mode + self.prompt_label = prompt_label + self.format_block_content = format_block_content + self.repetition_penalty = repetition_penalty + self.temperature = temperature + self.top_p = top_p + self.min_pixels = min_pixels + self.max_pixels = max_pixels + self.prettify_markdown = prettify_markdown + self.show_formula_number = show_formula_number + self.visualize = visualize + self.additional_params = additional_params + + def to_dict(self) -> dict[str, Any]: + """ + Serialize the component to a dictionary. + + :returns: + Dictionary with serialized data. 
+ """ + return default_to_dict( + self, + api_url=self.api_url, + access_token=self.access_token.to_dict(), + file_type=self.file_type, + use_doc_orientation_classify=self.use_doc_orientation_classify, + use_doc_unwarping=self.use_doc_unwarping, + use_layout_detection=self.use_layout_detection, + use_chart_recognition=self.use_chart_recognition, + layout_threshold=self.layout_threshold, + layout_nms=self.layout_nms, + layout_unclip_ratio=self.layout_unclip_ratio, + layout_merge_bboxes_mode=self.layout_merge_bboxes_mode, + prompt_label=self.prompt_label, + format_block_content=self.format_block_content, + repetition_penalty=self.repetition_penalty, + temperature=self.temperature, + top_p=self.top_p, + min_pixels=self.min_pixels, + max_pixels=self.max_pixels, + prettify_markdown=self.prettify_markdown, + show_formula_number=self.show_formula_number, + visualize=self.visualize, + additional_params=self.additional_params, + ) + + @classmethod + def from_dict(cls, data: dict[str, Any]) -> Self: + """ + Deserialize the component from a dictionary. + + :param data: + Dictionary to deserialize from. + :returns: + Deserialized component. + """ + deserialize_secrets_inplace(data["init_parameters"], keys=["access_token"]) + return default_from_dict(cls, data) + + def _parse(self, data: bytes, file_type: FileType) -> tuple[str, dict[str, Any]]: + """ + Parse document using PaddleOCR API. + + :param data: + Raw file data as bytes. + :param file_type: + File type (0 for PDF, 1 for image). + :returns: + A tuple containing the extracted text (separated by form feed + characters for multiple pages) and the raw response dictionary. + :raises requests.RequestException: + If the API request fails. + :raises ValueError: + If the API response is invalid or missing required fields. 
+ """ + # Encode file data to base64 + encoded_data = base64.b64encode(data).decode("ascii") + + # Build request payload + request_data = { + "file": encoded_data, + "fileType": file_type, + } + + # Add optional parameters if they are set + if self.use_doc_orientation_classify is not None: + request_data["useDocOrientationClassify"] = self.use_doc_orientation_classify + if self.use_doc_unwarping is not None: + request_data["useDocUnwarping"] = self.use_doc_unwarping + if self.use_layout_detection is not None: + request_data["useLayoutDetection"] = self.use_layout_detection + if self.use_chart_recognition is not None: + request_data["useChartRecognition"] = self.use_chart_recognition + if self.layout_threshold is not None: + request_data["layoutThreshold"] = self.layout_threshold + if self.layout_nms is not None: + request_data["layoutNms"] = self.layout_nms + if self.layout_unclip_ratio is not None: + request_data["layoutUnclipRatio"] = self.layout_unclip_ratio + if self.layout_merge_bboxes_mode is not None: + request_data["layoutMergeBboxesMode"] = self.layout_merge_bboxes_mode + if self.prompt_label is not None: + request_data["promptLabel"] = self.prompt_label + if self.format_block_content is not None: + request_data["formatBlockContent"] = self.format_block_content + if self.repetition_penalty is not None: + request_data["repetitionPenalty"] = self.repetition_penalty + if self.temperature is not None: + request_data["temperature"] = self.temperature + if self.top_p is not None: + request_data["topP"] = self.top_p + if self.min_pixels is not None: + request_data["minPixels"] = self.min_pixels + if self.max_pixels is not None: + request_data["maxPixels"] = self.max_pixels + if self.prettify_markdown is not None: + request_data["prettifyMarkdown"] = self.prettify_markdown + if self.show_formula_number is not None: + request_data["showFormulaNumber"] = self.show_formula_number + if self.visualize is not None: + request_data["visualize"] = self.visualize + if 
self.additional_params is not None: + request_data.update(self.additional_params) + + # Validate input parameters + try: + request = PaddleOCRVLInferRequest(**request_data) + request_dict = request.model_dump(exclude_none=True) + except Exception as e: + msg = f"Invalid request parameters: {e}" + raise ValueError(msg) from e + + # Prepare headers with authentication + access_token_value = self.access_token.resolve_value() if self.access_token else None + headers = {"Content-Type": "application/json"} + if access_token_value: + headers["Authorization"] = f"token {access_token_value}" + + # Make API request + try: + response = requests.post( + self.api_url, + json=request_dict, + headers=headers, + timeout=300, + ) + response.raise_for_status() + except requests.RequestException as e: + logger.error(f"Failed to call PaddleOCR API: {e}") + raise + + # Parse and validate response + try: + response_data = response.json() + except ValueError as e: + msg = f"Invalid JSON response from API: {e}" + raise ValueError(msg) from e + + if "result" not in response_data: + msg = "Response missing 'result' field" + raise ValueError(msg) + + try: + result = PaddleOCRVLInferResult(**response_data["result"]) + except Exception as e: + msg = f"Invalid response format: {e}" + raise ValueError(msg) from e + + # Extract text from markdown in layout parsing results + # Pages are separated by form feed character (\f) for compatibility + # with Haystack's `DocumentSplitter` + text_parts = [] + for layout_result in result.layoutParsingResults: + if layout_result.markdown and layout_result.markdown.text: + text_parts.append(layout_result.markdown.text) + + text = "\f".join(text_parts) if text_parts else "" + + return text, response_data + + @component.output_types(documents=list[Document], raw_paddleocr_responses=list[dict[str, Any]]) + def run( + self, + sources: list[Union[str, Path, ByteStream]], + meta: Optional[Union[dict[str, Any], list[dict[str, Any]]]] = None, + ) -> dict[str, Any]: + 
""" + Convert image or PDF files to Documents. + + :param sources: + List of image or PDF file paths or ByteStream objects. + :param meta: + Optional metadata to attach to the Documents. + This value can be either a list of dictionaries or a single + dictionary. If it's a single dictionary, its content is added to + the metadata of all produced Documents. If it's a list, the length + of the list must match the number of sources, because the two + lists will be zipped. If `sources` contains ByteStream objects, + their `meta` will be added to the output Documents. + + :returns: + A dictionary with the following keys: + - `documents`: A list of created Documents. + - `raw_paddleocr_responses`: A list of raw PaddleOCR API responses. + """ + documents = [] + raw_responses = [] + + meta_list = normalize_metadata(meta, sources_count=len(sources)) + + for source, metadata in zip(sources, meta_list): + try: + bytestream = get_bytestream_from_source(source) + except Exception as e: + logger.warning( + f"Could not read {source}. Skipping it. Error: {e}", + ) + continue + + # Determine file type (either from config or inferred from extension) + if self.file_type is not None: + file_type = self.file_type + else: + file_type = _infer_file_type_from_source(source, bytestream) + if file_type is None: + logger.warning( + f"Could not determine file type for {source}. Skipping it.", + ) + continue + + try: + text, raw_resp = self._parse(bytestream.data, file_type) + except Exception as e: + logger.warning( + f"Could not read {source} and convert it to Document, skipping. Error: {e}", + ) + continue + + if not text: + msg = ( + f"{self.__class__.__name__} could not extract text" + " from the file {source}. Returning an empty document." 
+ ) + logger.warning(msg) + + merged_metadata = {**bytestream.meta, **metadata} + + document = Document(content=text, meta=merged_metadata) + documents.append(document) + + raw_responses.append(raw_resp) + + return {"documents": documents, "raw_paddleocr_responses": raw_responses} diff --git a/integrations/paddleocr/src/haystack_integrations/components/converters/py.typed b/integrations/paddleocr/src/haystack_integrations/components/converters/py.typed new file mode 100644 index 000000000..e69de29bb diff --git a/integrations/paddleocr/tests/__init__.py b/integrations/paddleocr/tests/__init__.py new file mode 100644 index 000000000..d391382c6 --- /dev/null +++ b/integrations/paddleocr/tests/__init__.py @@ -0,0 +1,3 @@ +# SPDX-FileCopyrightText: 2025-present deepset GmbH +# +# SPDX-License-Identifier: Apache-2.0 diff --git a/integrations/paddleocr/tests/test_paddleocr_vl_document_converter.py b/integrations/paddleocr/tests/test_paddleocr_vl_document_converter.py new file mode 100644 index 000000000..77a1c6176 --- /dev/null +++ b/integrations/paddleocr/tests/test_paddleocr_vl_document_converter.py @@ -0,0 +1,784 @@ +# SPDX-FileCopyrightText: 2025-present deepset GmbH +# +# SPDX-License-Identifier: Apache-2.0 +import os +from unittest.mock import MagicMock, patch + +import pypdfium2 as pdfium +import pytest +import requests +from haystack import Document +from haystack.dataclasses import ByteStream +from haystack.utils import Secret +from PIL import Image + +from haystack_integrations.components.converters.paddleocr import ( + PaddleOCRVLDocumentConverter, +) + + +def download_test_file(url, dest_path, timeout=30): + """Download a test file from URL to destination path.""" + try: + response = requests.get(url, timeout=timeout) + response.raise_for_status() + dest_path.write_bytes(response.content) + return True + except Exception as e: + pytest.skip(f"Failed to download test file from {url}: {e}") + return False + + +def create_empty_pdf(tmp_path, filename="test.pdf"): 
+ """Create an empty PDF file using pypdfium2.""" + pdf = pdfium.PdfDocument.new() + pdf.new_page(595, 842) # A4 size in points + pdf.save(tmp_path / filename) + return tmp_path / filename + + +def create_empty_image(tmp_path, filename="test.png"): + """Create an empty image file using PIL.""" + img = Image.new("RGB", (800, 600), color="white") + img.save(tmp_path / filename) + return tmp_path / filename + + +class TestPaddleOCRVLDocumentConverter: + CLASS_TYPE = "haystack_integrations.components.converters.paddleocr.paddleocr_vl_document_converter.PaddleOCRVLDocumentConverter" # noqa: E501 + + def test_init_default(self, monkeypatch): + monkeypatch.setenv("AISTUDIO_ACCESS_TOKEN", "test-access-token") + converter = PaddleOCRVLDocumentConverter(api_url="http://test-api-url.com") + + assert converter.access_token == Secret.from_env_var("AISTUDIO_ACCESS_TOKEN") + assert converter.api_url == "http://test-api-url.com" + assert converter.file_type is None + assert converter.use_doc_orientation_classify is None + assert converter.use_doc_unwarping is None + assert converter.use_layout_detection is None + assert converter.use_chart_recognition is None + assert converter.layout_threshold is None + assert converter.layout_nms is None + assert converter.layout_unclip_ratio is None + assert converter.layout_merge_bboxes_mode is None + assert converter.prompt_label is None + assert converter.format_block_content is None + assert converter.repetition_penalty is None + assert converter.temperature is None + assert converter.top_p is None + assert converter.min_pixels is None + assert converter.max_pixels is None + assert converter.prettify_markdown is None + assert converter.show_formula_number is None + assert converter.visualize is None + assert converter.additional_params is None + + def test_init_with_all_optional_parameters(self): + converter = PaddleOCRVLDocumentConverter( + api_url="http://custom-api-url.com", + access_token=Secret.from_token("test-access-token"), + 
file_type="pdf", + use_doc_orientation_classify=True, + use_doc_unwarping=True, + use_layout_detection=True, + use_chart_recognition=True, + layout_threshold=0.5, + layout_nms=True, + layout_unclip_ratio=1.5, + layout_merge_bboxes_mode="merge", + prompt_label="ocr", + format_block_content=True, + repetition_penalty=1.1, + temperature=0.7, + top_p=0.9, + min_pixels=100, + max_pixels=1000, + prettify_markdown=False, + show_formula_number=True, + visualize=True, + additional_params={}, + ) + + assert converter.api_url == "http://custom-api-url.com" + assert converter.access_token == Secret.from_token("test-access-token") + assert converter.file_type == 0 # "pdf" normalized to 0 + assert converter.use_doc_orientation_classify is True + assert converter.use_doc_unwarping is True + assert converter.use_layout_detection is True + assert converter.use_chart_recognition is True + assert converter.layout_threshold == 0.5 + assert converter.layout_nms is True + assert converter.layout_unclip_ratio == 1.5 + assert converter.layout_merge_bboxes_mode == "merge" + assert converter.prompt_label == "ocr" + assert converter.format_block_content is True + assert converter.repetition_penalty == 1.1 + assert converter.temperature == 0.7 + assert converter.top_p == 0.9 + assert converter.min_pixels == 100 + assert converter.max_pixels == 1000 + assert converter.prettify_markdown is False + assert converter.show_formula_number is True + assert converter.visualize is True + assert converter.additional_params == {} + + def test_to_dict(self, monkeypatch): + monkeypatch.setenv("AISTUDIO_ACCESS_TOKEN", "test-access-token") + converter = PaddleOCRVLDocumentConverter("http://test-api-url.com") + converter_dict = converter.to_dict() + + assert converter_dict == { + "type": self.CLASS_TYPE, + "init_parameters": { + "api_url": "http://test-api-url.com", + "access_token": { + "env_vars": ["AISTUDIO_ACCESS_TOKEN"], + "strict": True, + "type": "env_var", + }, + "file_type": None, + 
"use_doc_orientation_classify": None, + "use_doc_unwarping": None, + "use_layout_detection": None, + "use_chart_recognition": None, + "layout_threshold": None, + "layout_nms": None, + "layout_unclip_ratio": None, + "layout_merge_bboxes_mode": None, + "prompt_label": None, + "format_block_content": None, + "repetition_penalty": None, + "temperature": None, + "top_p": None, + "min_pixels": None, + "max_pixels": None, + "prettify_markdown": None, + "show_formula_number": None, + "visualize": None, + "additional_params": None, + }, + } + + def test_to_dict_with_custom_parameters(self, monkeypatch): + monkeypatch.setenv("CUSTOM_ACCESS_TOKEN", "test-access-token") + converter = PaddleOCRVLDocumentConverter( + api_url="http://custom-api-url.com", + access_token=Secret.from_env_var("CUSTOM_ACCESS_TOKEN", strict=False), + file_type="image", + use_doc_orientation_classify=True, + use_doc_unwarping=False, + use_layout_detection=True, + use_chart_recognition=False, + layout_threshold=0.7, + layout_nms=False, + layout_unclip_ratio=2.0, + layout_merge_bboxes_mode="separate", + prompt_label="formula", + format_block_content=False, + repetition_penalty=1.2, + temperature=0.8, + top_p=0.95, + min_pixels=200, + max_pixels=2000, + prettify_markdown=True, + show_formula_number=True, + visualize=False, + additional_params={}, + ) + converter_dict = converter.to_dict() + + assert converter_dict == { + "type": self.CLASS_TYPE, + "init_parameters": { + "api_url": "http://custom-api-url.com", + "access_token": { + "type": "env_var", + "env_vars": ["CUSTOM_ACCESS_TOKEN"], + "strict": False, + }, + "file_type": 1, # "image" normalized to 1 + "use_doc_orientation_classify": True, + "use_doc_unwarping": False, + "use_layout_detection": True, + "use_chart_recognition": False, + "layout_threshold": 0.7, + "layout_nms": False, + "layout_unclip_ratio": 2.0, + "layout_merge_bboxes_mode": "separate", + "prompt_label": "formula", + "format_block_content": False, + "repetition_penalty": 1.2, + 
"temperature": 0.8, + "top_p": 0.95, + "min_pixels": 200, + "max_pixels": 2000, + "prettify_markdown": True, + "show_formula_number": True, + "visualize": False, + "additional_params": {}, + }, + } + + def test_from_dict(self, monkeypatch): + monkeypatch.setenv("AISTUDIO_ACCESS_TOKEN", "test-access-token") + converter_dict = { + "type": self.CLASS_TYPE, + "init_parameters": { + "api_url": "http://test-api-url.com", + "access_token": { + "env_vars": ["AISTUDIO_ACCESS_TOKEN"], + "strict": True, + "type": "env_var", + }, + "file_type": None, + "use_doc_orientation_classify": None, + "use_doc_unwarping": None, + "use_layout_detection": None, + "use_chart_recognition": None, + "layout_threshold": None, + "layout_nms": None, + "layout_unclip_ratio": None, + "layout_merge_bboxes_mode": None, + "prompt_label": None, + "format_block_content": None, + "repetition_penalty": None, + "temperature": None, + "top_p": None, + "min_pixels": None, + "max_pixels": None, + "prettify_markdown": None, + "show_formula_number": None, + "visualize": None, + "additional_params": None, + }, + } + + converter = PaddleOCRVLDocumentConverter.from_dict(converter_dict) + + assert converter.api_url == "http://test-api-url.com" + assert converter.file_type is None + assert converter.use_doc_orientation_classify is None + assert converter.use_doc_unwarping is None + assert converter.use_layout_detection is None + assert converter.use_chart_recognition is None + assert converter.layout_threshold is None + assert converter.layout_nms is None + assert converter.layout_unclip_ratio is None + assert converter.layout_merge_bboxes_mode is None + assert converter.prompt_label is None + assert converter.format_block_content is None + assert converter.repetition_penalty is None + assert converter.temperature is None + assert converter.top_p is None + assert converter.min_pixels is None + assert converter.max_pixels is None + assert converter.prettify_markdown is None + assert converter.show_formula_number is 
None + assert converter.visualize is None + assert converter.additional_params is None + + def test_from_dict_with_custom_parameters(self, monkeypatch): + monkeypatch.setenv("AISTUDIO_ACCESS_TOKEN", "test-access-token") + converter_dict = { + "type": self.CLASS_TYPE, + "init_parameters": { + "api_url": "http://custom-api-url.com", + "access_token": { + "env_vars": ["AISTUDIO_ACCESS_TOKEN"], + "strict": True, + "type": "env_var", + }, + "file_type": 0, + "use_doc_orientation_classify": True, + "use_doc_unwarping": False, + "use_layout_detection": True, + "use_chart_recognition": False, + "layout_threshold": 0.6, + "layout_nms": True, + "layout_unclip_ratio": 1.8, + "layout_merge_bboxes_mode": "merge", + "prompt_label": "table", + "format_block_content": True, + "repetition_penalty": 1.1, + "temperature": 0.9, + "top_p": 0.8, + "min_pixels": 150, + "max_pixels": 1500, + "prettify_markdown": False, + "show_formula_number": True, + "visualize": True, + "additional_params": {}, + }, + } + + converter = PaddleOCRVLDocumentConverter.from_dict(converter_dict) + + assert converter.api_url == "http://custom-api-url.com" + assert converter.file_type == 0 + assert converter.use_doc_orientation_classify is True + assert converter.use_doc_unwarping is False + assert converter.use_layout_detection is True + assert converter.use_chart_recognition is False + assert converter.layout_threshold == 0.6 + assert converter.layout_nms is True + assert converter.layout_unclip_ratio == 1.8 + assert converter.layout_merge_bboxes_mode == "merge" + assert converter.prompt_label == "table" + assert converter.format_block_content is True + assert converter.repetition_penalty == 1.1 + assert converter.temperature == 0.9 + assert converter.top_p == 0.8 + assert converter.min_pixels == 150 + assert converter.max_pixels == 1500 + assert converter.prettify_markdown is False + assert converter.show_formula_number is True + assert converter.visualize is True + assert converter.additional_params == {} + 
+ @pytest.fixture + def mock_ocr_response(self): + """Create a mock PaddleOCR response""" + mock_response = { + "logId": "123", + "errorCode": "0", + "errorMsg": "Success", + "result": { + "layoutParsingResults": [ + { + "markdown": {"text": "# Sample Document\n\nThis is page 1."}, + "prunedResult": {}, + } + ], + "dataInfo": { + "width": 1024, + "height": 1024, + "type": "image", + }, + }, + } + return mock_response + + @pytest.fixture + def mock_ocr_response_with_multiple_pages(self): + """Create a mock PaddleOCR response with multiple pages""" + mock_response = { + "logId": "123", + "errorCode": "0", + "errorMsg": "Success", + "result": { + "layoutParsingResults": [ + { + "markdown": {"text": "# Page 1"}, + "prunedResult": {}, + }, + { + "markdown": {"text": "# Page 2"}, + "prunedResult": {}, + }, + ], + "dataInfo": { + "numPages": 2, + "pages": [ + {"width": 512, "height": 512}, + {"width": 512, "height": 512}, + ], + "type": "pdf", + }, + }, + } + return mock_response + + @pytest.fixture + def integration_enabled(self): + """Check if integration tests should run.""" + return bool(os.environ.get("PADDLEOCR_VL_API_URL") and os.environ.get("AISTUDIO_ACCESS_TOKEN")) + + @pytest.mark.parametrize( + "source_type", + ["file_path_str", "path_object", "bytestream"], + ) + def test_run_with_local_sources(self, mock_ocr_response, tmp_path, source_type): + """Test processing with local source types (str, Path, ByteStream)""" + converter = PaddleOCRVLDocumentConverter( + api_url="http://test-api-url.com", access_token=Secret.from_token("test-access-token") + ) + + # Create temporary file + test_file = create_empty_image(tmp_path, "test.png") + + # Create the source based on type + if source_type == "file_path_str": + source = str(test_file) + elif source_type == "path_object": + source = test_file + else: # bytestream + source = ByteStream(data=test_file.read_bytes(), meta={"file_path": str(test_file)}) + + with patch("requests.post") as mock_post: + mock_response = 
MagicMock() + mock_response.status_code = 200 + mock_response.json.return_value = mock_ocr_response + mock_post.return_value = mock_response + + result = converter.run(sources=[source]) + + assert len(result["documents"]) == 1 + assert isinstance(result["documents"][0], Document) + assert result["documents"][0].content == "# Sample Document\n\nThis is page 1." + assert len(result["raw_paddleocr_responses"]) == 1 + assert result["raw_paddleocr_responses"][0] == mock_ocr_response + + def test_run_with_multiple_sources(self, mock_ocr_response, tmp_path): + """Test processing with multiple source types""" + converter = PaddleOCRVLDocumentConverter( + api_url="http://test-api-url.com", access_token=Secret.from_token("test-access-token") + ) + + # Create temporary files + test_file1 = create_empty_image(tmp_path, "test1.png") + test_file2 = create_empty_image(tmp_path, "test2.png") + + with patch("requests.post") as mock_post: + mock_response = MagicMock() + mock_response.status_code = 200 + mock_response.json.return_value = mock_ocr_response + mock_post.return_value = mock_response + + sources = [ + str(test_file1), + test_file2, + ByteStream(data=test_file1.read_bytes(), meta={"file_path": str(test_file1)}), + ] + result = converter.run(sources=sources) + + assert len(result["documents"]) == 3 + assert all(isinstance(doc, Document) for doc in result["documents"]) + assert len(result["raw_paddleocr_responses"]) == 3 + + def test_run_handles_api_error(self, mock_ocr_response, tmp_path): + """Test error handling when API fails""" + converter = PaddleOCRVLDocumentConverter( + api_url="http://test-api-url.com", access_token=Secret.from_token("test-access-token") + ) + + test_file1 = create_empty_image(tmp_path, "test1.png") + test_file2 = create_empty_image(tmp_path, "test2.png") + test_file3 = create_empty_image(tmp_path, "test3.png") + + with patch("requests.post") as mock_post: + error_response = MagicMock() + error_response.status_code = 404 + 
error_response.raise_for_status.side_effect = requests.HTTPError("404 Client Error") + + # First call succeeds, second fails, third succeeds + mock_post.side_effect = [ + MagicMock(status_code=200, json=lambda: mock_ocr_response), + error_response, + MagicMock(status_code=200, json=lambda: mock_ocr_response), + ] + + sources = [str(test_file1), str(test_file2), str(test_file3)] + result = converter.run(sources=sources) + + # Should only return 2 documents (failed source skipped) + assert len(result["documents"]) == 2 + assert len(result["raw_paddleocr_responses"]) == 2 + + def test_run_with_meta_single_dict(self, mock_ocr_response, tmp_path): + """Test that meta parameter with single dict is applied to all documents""" + converter = PaddleOCRVLDocumentConverter( + api_url="http://test-api-url.com", access_token=Secret.from_token("test-access-token") + ) + + test_file1 = create_empty_image(tmp_path, "test1.png") + test_file2 = create_empty_image(tmp_path, "test2.png") + + with patch("requests.post") as mock_post: + mock_response = MagicMock() + mock_response.status_code = 200 + mock_response.json.return_value = mock_ocr_response + mock_post.return_value = mock_response + + sources = [str(test_file1), str(test_file2)] + result = converter.run(sources=sources, meta={"department": "engineering", "year": 2024}) + + assert len(result["documents"]) == 2 + # Both documents should have the same metadata + for doc in result["documents"]: + assert doc.meta["department"] == "engineering" + assert doc.meta["year"] == 2024 + # File path metadata should still be present + assert "file_path" in doc.meta + + def test_run_with_meta_list_of_dicts(self, mock_ocr_response, tmp_path): + """Test that meta parameter with list of dicts applies each dict to corresponding document""" + converter = PaddleOCRVLDocumentConverter( + api_url="http://test-api-url.com", access_token=Secret.from_token("test-access-token") + ) + + test_file1 = create_empty_image(tmp_path, "test1.png") + test_file2 = 
create_empty_image(tmp_path, "test2.png") + + with patch("requests.post") as mock_post: + mock_response = MagicMock() + mock_response.status_code = 200 + mock_response.json.return_value = mock_ocr_response + mock_post.return_value = mock_response + + sources = [str(test_file1), str(test_file2)] + result = converter.run( + sources=sources, + meta=[ + {"author": "Alice", "category": "report"}, + {"author": "Bob", "category": "invoice"}, + ], + ) + + assert len(result["documents"]) == 2 + # First document + assert result["documents"][0].meta["author"] == "Alice" + assert result["documents"][0].meta["category"] == "report" + # Second document + assert result["documents"][1].meta["author"] == "Bob" + assert result["documents"][1].meta["category"] == "invoice" + # File path metadata should still be present in both + assert "file_path" in result["documents"][0].meta + assert "file_path" in result["documents"][1].meta + + def test_run_with_meta_none(self, mock_ocr_response, tmp_path): + """Test that meta parameter with `None` works correctly""" + converter = PaddleOCRVLDocumentConverter( + api_url="http://test-api-url.com", access_token=Secret.from_token("test-access-token") + ) + + test_file = create_empty_image(tmp_path, "test.png") + + with patch("requests.post") as mock_post: + mock_response = MagicMock() + mock_response.status_code = 200 + mock_response.json.return_value = mock_ocr_response + mock_post.return_value = mock_response + + sources = [str(test_file)] + result = converter.run(sources=sources, meta=None) + + assert len(result["documents"]) == 1 + # Only file path metadata should be present + assert "file_path" in result["documents"][0].meta + + def test_file_type_auto_detection_pdf(self, mock_ocr_response_with_multiple_pages, tmp_path): + """Test that file_type is automatically detected as PDF from .pdf extension""" + converter = PaddleOCRVLDocumentConverter( + api_url="http://test-api-url.com", access_token=Secret.from_token("test-access-token") + ) + + # 
Create a PDF file + pdf_file = create_empty_pdf(tmp_path, "test.pdf") + + with patch("requests.post") as mock_post: + mock_response = MagicMock() + mock_response.status_code = 200 + mock_response.json.return_value = mock_ocr_response_with_multiple_pages + mock_post.return_value = mock_response + + result = converter.run(sources=[str(pdf_file)]) + + assert len(result["documents"]) == 1 + # Verify that the correct file type (0 for PDF) was used in the API call + call_args = mock_post.call_args[1]["json"] + assert call_args["fileType"] == 0 # Should be 0 for PDF + + def test_file_type_auto_detection_image(self, mock_ocr_response, tmp_path): + """Test that file_type is automatically detected as image from .png extension""" + converter = PaddleOCRVLDocumentConverter( + api_url="http://test-api-url.com", access_token=Secret.from_token("test-access-token") + ) + + # Create an image file + image_file = create_empty_image(tmp_path, "test.png") + + with patch("requests.post") as mock_post: + mock_response = MagicMock() + mock_response.status_code = 200 + mock_response.json.return_value = mock_ocr_response + mock_post.return_value = mock_response + + result = converter.run(sources=[str(image_file)]) + + assert len(result["documents"]) == 1 + # Verify that the correct file type (1 for image) was used in the API call + call_args = mock_post.call_args[1]["json"] + assert call_args["fileType"] == 1 # Should be 1 for image + + def test_file_type_manual_specification_pdf(self, mock_ocr_response_with_multiple_pages, tmp_path): + """Test that manually specified file_type overrides auto-detection""" + converter = PaddleOCRVLDocumentConverter( + api_url="http://test-api-url.com", + access_token=Secret.from_token("test-access-token"), + file_type="pdf", # Manually specify as PDF + ) + + # Create an image file (which would normally auto-detect as image) + image_file = create_empty_image(tmp_path, "test.png") + + with patch("requests.post") as mock_post: + mock_response = MagicMock() + 
mock_response.status_code = 200 + mock_response.json.return_value = mock_ocr_response_with_multiple_pages + mock_post.return_value = mock_response + + result = converter.run(sources=[str(image_file)]) + + assert len(result["documents"]) == 1 + # Verify that the manually specified file type (0 for PDF) was used + call_args = mock_post.call_args[1]["json"] + assert call_args["fileType"] == 0 # Should be 0 (PDF) despite being a PNG file + + def test_file_type_manual_specification_image(self, mock_ocr_response, tmp_path): + """Test that manually specified file_type overrides auto-detection""" + converter = PaddleOCRVLDocumentConverter( + api_url="http://test-api-url.com", + access_token=Secret.from_token("test-access-token"), + file_type=1, # Manually specify as image (numeric) + ) + + # Create a PDF file (which would normally auto-detect as PDF) + pdf_file = create_empty_pdf(tmp_path, "test.pdf") + + with patch("requests.post") as mock_post: + mock_response = MagicMock() + mock_response.status_code = 200 + mock_response.json.return_value = mock_ocr_response + mock_post.return_value = mock_response + + result = converter.run(sources=[str(pdf_file)]) + + assert len(result["documents"]) == 1 + # Verify that the manually specified file type (1 for image) was used + call_args = mock_post.call_args[1]["json"] + assert call_args["fileType"] == 1 # Should be 1 (image) despite being a PDF file + + def test_file_type_auto_detection_unknown_extension(self, mock_ocr_response, tmp_path): + """Test that unknown file extensions result in skipping the file""" + converter = PaddleOCRVLDocumentConverter( + api_url="http://test-api-url.com", access_token=Secret.from_token("test-access-token") + ) + + # Create a file with unknown extension + unknown_file = tmp_path / "test.unknown" + unknown_file.write_bytes(b"dummy data") + + with patch("requests.post") as mock_post: + mock_response = MagicMock() + mock_response.status_code = 200 + mock_response.json.return_value = mock_ocr_response + 
mock_post.return_value = mock_response + + result = converter.run(sources=[str(unknown_file)]) + + # Should skip the file due to unknown extension + assert len(result["documents"]) == 0 + assert len(result["raw_paddleocr_responses"]) == 0 + # requests.post should not be called at all + mock_post.assert_not_called() + + def test_file_type_auto_detection_bytestream(self, mock_ocr_response, tmp_path): + """Test file_type auto-detection with ByteStream input""" + converter = PaddleOCRVLDocumentConverter( + api_url="http://test-api-url.com", access_token=Secret.from_token("test-access-token") + ) + + # Create files + pdf_file = create_empty_pdf(tmp_path, "test.pdf") + image_file = create_empty_image(tmp_path, "test.png") + + with patch("requests.post") as mock_post: + mock_response = MagicMock() + mock_response.status_code = 200 + mock_response.json.return_value = mock_ocr_response + mock_post.return_value = mock_response + + # Test PDF ByteStream + pdf_bytestream = ByteStream(data=pdf_file.read_bytes(), meta={"file_path": str(pdf_file)}) + result_pdf = converter.run(sources=[pdf_bytestream]) + + # Test image ByteStream + image_bytestream = ByteStream(data=image_file.read_bytes(), meta={"file_path": str(image_file)}) + result_image = converter.run(sources=[image_bytestream]) + + # Both should succeed + assert len(result_pdf["documents"]) == 1 + assert len(result_image["documents"]) == 1 + + # Verify API calls used correct file types + assert mock_post.call_count == 2 + calls = mock_post.call_args_list + # First call should be for PDF (fileType=0) + assert calls[0][1]["json"]["fileType"] == 0 + # Second call should be for image (fileType=1) + assert calls[1][1]["json"]["fileType"] == 1 + + @pytest.mark.skipif( + not os.environ.get("PADDLEOCR_VL_API_URL") or not os.environ.get("AISTUDIO_ACCESS_TOKEN"), + reason="Export env vars `PADDLEOCR_VL_API_URL` and `AISTUDIO_ACCESS_TOKEN` to run this test.", + ) + @pytest.mark.integration + @pytest.mark.parametrize( + 
"test_files,expected_docs", + [ + ( + [ + ( + "https://paddle-model-ecology.bj.bcebos.com/paddlex/serving/pipeline_data/ppchatocr/contract.pdf", + "contract.pdf", + ) + ], + 1, + ), + ( + [ + ( + "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_ocr_002.png", + "general_ocr_002.png", + ) + ], + 1, + ), + ( + [ + ( + "https://paddle-model-ecology.bj.bcebos.com/paddlex/serving/pipeline_data/ppchatocr/contract.pdf", + "contract.pdf", + ), + ( + "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_ocr_002.png", + "general_ocr_002.png", + ), + ], + 2, + ), + ], + ids=["pdf_only", "image_only", "mixed_pdf_image"], + ) + def test_integration_run_with_files(self, tmp_path, test_files, expected_docs): + """Integration test with real API call using various file types""" + # Download all test files + source_files = [] + for url, filename in test_files: + file_path = tmp_path / filename + if download_test_file(url, file_path): + source_files.append(str(file_path)) + + if not source_files: + pytest.skip("Failed to download any test files") + + converter = PaddleOCRVLDocumentConverter(api_url=os.environ["PADDLEOCR_VL_API_URL"]) + + result = converter.run(sources=source_files) + + assert len(result["documents"]) == expected_docs + assert all(isinstance(doc, Document) for doc in result["documents"]) + assert all(len(doc.content) > 0 for doc in result["documents"]) + assert "raw_paddleocr_responses" in result + assert len(result["raw_paddleocr_responses"]) == expected_docs