diff --git a/docs/02_guides/05_scrapy.mdx b/docs/02_guides/05_scrapy.mdx
index 98526e65..35b6bb5e 100644
--- a/docs/02_guides/05_scrapy.mdx
+++ b/docs/02_guides/05_scrapy.mdx
@@ -7,60 +7,64 @@ import CodeBlock from '@theme/CodeBlock';
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
-import UnderscoreMainExample from '!!raw-loader!./code/scrapy_src/__main__.py';
-import MainExample from '!!raw-loader!./code/scrapy_src/main.py';
-import ItemsExample from '!!raw-loader!./code/scrapy_src/items.py';
-import SettingsExample from '!!raw-loader!./code/scrapy_src/settings.py';
-import TitleSpiderExample from '!!raw-loader!./code/scrapy_src/spiders/title.py';
+import UnderscoreMainExample from '!!raw-loader!./code/scrapy_project/src/__main__.py';
+import MainExample from '!!raw-loader!./code/scrapy_project/src/main.py';
+import ItemsExample from '!!raw-loader!./code/scrapy_project/src/items.py';
+import SpidersExample from '!!raw-loader!./code/scrapy_project/src/spiders/title.py';
+import SettingsExample from '!!raw-loader!./code/scrapy_project/src/settings.py';
-[Scrapy](https://scrapy.org/) is an open-source web scraping framework written in Python. It provides a complete set of tools for web scraping, including the ability to define how to extract data from websites, handle pagination and navigation.
+[Scrapy](https://scrapy.org/) is an open-source web scraping framework for Python. It provides tools for defining scrapers, extracting data from web pages, following links, and handling pagination. With the Apify SDK, Scrapy projects can be converted into Apify [Actors](https://docs.apify.com/platform/actors), integrated with Apify [storages](https://docs.apify.com/platform/storage), and executed on the Apify [platform](https://docs.apify.com/platform).
-:::tip
+## Integrating Scrapy with the Apify platform
-Our CLI now supports transforming Scrapy projects into Apify Actors with a single command! Check out the [Scrapy migration guide](https://docs.apify.com/cli/docs/integrating-scrapy) for more information.
+The Apify SDK provides an Apify-Scrapy integration. The main challenge is to combine two asynchronous frameworks that use different event loop implementations. Scrapy uses [Twisted](https://twisted.org/) for asynchronous execution, while the Apify SDK is based on [asyncio](https://docs.python.org/3/library/asyncio.html). The key step is to install Twisted's `asyncioreactor`, which runs Twisted's event loop on top of asyncio. This allows both Twisted and asyncio to share a single event loop, enabling a Scrapy spider to run as an Apify Actor with minimal modifications.
-:::
+
+<CodeBlock className="language-python" title="__main__.py">
+    {UnderscoreMainExample}
+</CodeBlock>
+
-Some of the key features of Scrapy for web scraping include:
+In this setup, `apify.scrapy.initialize_logging` configures an Apify log formatter and reconfigures the loggers to ensure consistent logging across Scrapy, the Apify SDK, and other libraries. `apify.scrapy.run_scrapy_actor` then bridges asyncio coroutines with Twisted's reactor, so the Actor's main coroutine, which runs the Scrapy spider, can be executed.
-- **Request and response handling** - Scrapy provides an easy-to-use interface for making HTTP requests and handling responses,
-allowing you to navigate through web pages and extract data.
-- **Robust Spider framework** - Scrapy has a spider framework that allows you to define how to scrape data from websites,
-including how to follow links, how to handle pagination, and how to parse the data.
-- **Built-in data extraction** - Scrapy includes built-in support for data extraction using XPath and CSS selectors,
-allowing you to easily extract data from HTML and XML documents.
-- **Integration with other tool** - Scrapy can be integrated with other Python tools like BeautifulSoup and Selenium for more advanced scraping tasks.
+Make sure the `SCRAPY_SETTINGS_MODULE` environment variable is set to the path of the Scrapy settings module. The `Actor` class also uses this variable to detect that the project is a Scrapy project and adjusts its behavior accordingly (for example, it does not call `sys.exit` on exit when running with Scrapy).
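+
+For illustration, a minimal sketch of setting the variable from Python, assuming the settings module lives at `src.settings` as in the example project below:
+
+```python
+import os
+
+# Point Scrapy (and the Apify SDK) to the project's settings module.
+# Adjust 'src.settings' to match your own project layout.
+os.environ['SCRAPY_SETTINGS_MODULE'] = 'src.settings'
+```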
-## Using Scrapy template
+
+<CodeBlock className="language-python" title="main.py">
+    {MainExample}
+</CodeBlock>
+
-The fastest way to start using Scrapy in Apify Actors is by leveraging the [Scrapy Actor template](https://apify.com/templates/categories/python). This template provides a pre-configured structure and setup necessary to integrate Scrapy into your Actors seamlessly. It includes: setting up the Scrapy settings, `asyncio` reactor, Actor logger, and item pipeline as necessary to make Scrapy spiders run in Actors and save their outputs in Apify datasets.
+Within the Actor's main coroutine, the Actor's input is processed as usual. The function `apify.scrapy.apply_apify_settings` is then used to configure Scrapy settings with Apify-specific components before the spider is executed. The key components and other helper functions are described in the next section.
-## Manual setup
+## Key integration components
-If you prefer not to use the template, you will need to manually configure several components to integrate Scrapy with the Apify SDK.
+The Apify SDK provides several custom components to support integration with the Apify platform (a short illustrative snippet follows the list):
-### Event loop & reactor
+- [`apify.scrapy.ApifyScheduler`](https://docs.apify.com/sdk/python/reference/class/ApifyScheduler) - Replaces Scrapy's default [scheduler](https://docs.scrapy.org/en/latest/topics/scheduler.html) with one that uses Apify's [request queue](https://docs.apify.com/platform/storage/request-queue) for storing requests. It manages enqueuing, dequeuing, and maintaining the state and priority of requests.
+- [`apify.scrapy.ActorDatasetPushPipeline`](https://docs.apify.com/sdk/python/reference/class/ActorDatasetPushPipeline) - A Scrapy [item pipeline](https://docs.scrapy.org/en/latest/topics/item-pipeline.html) that pushes scraped items to Apify's [dataset](https://docs.apify.com/platform/storage/dataset). When enabled, every item produced by the spider is sent to the dataset.
+- [`apify.scrapy.ApifyHttpProxyMiddleware`](https://docs.apify.com/sdk/python/reference/class/ApifyHttpProxyMiddleware) - A Scrapy [middleware](https://docs.scrapy.org/en/latest/topics/downloader-middleware.html) that manages proxy configurations. This middleware replaces Scrapy's default `HttpProxyMiddleware` to facilitate the use of Apify's proxy service.
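+
+As a rough illustration, the sketch below calls `apply_apify_settings` and prints which scheduler, item pipelines, and downloader middlewares end up configured; the exact dotted paths and priority values are filled in by the helper, so the printed values are informational only:
+
+```python
+from apify.scrapy import apply_apify_settings
+
+# Load the Scrapy project settings (via SCRAPY_SETTINGS_MODULE) and override
+# them with the Apify components described above. No proxy is configured here.
+settings = apply_apify_settings(proxy_config=None)
+
+# The returned object is a regular scrapy.settings.Settings instance, so the
+# scheduler, item pipelines, and downloader middlewares can be inspected.
+print(settings.get('SCHEDULER'))
+print(settings.getdict('ITEM_PIPELINES'))
+print(settings.getdict('DOWNLOADER_MIDDLEWARES'))
+```
+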
-The Apify SDK is built on Python's asynchronous [`asyncio`](https://docs.python.org/3/library/asyncio.html) library, whereas Scrapy uses [`twisted`](https://twisted.org/) for its asynchronous operations. To make these two frameworks work together, you need to:
+Additional helper functions in the [`apify.scrapy`](https://github.com/apify/apify-sdk-python/tree/master/src/apify/scrapy) subpackage include:
-- Set the [`AsyncioSelectorReactor`](https://docs.scrapy.org/en/latest/topics/asyncio.html#installing-the-asyncio-reactor) in Scrapy's project settings: This reactor is `twisted`'s implementation of the `asyncio` event loop, enabling compatibility between the two libraries.
-- Install [`nest_asyncio`](https://pypi.org/project/nest-asyncio/): The `nest_asyncio` package allows the asyncio event loop to run within an already running loop, which is essential for integration with the Apify SDK.
+- `apply_apify_settings` - Applies Apify-specific components to Scrapy settings.
+- `to_apify_request` and `to_scrapy_request` - Convert between Apify and Scrapy request objects (see the round-trip sketch after this list).
+- `initialize_logging` - Configures logging for the Actor environment.
+- `run_scrapy_actor` - Bridges asyncio and Twisted event loops.
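+
+A minimal round-trip sketch of the request conversion helpers; the `Spider` instance is only needed so the request can be serialized and restored for that spider:
+
+```python
+from scrapy import Request, Spider
+
+from apify.scrapy import to_apify_request, to_scrapy_request
+
+spider = Spider(name='example')
+scrapy_request = Request(url='https://example.com')
+
+# Convert the Scrapy request to an Apify request (returns None if the conversion fails).
+apify_request = to_apify_request(scrapy_request, spider=spider)
+
+# Convert it back to a Scrapy request.
+if apify_request is not None:
+    restored_request = to_scrapy_request(apify_request, spider=spider)
+```
+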
-By making these adjustments, you can ensure collaboration between `twisted`-based Scrapy and the `asyncio`-based Apify SDK.
+## Create a new Apify-Scrapy project
-### Other components
+The simplest way to start using Scrapy in Apify Actors is to use the [Scrapy Actor template](https://apify.com/templates/python-scrapy). The template provides a pre-configured project structure and setup that includes all necessary components to run Scrapy spiders as Actors and store their output in Apify datasets. If you prefer manual setup, refer to the example Actor section below for configuration details.
-We also prepared other Scrapy components to work with Apify SDK, they are available in the [`apify/scrapy`](https://github.com/apify/apify-sdk-python/tree/master/src/apify/scrapy) sub-package. These components include:
+## Wrapping an existing Scrapy project
-- `ApifyScheduler`: A Scrapy scheduler that uses the Apify Request Queue to manage requests.
-- `ApifyHttpProxyMiddleware`: A Scrapy middleware for working with Apify proxies.
-- `ActorDatasetPushPipeline`: A Scrapy item pipeline that pushes scraped items into the Apify dataset.
+The Apify CLI supports converting an existing Scrapy project into an Apify Actor with a single command. The CLI expects the project to follow the standard Scrapy layout (including a `scrapy.cfg` file in the project root). During the wrapping process, the CLI:
-The module contains other helper functions, like `apply_apify_settings` for applying these components to Scrapy settings, and `to_apify_request` and `to_scrapy_request` for converting between Apify and Scrapy request objects.
+- Creates the necessary files and directories for an Apify Actor.
+- Installs the Apify SDK and required dependencies.
+- Updates Scrapy settings to include Apify-specific components.
+
+For further details, see the [Scrapy migration guide](https://docs.apify.com/cli/docs/integrating-scrapy).
## Example Actor
-Here is an example of a Scrapy Actor that scrapes the titles of web pages and enqueues all links found on each page. This example is identical to the one provided in the Apify Actor templates.
+The following example demonstrates a Scrapy Actor that scrapes page titles and enqueues links found on each page. This example aligns with the structure provided in the Apify Actor templates.
@@ -68,29 +72,36 @@ Here is an example of a Scrapy Actor that scrapes the titles of web pages and en
{UnderscoreMainExample}
-
+
{MainExample}
-
+
- {ItemsExample}
+ {SettingsExample}
-
+
- {SettingsExample}
+ {ItemsExample}
-
+
- {TitleSpiderExample}
+ {SpidersExample}
## Conclusion
-In this guide you learned how to use Scrapy in Apify Actors. You can now start building your own web scraping projects
-using Scrapy, the Apify SDK and host them on the Apify platform. See the [Actor templates](https://apify.com/templates/categories/python) to get started with your own scraping tasks. If you have questions or need assistance, feel free to reach out on our [GitHub](https://github.com/apify/apify-sdk-python) or join our [Discord community](https://discord.com/invite/jyEM2PRvMU). Happy scraping!
+In this guide you learned how to use Scrapy in Apify Actors. You can now start building your own web scraping projects with Scrapy and the Apify SDK, and host them on the Apify platform. See the [Actor templates](https://apify.com/templates/categories/python) to get started with your own scraping tasks. If you have questions or need assistance, feel free to reach out on our [GitHub](https://github.com/apify/apify-sdk-python) or join our [Discord community](https://discord.com/invite/jyEM2PRvMU). Happy scraping!
+
+## Additional resources
+
+- [Apify CLI: Integrating Scrapy projects](https://docs.apify.com/cli/docs/integrating-scrapy)
+- [Apify: Run Scrapy spiders on Apify](https://apify.com/run-scrapy-in-cloud)
+- [Apify templates: Python Actor Scrapy template](https://apify.com/templates/python-scrapy)
+- [Apify store: Scrapy Books Example Actor](https://apify.com/vdusek/scrapy-books-example)
+- [Scrapy: Official documentation](https://docs.scrapy.org/)
diff --git a/docs/02_guides/code/scrapy_src/__init__.py b/docs/02_guides/code/scrapy_project/src/__init__.py
similarity index 100%
rename from docs/02_guides/code/scrapy_src/__init__.py
rename to docs/02_guides/code/scrapy_project/src/__init__.py
diff --git a/docs/02_guides/code/scrapy_project/src/__main__.py b/docs/02_guides/code/scrapy_project/src/__main__.py
new file mode 100644
index 00000000..3dcbf75c
--- /dev/null
+++ b/docs/02_guides/code/scrapy_project/src/__main__.py
@@ -0,0 +1,21 @@
+from __future__ import annotations
+
+from twisted.internet import asyncioreactor
+
+# Install Twisted's asyncio reactor before importing any other Twisted or Scrapy components.
+asyncioreactor.install() # type: ignore[no-untyped-call]
+
+import os
+
+from apify.scrapy import initialize_logging, run_scrapy_actor
+
+# Import your main Actor coroutine here.
+from .main import main
+
+# Set the location of the Scrapy settings module.
+os.environ['SCRAPY_SETTINGS_MODULE'] = 'src.settings'
+
+
+if __name__ == '__main__':
+ initialize_logging()
+ run_scrapy_actor(main())
diff --git a/docs/02_guides/code/scrapy_project/src/items.py b/docs/02_guides/code/scrapy_project/src/items.py
new file mode 100644
index 00000000..6579083f
--- /dev/null
+++ b/docs/02_guides/code/scrapy_project/src/items.py
@@ -0,0 +1,10 @@
+from __future__ import annotations
+
+from scrapy import Field, Item
+
+
+class TitleItem(Item):
+ """Represents a title item scraped from a web page."""
+
+ url = Field()
+ title = Field()
diff --git a/docs/02_guides/code/scrapy_project/src/main.py b/docs/02_guides/code/scrapy_project/src/main.py
new file mode 100644
index 00000000..a5586a25
--- /dev/null
+++ b/docs/02_guides/code/scrapy_project/src/main.py
@@ -0,0 +1,32 @@
+from __future__ import annotations
+
+from scrapy.crawler import CrawlerRunner
+from scrapy.utils.defer import deferred_to_future
+
+from apify import Actor
+from apify.scrapy import apply_apify_settings
+
+# Import your Scrapy spider here.
+from .spiders import TitleSpider as Spider
+
+
+async def main() -> None:
+ """Apify Actor main coroutine for executing the Scrapy spider."""
+ async with Actor:
+ # Retrieve and process Actor input.
+ actor_input = await Actor.get_input() or {}
+ start_urls = [url['url'] for url in actor_input.get('startUrls', [])]
+ allowed_domains = actor_input.get('allowedDomains')
+ proxy_config = actor_input.get('proxyConfiguration')
+
+ # Apply Apify settings, which will override the Scrapy project settings.
+ settings = apply_apify_settings(proxy_config=proxy_config)
+
+ # Create CrawlerRunner and execute the Scrapy spider.
+ crawler_runner = CrawlerRunner(settings)
+ crawl_deferred = crawler_runner.crawl(
+ Spider,
+ start_urls=start_urls,
+ allowed_domains=allowed_domains,
+ )
+ await deferred_to_future(crawl_deferred)
diff --git a/docs/02_guides/code/scrapy_src/py.typed b/docs/02_guides/code/scrapy_project/src/py.typed
similarity index 100%
rename from docs/02_guides/code/scrapy_src/py.typed
rename to docs/02_guides/code/scrapy_project/src/py.typed
diff --git a/docs/02_guides/code/scrapy_project/src/settings.py b/docs/02_guides/code/scrapy_project/src/settings.py
new file mode 100644
index 00000000..ed51668a
--- /dev/null
+++ b/docs/02_guides/code/scrapy_project/src/settings.py
@@ -0,0 +1,8 @@
+BOT_NAME = 'titlebot'
+DEPTH_LIMIT = 1
+LOG_LEVEL = 'INFO'
+NEWSPIDER_MODULE = 'src.spiders'
+ROBOTSTXT_OBEY = True
+SPIDER_MODULES = ['src.spiders']
+TELNETCONSOLE_ENABLED = False
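+# Use Twisted's asyncio-compatible reactor, which is required for the Apify-Scrapy integration.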
+TWISTED_REACTOR = 'twisted.internet.asyncioreactor.AsyncioSelectorReactor'
diff --git a/docs/02_guides/code/scrapy_project/src/spiders/__init__.py b/docs/02_guides/code/scrapy_project/src/spiders/__init__.py
new file mode 100644
index 00000000..f63ac977
--- /dev/null
+++ b/docs/02_guides/code/scrapy_project/src/spiders/__init__.py
@@ -0,0 +1 @@
+from .title import TitleSpider
diff --git a/docs/02_guides/code/scrapy_src/spiders/py.typed b/docs/02_guides/code/scrapy_project/src/spiders/py.typed
similarity index 100%
rename from docs/02_guides/code/scrapy_src/spiders/py.typed
rename to docs/02_guides/code/scrapy_project/src/spiders/py.typed
diff --git a/docs/02_guides/code/scrapy_src/spiders/title.py b/docs/02_guides/code/scrapy_project/src/spiders/title.py
similarity index 51%
rename from docs/02_guides/code/scrapy_src/spiders/title.py
rename to docs/02_guides/code/scrapy_project/src/spiders/title.py
index 7be37b68..ed54b3c3 100644
--- a/docs/02_guides/code/scrapy_src/spiders/title.py
+++ b/docs/02_guides/code/scrapy_project/src/spiders/title.py
@@ -1,8 +1,6 @@
-# ruff: noqa: TID252, RUF012
-
from __future__ import annotations
-from typing import TYPE_CHECKING
+from typing import TYPE_CHECKING, Any
from urllib.parse import urljoin
from scrapy import Request, Spider
@@ -16,20 +14,36 @@
class TitleSpider(Spider):
- """Scrapes title pages and enqueues all links found on the page."""
-
- name = 'title_spider'
+ """A spider that scrapes web pages to extract titles and discover new links.
- # The `start_urls` specified in this class will be merged with the `start_urls` value from your Actor input
- # when the project is executed using Apify.
- start_urls = ['https://apify.com/']
+    This spider retrieves the content of the <title> element from each page and
+    queues any valid hyperlinks for further crawling.
+ """
- # Scrape only the pages within the Apify domain.
- allowed_domains = ['apify.com']
+ name = 'title_spider'
# Limit the number of pages to scrape.
custom_settings = {'CLOSESPIDER_PAGECOUNT': 10}
+ def __init__(
+ self,
+ start_urls: list[str],
+ allowed_domains: list[str],
+ *args: Any,
+ **kwargs: Any,
+ ) -> None:
+ """A default costructor.
+
+ Args:
+ start_urls: URLs to start the scraping from.
+ allowed_domains: Domains that the scraper is allowed to crawl.
+ *args: Additional positional arguments.
+ **kwargs: Additional keyword arguments.
+ """
+ super().__init__(*args, **kwargs)
+ self.start_urls = start_urls
+ self.allowed_domains = allowed_domains
+
def parse(self, response: Response) -> Generator[TitleItem | Request, None, None]:
"""Parse the web page response.
@@ -37,7 +51,7 @@ def parse(self, response: Response) -> Generator[TitleItem | Request, None, None
response: The web page response.
Yields:
- Yields scraped TitleItem and Requests for links.
+ Yields scraped `TitleItem` and new `Request` objects for links.
"""
self.logger.info('TitleSpider is parsing %s...', response)
@@ -46,7 +60,7 @@ def parse(self, response: Response) -> Generator[TitleItem | Request, None, None
title = response.css('title::text').extract_first()
yield TitleItem(url=url, title=title)
- # Extract all links from the page, create Requests out of them, and yield them
+ # Extract all links from the page, create `Request` objects out of them, and yield them.
for link_href in response.css('a::attr("href")'):
link_url = urljoin(response.url, link_href.get())
if link_url.startswith(('http://', 'https://')):
diff --git a/docs/02_guides/code/scrapy_src/__main__.py b/docs/02_guides/code/scrapy_src/__main__.py
deleted file mode 100644
index 56d477dd..00000000
--- a/docs/02_guides/code/scrapy_src/__main__.py
+++ /dev/null
@@ -1,121 +0,0 @@
-"""Apify Actor integration for Scrapy projects.
-
-This module transforms a Scrapy project into an Apify Actor, handling the configuration of logging, patching Scrapy's
-logging system, and establishing the required environment to run the Scrapy spider within the Apify platform.
-
-This file is specifically designed to be executed when the project is run as an Apify Actor using `apify run` locally
-or being run on the Apify platform. It is not being executed when running the project as a Scrapy project using
-`scrapy crawl title_spider`.
-
-We recommend you do not modify this file unless you really know what you are doing.
-"""
-
-# ruff: noqa: E402
-
-# We need to configure the logging first before we import anything else, so that nothing else imports
-# `scrapy.utils.log` before we patch it.
-from __future__ import annotations
-
-from logging import StreamHandler, getLogger
-from typing import Any
-
-from scrapy.utils import log as scrapy_logging
-from scrapy.utils.project import get_project_settings
-
-from apify.log import ActorLogFormatter
-
-# Define names of the loggers.
-MAIN_LOGGER_NAMES = ['apify', 'apify_client', 'scrapy']
-OTHER_LOGGER_NAMES = ['filelock', 'hpack', 'httpcore', 'httpx', 'protego', 'twisted']
-ALL_LOGGER_NAMES = MAIN_LOGGER_NAMES + OTHER_LOGGER_NAMES
-
-# To change the logging level, modify the `LOG_LEVEL` field in `settings.py`. If the field is not present in the file,
-# Scrapy will default to `DEBUG`. This setting applies to all loggers. If you wish to change the logging level for
-# a specific logger, do it in this file.
-settings = get_project_settings()
-LOGGING_LEVEL = settings['LOG_LEVEL']
-
-# Define a logging handler which will be used for the loggers.
-apify_handler = StreamHandler()
-apify_handler.setFormatter(ActorLogFormatter(include_logger_name=True))
-
-
-def configure_logger(logger_name: str | None, log_level: str, *handlers: StreamHandler) -> None:
- """Configure a logger with the specified settings.
-
- Args:
- logger_name: The name of the logger to be configured.
- log_level: The desired logging level ('DEBUG', 'INFO', 'WARNING', 'ERROR', ...).
- handlers: Optional list of logging handlers.
- """
- logger = getLogger(logger_name)
- logger.setLevel(log_level)
- logger.handlers = []
-
- for handler in handlers:
- logger.addHandler(handler)
-
-
-# Apify loggers have to be set up here and in the `new_configure_logging` as well to be able to use them both from
-# the `main.py` and Scrapy components.
-for logger_name in MAIN_LOGGER_NAMES:
- configure_logger(logger_name, LOGGING_LEVEL, apify_handler)
-
-# We can't attach our log handler to the loggers normally, because Scrapy would remove them in the `configure_logging`
-# call here: https://github.com/scrapy/scrapy/blob/2.11.0/scrapy/utils/log.py#L113 (even though
-# `disable_existing_loggers` is set to False :facepalm:). We need to monkeypatch Scrapy's `configure_logging` method
-# like this, so that our handler is attached right after Scrapy calls the `configure_logging` method, because
-# otherwise we would lose some log messages.
-old_configure_logging = scrapy_logging.configure_logging
-
-
-def new_configure_logging(*args: Any, **kwargs: Any) -> None:
- """Configure logging for Scrapy and root loggers to ensure consistent logging behavior.
-
- We need to manually configure both the root logger and all Scrapy-associated loggers. Configuring only the root
- logger is not sufficient, as Scrapy will override it with its own settings. Scrapy uses these four primary
- loggers - https://github.com/scrapy/scrapy/blob/2.11.0/scrapy/utils/log.py#L60:L77. Therefore, we configure here
- these four loggers and the root logger.
- """
- old_configure_logging(*args, **kwargs)
-
- # We modify the root (None) logger to ensure proper display of logs from spiders when using the `self.logger`
- # property within spiders. See details in the Spider logger property:
- # https://github.com/scrapy/scrapy/blob/2.11.0/scrapy/spiders/__init__.py#L43:L46.
- configure_logger(None, LOGGING_LEVEL, apify_handler)
-
- # We modify other loggers only by setting up their log level. A custom log handler is added
- # only to the root logger to avoid duplicate log messages.
- for logger_name in ALL_LOGGER_NAMES:
- configure_logger(logger_name, LOGGING_LEVEL)
-
- # Set the HTTPX logger explicitly to the WARNING level, because it is too verbose and spams the logs with useless
- # messages, especially when running on the platform.
- configure_logger('httpx', 'WARNING')
-
-
-scrapy_logging.configure_logging = new_configure_logging
-
-# Now we can do the rest of the setup.
-import asyncio
-import os
-
-import nest_asyncio
-from scrapy.utils.reactor import install_reactor
-
-from .main import main
-
-# For compatibility between Twisted (used by Scrapy) and AsyncIO (used by Apify) asynchronous libraries, it is
-# necessary to set the Twisted reactor to `AsyncioSelectorReactor`. This setup allows the two asynchronous libraries
-# to work together.
-#
-# Note: The reactor must be installed before applying `nest_asyncio.apply()`, otherwise, it will not work correctly
-# on Windows.
-install_reactor('twisted.internet.asyncioreactor.AsyncioSelectorReactor')
-nest_asyncio.apply()
-
-# Specify the path to the Scrapy project settings module.
-os.environ['SCRAPY_SETTINGS_MODULE'] = 'src.settings'
-
-# Run the Apify main coroutine in the event loop.
-asyncio.run(main())
diff --git a/docs/02_guides/code/scrapy_src/items.py b/docs/02_guides/code/scrapy_src/items.py
deleted file mode 100644
index eae7ff23..00000000
--- a/docs/02_guides/code/scrapy_src/items.py
+++ /dev/null
@@ -1,17 +0,0 @@
-"""Scrapy item models module.
-
-This module defines Scrapy item models for scraped data. Items represent structured data
-extracted by spiders.
-
-For detailed information on creating and utilizing items, refer to the official documentation:
-https://docs.scrapy.org/en/latest/topics/items.html
-"""
-
-from scrapy import Field, Item
-
-
-class TitleItem(Item):
- """Represents a title item scraped from a web page."""
-
- url = Field()
- title = Field()
diff --git a/docs/02_guides/code/scrapy_src/main.py b/docs/02_guides/code/scrapy_src/main.py
deleted file mode 100644
index 1a878c5b..00000000
--- a/docs/02_guides/code/scrapy_src/main.py
+++ /dev/null
@@ -1,60 +0,0 @@
-"""This module defines the main entry point for the Apify Actor.
-
-This module defines the main coroutine for the Apify Scrapy Actor, executed from the __main__.py file. The coroutine
-processes the Actor's input and executes the Scrapy spider. Additionally, it updates Scrapy project settings by
-applying Apify-related settings. Which includes adding a custom scheduler, retry middleware, and an item pipeline
-for pushing data to the Apify dataset.
-
-Customization:
---------------
-
-Feel free to customize this file to add specific functionality to the Actor, such as incorporating your own Scrapy
-components like spiders and handling Actor input. However, make sure you have a clear understanding of your
-modifications. For instance, removing `apply_apify_settings` break the integration between Scrapy and Apify.
-
-Documentation:
---------------
-
-For an in-depth description of the Apify-Scrapy integration process, our Scrapy components, known limitations and
-other stuff, please refer to the following documentation page: https://docs.apify.com/cli/docs/integrating-scrapy.
-"""
-
-from __future__ import annotations
-
-from scrapy.crawler import CrawlerProcess
-
-# Import your Scrapy spider here.
-from .spiders.title import TitleSpider as Spider
-from apify import Actor
-from apify.scrapy.utils import apply_apify_settings
-
-# Default input values for local execution using `apify run`.
-LOCAL_DEFAULT_START_URLS = [{'url': 'https://apify.com'}]
-
-
-async def main() -> None:
- """Apify Actor main coroutine for executing the Scrapy spider."""
- # Enter the context of the Actor.
- async with Actor:
- Actor.log.info('Actor is being executed...')
-
- # Retrieve and process Actor input.
- actor_input = await Actor.get_input() or {}
- start_urls = actor_input.get('startUrls', LOCAL_DEFAULT_START_URLS)
- proxy_config = actor_input.get('proxyConfiguration')
-
- # Open the default request queue for handling URLs to be processed.
- request_queue = await Actor.open_request_queue()
-
- # Enqueue the start URLs.
- for start_url in start_urls:
- url = start_url.get('url')
- await request_queue.add_request(url)
-
- # Apply Apify settings, it will override the Scrapy project settings.
- settings = apply_apify_settings(proxy_config=proxy_config)
-
- # Execute the spider using Scrapy `CrawlerProcess`.
- process = CrawlerProcess(settings, install_root_handler=False)
- process.crawl(Spider)
- process.start()
diff --git a/docs/02_guides/code/scrapy_src/settings.py b/docs/02_guides/code/scrapy_src/settings.py
deleted file mode 100644
index 8a0fd3e6..00000000
--- a/docs/02_guides/code/scrapy_src/settings.py
+++ /dev/null
@@ -1,15 +0,0 @@
-"""Scrapy settings module.
-
-This module contains Scrapy settings for the project, defining various configurations and options.
-
-For more comprehensive details on Scrapy settings, refer to the official documentation:
-http://doc.scrapy.org/en/latest/topics/settings.html
-"""
-
-BOT_NAME = 'titlebot'
-DEPTH_LIMIT = 1
-LOG_LEVEL = 'INFO'
-NEWSPIDER_MODULE = 'spiders'
-REQUEST_FINGERPRINTER_IMPLEMENTATION = '2.7'
-ROBOTSTXT_OBEY = True
-SPIDER_MODULES = ['spiders']
diff --git a/docs/02_guides/code/scrapy_src/spiders/__init__.py b/docs/02_guides/code/scrapy_src/spiders/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/pyproject.toml b/pyproject.toml
index bb27ebb2..f479bb84 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -136,6 +136,18 @@ indent-style = "space"
"TRY301", # Abstract `raise` to an inner function
"PLW0603", # Using the global statement to update `{name}` is discouraged
]
+"**/docs/**/scrapy_project/**/__main__.py" = [
+ # Because of asyncioreactor.install() call.
+ "E402", # Module level import not at top of file
+]
+"**/docs/**/scrapy_project/**" = [
+ # Local imports are mixed up with the Apify SDK.
+ "I001", # Import block is un-sorted or un-formatted
+ # Class variables are common in Scrapy projects.
+ "RUF012", # Mutable class attributes should be annotated with `typing.ClassVar`
+ # Local imports in Scrapy project.
+ "TID252", # Prefer absolute imports over relative imports from parent modules
+]
[tool.ruff.lint.flake8-quotes]
docstring-quotes = "double"
diff --git a/src/apify/_actor.py b/src/apify/_actor.py
index 71e3b6e2..d675f1bd 100644
--- a/src/apify/_actor.py
+++ b/src/apify/_actor.py
@@ -270,8 +270,8 @@ async def finalize() -> None:
self.log.debug(f'Not calling sys.exit({exit_code}) because Actor is running in IPython')
elif os.getenv('PYTEST_CURRENT_TEST', default=False): # noqa: PLW1508
self.log.debug(f'Not calling sys.exit({exit_code}) because Actor is running in an unit test')
- elif hasattr(asyncio, '_nest_patched'):
- self.log.debug(f'Not calling sys.exit({exit_code}) because Actor is running in a nested event loop')
+ elif os.getenv('SCRAPY_SETTINGS_MODULE'):
+ self.log.debug(f'Not calling sys.exit({exit_code}) because Actor is running with Scrapy')
else:
sys.exit(exit_code)
diff --git a/src/apify/scrapy/__init__.py b/src/apify/scrapy/__init__.py
index 3a665c99..dfab9943 100644
--- a/src/apify/scrapy/__init__.py
+++ b/src/apify/scrapy/__init__.py
@@ -1,11 +1,32 @@
-from apify.scrapy.requests import to_apify_request, to_scrapy_request
-from apify.scrapy.scheduler import ApifyScheduler
-from apify.scrapy.utils import get_basic_auth_header, get_running_event_loop_id
+from crawlee._utils.try_import import install_import_hook as _install_import_hook
+from crawlee._utils.try_import import try_import as _try_import
+
+_install_import_hook(__name__)
+
+# The following imports use try_import to handle optional dependencies, as they may not always be available.
+
+with _try_import(__name__, 'run_scrapy_actor'):
+ from ._actor_runner import run_scrapy_actor
+
+with _try_import(__name__, 'initialize_logging'):
+ from ._logging_config import initialize_logging
+
+with _try_import(__name__, 'to_apify_request', 'to_scrapy_request'):
+ from .requests import to_apify_request, to_scrapy_request
+
+with _try_import(__name__, 'ApifyScheduler'):
+ from .scheduler import ApifyScheduler
+
+with _try_import(__name__, 'apply_apify_settings', 'get_basic_auth_header'):
+ from .utils import apply_apify_settings, get_basic_auth_header
+
__all__ = [
'ApifyScheduler',
+ 'apply_apify_settings',
'get_basic_auth_header',
- 'get_running_event_loop_id',
+ 'initialize_logging',
+ 'run_scrapy_actor',
'to_apify_request',
'to_scrapy_request',
]
diff --git a/src/apify/scrapy/_actor_runner.py b/src/apify/scrapy/_actor_runner.py
new file mode 100644
index 00000000..390b2fc3
--- /dev/null
+++ b/src/apify/scrapy/_actor_runner.py
@@ -0,0 +1,26 @@
+from __future__ import annotations
+
+import asyncio
+from typing import TYPE_CHECKING
+
+from twisted.internet.defer import Deferred, ensureDeferred
+from twisted.internet.task import react
+
+if TYPE_CHECKING:
+ from collections.abc import Coroutine
+
+
+async def _run_coro_as_deferred(coro: Coroutine) -> None:
+ """Wrap the given asyncio coroutine in a Task and await its result as a Twisted Deferred."""
+ task = asyncio.ensure_future(coro)
+ await Deferred.fromFuture(task)
+
+
+def run_scrapy_actor(coro: Coroutine) -> None:
+ """Start Twisted's reactor and execute the provided Actor coroutine.
+
+ This function initiates the Twisted reactor and runs the given asyncio coroutine (typically the
+ Actor's main) by converting it to a Deferred. This bridges the asyncio and Twisted event loops,
+ enabling the Apify and Scrapy integration to work together.
+ """
+ react(lambda _: ensureDeferred(_run_coro_as_deferred(coro)))
diff --git a/src/apify/scrapy/_async_thread.py b/src/apify/scrapy/_async_thread.py
new file mode 100644
index 00000000..166e8b17
--- /dev/null
+++ b/src/apify/scrapy/_async_thread.py
@@ -0,0 +1,122 @@
+from __future__ import annotations
+
+import asyncio
+import threading
+from concurrent import futures
+from datetime import timedelta
+from logging import getLogger
+from typing import TYPE_CHECKING, Any
+
+if TYPE_CHECKING:
+ from collections.abc import Coroutine
+
+logger = getLogger(__name__)
+
+
+class AsyncThread:
+ """Class for running an asyncio event loop in a separate thread.
+
+    This allows running asynchronous coroutines from synchronous code by executing them on an event loop
+ that runs in its own dedicated thread.
+ """
+
+ def __init__(self) -> None:
+ self._eventloop = asyncio.new_event_loop()
+
+ # Start the event loop in a dedicated daemon thread.
+ self._thread = threading.Thread(
+ target=self._start_event_loop,
+ daemon=True,
+ )
+ self._thread.start()
+
+ def run_coro(
+ self,
+ coro: Coroutine,
+ timeout: timedelta = timedelta(seconds=60),
+ ) -> Any:
+ """Run a coroutine on an event loop running in a separate thread.
+
+ This method schedules the coroutine to run on the event loop and blocks until the coroutine completes
+ or the specified timeout is reached.
+
+ Args:
+ coro: The coroutine to run.
+ timeout: The maximum number of seconds to wait for the coroutine to finish.
+
+ Returns:
+ The result returned by the coroutine.
+
+ Raises:
+ RuntimeError: If the event loop is not running.
+ TimeoutError: If the coroutine does not complete within the timeout.
+ Exception: Any exception raised during coroutine execution.
+ """
+ if not self._eventloop.is_running():
+ raise RuntimeError(f'The coroutine {coro} cannot be executed because the event loop is not running.')
+
+ # Submit the coroutine to the event loop running in the other thread.
+ future = asyncio.run_coroutine_threadsafe(coro, self._eventloop)
+ try:
+ # Wait for the coroutine's result until the specified timeout.
+ return future.result(timeout=timeout.total_seconds())
+ except futures.TimeoutError as exc:
+ logger.exception('Coroutine execution timed out.', exc_info=exc)
+ raise
+ except Exception as exc:
+ logger.exception('Coroutine execution raised an exception.', exc_info=exc)
+ raise
+
+ def close(self, timeout: timedelta = timedelta(seconds=60)) -> None:
+ """Close the event loop and its thread gracefully.
+
+ This method cancels all pending tasks, stops the event loop, and waits for the thread to exit.
+ If the thread does not exit within the given timeout, a forced shutdown is attempted.
+
+ Args:
+ timeout: The maximum number of seconds to wait for the event loop thread to exit.
+ """
+ if self._eventloop.is_running():
+ # Cancel all pending tasks in the event loop.
+ self.run_coro(self._shutdown_tasks())
+
+ # Schedule the event loop to stop.
+ self._eventloop.call_soon_threadsafe(self._eventloop.stop)
+
+ # Wait for the event loop thread to finish execution.
+ self._thread.join(timeout=timeout.total_seconds())
+
+ # If the thread is still running after the timeout, force a shutdown.
+ if self._thread.is_alive():
+ logger.warning('Event loop thread did not exit cleanly! Forcing shutdown...')
+ self._force_exit_event_loop()
+
+ def _start_event_loop(self) -> None:
+ """Set up and run the asyncio event loop in the dedicated thread."""
+ asyncio.set_event_loop(self._eventloop)
+ try:
+ self._eventloop.run_forever()
+ finally:
+ self._eventloop.close()
+ logger.debug('Asyncio event loop has been closed.')
+
+ async def _shutdown_tasks(self) -> None:
+ """Cancel all pending tasks in the event loop."""
+ # Retrieve all tasks for the event loop, excluding the current task.
+ tasks = [task for task in asyncio.all_tasks(self._eventloop) if task is not asyncio.current_task()]
+
+ # Cancel each pending task.
+ for task in tasks:
+ task.cancel()
+
+ # Wait until all tasks have been cancelled or finished.
+ await asyncio.gather(*tasks, return_exceptions=True)
+
+ def _force_exit_event_loop(self) -> None:
+ """Forcefully shut down the event loop and its thread."""
+ try:
+ logger.info('Forced shutdown of the event loop and its thread...')
+ self._eventloop.call_soon_threadsafe(self._eventloop.stop)
+ self._thread.join(timeout=5)
+ except Exception as exc:
+ logger.exception('Exception occurred during forced event loop shutdown.', exc_info=exc)
diff --git a/src/apify/scrapy/_logging_config.py b/src/apify/scrapy/_logging_config.py
new file mode 100644
index 00000000..45cd2908
--- /dev/null
+++ b/src/apify/scrapy/_logging_config.py
@@ -0,0 +1,55 @@
+from __future__ import annotations
+
+import logging
+from typing import Any
+
+from scrapy.utils import log as scrapy_logging
+from scrapy.utils.project import get_project_settings
+
+from apify.log import ActorLogFormatter
+
+# Define logger names.
+_PRIMARY_LOGGERS = ['apify', 'apify_client', 'scrapy']
+_SUPPLEMENTAL_LOGGERS = ['filelock', 'hpack', 'httpcore', 'httpx', 'protego', 'twisted']
+_ALL_LOGGERS = _PRIMARY_LOGGERS + _SUPPLEMENTAL_LOGGERS
+
+
+def _configure_logger(name: str | None, logging_level: str, handler: logging.Handler) -> None:
+ """Clear and reconfigure the logger."""
+ logger = logging.getLogger(name)
+ logger.handlers.clear()
+ logger.setLevel(logging_level)
+
+ if name is None: # Root logger.
+ logger.addHandler(handler)
+ logger.propagate = False
+ else:
+ logger.propagate = True
+
+
+def initialize_logging() -> None:
+ """Configure logging for Apify Actors and adjust Scrapy's logging settings."""
+ # Retrieve Scrapy project settings and determine the logging level.
+ settings = get_project_settings()
+ logging_level = settings.get('LOG_LEVEL', 'INFO') # Default to INFO.
+
+ # Create a custom handler with the Apify log formatter.
+ handler = logging.StreamHandler()
+ handler.setFormatter(ActorLogFormatter(include_logger_name=True))
+
+ # Configure the root logger and all other defined loggers.
+ for logger_name in [None, *_ALL_LOGGERS]:
+ _configure_logger(logger_name, logging_level, handler)
+
+ # Set the 'httpx' logger to a less verbose level.
+ logging.getLogger('httpx').setLevel('WARNING')
+
+ # Monkey-patch Scrapy's logging configuration to re-apply our settings.
+ original_configure_logging = scrapy_logging.configure_logging
+
+ def new_configure_logging(*args: Any, **kwargs: Any) -> None:
+ original_configure_logging(*args, **kwargs)
+ for logger_name in [None, *_ALL_LOGGERS]:
+ _configure_logger(logger_name, logging_level, handler)
+
+ scrapy_logging.configure_logging = new_configure_logging
diff --git a/src/apify/scrapy/middlewares/apify_proxy.py b/src/apify/scrapy/middlewares/apify_proxy.py
index f81be3c4..4721a248 100644
--- a/src/apify/scrapy/middlewares/apify_proxy.py
+++ b/src/apify/scrapy/middlewares/apify_proxy.py
@@ -3,19 +3,15 @@
from typing import TYPE_CHECKING
from urllib.parse import ParseResult, urlparse
-try:
- if TYPE_CHECKING:
- from scrapy import Request, Spider
- from scrapy.crawler import Crawler
- from scrapy.core.downloader.handlers.http11 import TunnelError
- from scrapy.exceptions import NotConfigured
-except ImportError as exc:
- raise ImportError(
- 'To use this module, you need to install the "scrapy" extra. Run "pip install apify[scrapy]".',
- ) from exc
+from scrapy.core.downloader.handlers.http11 import TunnelError
+from scrapy.exceptions import NotConfigured
from apify import Actor, ProxyConfiguration
-from apify.scrapy.utils import get_basic_auth_header
+from apify.scrapy import get_basic_auth_header
+
+if TYPE_CHECKING:
+ from scrapy import Request, Spider
+ from scrapy.crawler import Crawler
class ApifyHttpProxyMiddleware:
@@ -51,7 +47,7 @@ def from_crawler(cls: type[ApifyHttpProxyMiddleware], crawler: Crawler) -> Apify
proxy_settings: dict | None = crawler.settings.get('APIFY_PROXY_SETTINGS')
if proxy_settings is None:
- Actor.log.warning(
+ Actor.log.info(
'ApifyHttpProxyMiddleware is not going to be used. Object "proxyConfiguration" is probably missing '
' in the Actor input.'
)
@@ -60,7 +56,7 @@ def from_crawler(cls: type[ApifyHttpProxyMiddleware], crawler: Crawler) -> Apify
use_apify_proxy = proxy_settings.get('useApifyProxy', False)
if use_apify_proxy is not True:
- Actor.log.warning(
+ Actor.log.info(
'ApifyHttpProxyMiddleware is not going to be used. Actor input field '
'"proxyConfiguration.useApifyProxy" is set to False.'
)
diff --git a/src/apify/scrapy/pipelines/actor_dataset_push.py b/src/apify/scrapy/pipelines/actor_dataset_push.py
index d2d983cc..995af774 100644
--- a/src/apify/scrapy/pipelines/actor_dataset_push.py
+++ b/src/apify/scrapy/pipelines/actor_dataset_push.py
@@ -1,19 +1,17 @@
from __future__ import annotations
+from logging import getLogger
from typing import TYPE_CHECKING
from itemadapter.adapter import ItemAdapter
-try:
- if TYPE_CHECKING:
- from scrapy import Item, Spider
-except ImportError as exc:
- raise ImportError(
- 'To use this module, you need to install the "scrapy" extra. Run "pip install apify[scrapy]".',
- ) from exc
-
from apify import Actor
+if TYPE_CHECKING:
+ from scrapy import Item, Spider
+
+logger = getLogger(__name__)
+
class ActorDatasetPushPipeline:
"""A Scrapy pipeline for pushing items to an Actor's default dataset.
@@ -28,6 +26,6 @@ async def process_item(
) -> Item:
"""Pushes the provided Scrapy item to the Actor's default dataset."""
item_dict = ItemAdapter(item).asdict()
- Actor.log.debug(f'Pushing item={item_dict} produced by spider={spider} to the dataset.')
+ logger.debug(f'Pushing item={item_dict} produced by spider={spider} to the dataset.')
await Actor.push_data(item_dict)
return item
diff --git a/src/apify/scrapy/requests.py b/src/apify/scrapy/requests.py
index 4ded045f..6a4badc4 100644
--- a/src/apify/scrapy/requests.py
+++ b/src/apify/scrapy/requests.py
@@ -2,37 +2,21 @@
import codecs
import pickle
+from logging import getLogger
from typing import Any, cast
-from apify_shared.utils import ignore_docs
+from scrapy import Request as ScrapyRequest
+from scrapy import Spider
+from scrapy.http.headers import Headers
+from scrapy.utils.request import request_from_dict
-try:
- from scrapy import Request, Spider
- from scrapy.http.headers import Headers
- from scrapy.utils.request import request_from_dict
-except ImportError as exc:
- raise ImportError(
- 'To use this module, you need to install the "scrapy" extra. Run "pip install apify[scrapy]".',
- ) from exc
-
-from crawlee import Request as CrawleeRequest
+from crawlee import Request as ApifyRequest
from crawlee._types import HttpHeaders
-from crawlee._utils.crypto import crypto_random_object_id
-from crawlee._utils.requests import compute_unique_key, unique_key_to_request_id
-
-from apify import Actor
-
-def _is_request_produced_by_middleware(scrapy_request: Request) -> bool:
- """Returns True if the Scrapy request was produced by a downloader middleware, otherwise False.
-
- Works for RetryMiddleware and RedirectMiddleware.
- """
- return bool(scrapy_request.meta.get('redirect_times')) or bool(scrapy_request.meta.get('retry_times'))
+logger = getLogger(__name__)
-@ignore_docs
-def to_apify_request(scrapy_request: Request, spider: Spider) -> CrawleeRequest | None:
+def to_apify_request(scrapy_request: ScrapyRequest, spider: Spider) -> ApifyRequest | None:
"""Convert a Scrapy request to an Apify request.
Args:
@@ -42,54 +26,45 @@ def to_apify_request(scrapy_request: Request, spider: Spider) -> CrawleeRequest
Returns:
The converted Apify request if the conversion was successful, otherwise None.
"""
- if not isinstance(scrapy_request, Request):
- Actor.log.warning( # type: ignore[unreachable]
- 'Failed to convert to Apify request: Scrapy request must be a Request instance.'
- )
+ if not isinstance(scrapy_request, ScrapyRequest):
+ logger.warning('Failed to convert to Apify request: Scrapy request must be a ScrapyRequest instance.') # type: ignore[unreachable]
return None
- call_id = crypto_random_object_id(8)
- Actor.log.debug(f'[{call_id}]: to_apify_request was called (scrapy_request={scrapy_request})...')
+ logger.debug(f'to_apify_request was called (scrapy_request={scrapy_request})...')
+
+ # Configuration to behave as similarly as possible to Scrapy's default RFPDupeFilter.
+ request_kwargs: dict[str, Any] = {
+ 'url': scrapy_request.url,
+ 'method': scrapy_request.method,
+ 'payload': scrapy_request.body,
+ 'use_extended_unique_key': True,
+ 'keep_url_fragment': False,
+ }
try:
- if _is_request_produced_by_middleware(scrapy_request):
- unique_key = compute_unique_key(
- url=scrapy_request.url,
- method=scrapy_request.method, # type: ignore[arg-type] # str vs literal
- payload=scrapy_request.body,
- use_extended_unique_key=True,
- )
- elif scrapy_request.dont_filter:
- unique_key = crypto_random_object_id(8)
- elif scrapy_request.meta.get('apify_request_unique_key'):
- unique_key = scrapy_request.meta['apify_request_unique_key']
+ if scrapy_request.dont_filter:
+ request_kwargs['always_enqueue'] = True
else:
- unique_key = crypto_random_object_id(8)
+ if scrapy_request.meta.get('apify_request_unique_key'):
+ request_kwargs['unique_key'] = scrapy_request.meta['apify_request_unique_key']
- if scrapy_request.meta.get('apify_request_id'):
- request_id = scrapy_request.meta['apify_request_id']
- else:
- request_id = unique_key_to_request_id(unique_key)
-
- apify_request = CrawleeRequest(
- url=scrapy_request.url,
- method=scrapy_request.method,
- payload=scrapy_request.body,
- user_data=scrapy_request.meta.get('userData', {}),
- unique_key=unique_key,
- id=request_id,
- )
+ if scrapy_request.meta.get('apify_request_id'):
+ request_kwargs['id'] = scrapy_request.meta['apify_request_id']
+
+ request_kwargs['user_data'] = scrapy_request.meta.get('userData', {})
# Convert Scrapy's headers to a HttpHeaders and store them in the apify_request
if isinstance(scrapy_request.headers, Headers):
- apify_request.headers = HttpHeaders(dict(scrapy_request.headers.to_unicode_dict()))
+ request_kwargs['headers'] = HttpHeaders(dict(scrapy_request.headers.to_unicode_dict()))
else:
- Actor.log.warning( # type: ignore[unreachable]
+ logger.warning( # type: ignore[unreachable]
f'Invalid scrapy_request.headers type, not scrapy.http.headers.Headers: {scrapy_request.headers}'
)
- # Serialize the Scrapy Request and store it in the apify_request.
- # - This process involves converting the Scrapy Request object into a dictionary, encoding it to base64,
+ apify_request = ApifyRequest.from_url(**request_kwargs)
+
+    # Serialize the Scrapy request and store it in the apify_request.
+    # - This process involves converting the Scrapy request object into a dictionary, encoding it to base64,
# and storing it as 'scrapy_request' within the 'userData' dictionary of the apify_request.
# - The serialization process can be referenced at: https://stackoverflow.com/questions/30469575/.
scrapy_request_dict = scrapy_request.to_dict(spider=spider)
@@ -97,15 +72,14 @@ def to_apify_request(scrapy_request: Request, spider: Spider) -> CrawleeRequest
apify_request.user_data['scrapy_request'] = scrapy_request_dict_encoded
except Exception as exc:
- Actor.log.warning(f'Conversion of Scrapy request {scrapy_request} to Apify request failed; {exc}')
+ logger.warning(f'Conversion of Scrapy request {scrapy_request} to Apify request failed; {exc}')
return None
- Actor.log.debug(f'[{call_id}]: scrapy_request was converted to the apify_request={apify_request}')
+ logger.debug(f'scrapy_request was converted to the apify_request={apify_request}')
return apify_request
-@ignore_docs
-def to_scrapy_request(apify_request: CrawleeRequest, spider: Spider) -> Request:
+def to_scrapy_request(apify_request: ApifyRequest, spider: Spider) -> ScrapyRequest:
"""Convert an Apify request to a Scrapy request.
Args:
@@ -113,24 +87,23 @@ def to_scrapy_request(apify_request: CrawleeRequest, spider: Spider) -> Request:
spider: The Scrapy spider that the request is associated with.
Raises:
- TypeError: If the apify_request is not a crawlee request.
- ValueError: If the apify_request does not contain the required keys.
+ TypeError: If the Apify request is not an instance of the `ApifyRequest` class.
+ ValueError: If the Apify request does not contain the required keys.
Returns:
The converted Scrapy request.
"""
- if not isinstance(cast(Any, apify_request), CrawleeRequest):
- raise TypeError('apify_request must be a crawlee.Request instance')
+ if not isinstance(cast(Any, apify_request), ApifyRequest):
+        raise TypeError('apify_request must be a crawlee.Request instance')
- call_id = crypto_random_object_id(8)
- Actor.log.debug(f'[{call_id}]: to_scrapy_request was called (apify_request={apify_request})...')
+ logger.debug(f'to_scrapy_request was called (apify_request={apify_request})...')
# If the apify_request comes from the Scrapy
if 'scrapy_request' in apify_request.user_data:
- # Deserialize the Scrapy Request from the apify_request.
+        # Deserialize the Scrapy request from the apify_request.
# - This process involves decoding the base64-encoded request data and reconstructing
- # the Scrapy Request object from its dictionary representation.
- Actor.log.debug(f'[{call_id}]: Restoring the Scrapy Request from the apify_request...')
+        #   the Scrapy request object from its dictionary representation.
+        logger.debug('Restoring the Scrapy request from the apify_request...')
scrapy_request_dict_encoded = apify_request.user_data['scrapy_request']
if not isinstance(scrapy_request_dict_encoded, str):
@@ -141,10 +114,10 @@ def to_scrapy_request(apify_request: CrawleeRequest, spider: Spider) -> Request:
raise TypeError('scrapy_request_dict must be a dictionary')
scrapy_request = request_from_dict(scrapy_request_dict, spider=spider)
- if not isinstance(scrapy_request, Request):
- raise TypeError('scrapy_request must be an instance of the Request class')
+ if not isinstance(scrapy_request, ScrapyRequest):
+ raise TypeError('scrapy_request must be an instance of the ScrapyRequest class')
- Actor.log.debug(f'[{call_id}]: Scrapy Request successfully reconstructed (scrapy_request={scrapy_request})...')
+        logger.debug(f'Scrapy request successfully reconstructed (scrapy_request={scrapy_request})...')
# Update the meta field with the meta field from the apify_request
meta = scrapy_request.meta or {}
@@ -152,11 +125,11 @@ def to_scrapy_request(apify_request: CrawleeRequest, spider: Spider) -> Request:
# scrapy_request.meta is a property, so we have to set it like this
scrapy_request._meta = meta # noqa: SLF001
- # If the apify_request comes directly from the Request Queue, typically start URLs
+    # If the apify_request does not come from Scrapy, it is typically a start URL added directly to the queue.
else:
- Actor.log.debug(f'[{call_id}]: gonna create a new Scrapy Request (cannot be restored)')
+        logger.debug('Creating a new Scrapy request (cannot be restored)')
- scrapy_request = Request(
+ scrapy_request = ScrapyRequest(
url=apify_request.url,
method=apify_request.method,
meta={
@@ -173,5 +146,5 @@ def to_scrapy_request(apify_request: CrawleeRequest, spider: Spider) -> Request:
if apify_request.user_data:
scrapy_request.meta['userData'] = apify_request.user_data
- Actor.log.debug(f'[{call_id}]: an apify_request was converted to the scrapy_request={scrapy_request}')
+ logger.debug(f'an apify_request was converted to the scrapy_request={scrapy_request}')
return scrapy_request
diff --git a/src/apify/scrapy/scheduler.py b/src/apify/scrapy/scheduler.py
index 7d93388f..a243a368 100644
--- a/src/apify/scrapy/scheduler.py
+++ b/src/apify/scrapy/scheduler.py
@@ -1,41 +1,33 @@
from __future__ import annotations
import traceback
+from logging import getLogger
from typing import TYPE_CHECKING
-from crawlee.storage_clients import MemoryStorageClient
+from scrapy import Spider
+from scrapy.core.scheduler import BaseScheduler
+from scrapy.utils.reactor import is_asyncio_reactor_installed
-from apify._configuration import Configuration
+from ._async_thread import AsyncThread
+from .requests import to_apify_request, to_scrapy_request
+from apify import Configuration
from apify.apify_storage_client import ApifyStorageClient
+from apify.storages import RequestQueue
-try:
- from scrapy import Spider
- from scrapy.core.scheduler import BaseScheduler
- from scrapy.utils.reactor import is_asyncio_reactor_installed
-
- if TYPE_CHECKING:
- from scrapy.http.request import Request
-except ImportError as exc:
- raise ImportError(
- 'To use this module, you need to install the "scrapy" extra. Run "pip install apify[scrapy]".',
- ) from exc
-
-from crawlee._utils.crypto import crypto_random_object_id
+if TYPE_CHECKING:
+ from scrapy.http.request import Request
+ from twisted.internet.defer import Deferred
-from apify import Actor
-from apify.scrapy.requests import to_apify_request, to_scrapy_request
-from apify.scrapy.utils import nested_event_loop
-from apify.storages import RequestQueue
+logger = getLogger(__name__)
class ApifyScheduler(BaseScheduler):
- """A Scrapy scheduler that uses the Apify Request Queue to manage requests.
+ """A Scrapy scheduler that uses the Apify `RequestQueue` to manage requests.
This scheduler requires the asyncio Twisted reactor to be installed.
"""
def __init__(self) -> None:
- """Create a new instance."""
if not is_asyncio_reactor_installed():
raise ValueError(
f'{ApifyScheduler.__qualname__} requires the asyncio Twisted reactor. '
@@ -45,7 +37,10 @@ def __init__(self) -> None:
self._rq: RequestQueue | None = None
self.spider: Spider | None = None
- def open(self, spider: Spider) -> None: # this has to be named "open"
+ # A thread with the asyncio event loop to run coroutines on.
+ self._async_thread = AsyncThread()
+
+ def open(self, spider: Spider) -> Deferred[None] | None:
"""Open the scheduler.
Args:
@@ -53,23 +48,42 @@ def open(self, spider: Spider) -> None: # this has to be named "open"
"""
self.spider = spider
- async def open_queue() -> RequestQueue:
+ async def open_rq() -> RequestQueue:
config = Configuration.get_global_configuration()
-
- # Use the ApifyStorageClient if the Actor is running on the Apify platform,
- # otherwise use the MemoryStorageClient.
- storage_client = (
- ApifyStorageClient.from_config(config) if config.is_at_home else MemoryStorageClient.from_config(config)
- )
-
- return await RequestQueue.open(storage_client=storage_client)
+ if config.is_at_home:
+ storage_client = ApifyStorageClient.from_config(config)
+ return await RequestQueue.open(storage_client=storage_client)
+ return await RequestQueue.open()
try:
- self._rq = nested_event_loop.run_until_complete(open_queue())
- except BaseException:
+ self._rq = self._async_thread.run_coro(open_rq())
+ except Exception:
traceback.print_exc()
raise
+ return None
+
+ def close(self, reason: str) -> None:
+ """Close the scheduler.
+
+ Shut down the event loop and its thread gracefully.
+
+ Args:
+ reason: The reason for closing the spider.
+ """
+ logger.debug(f'Closing {self.__class__.__name__} due to {reason}...')
+ try:
+ self._async_thread.close()
+
+ except KeyboardInterrupt:
+ logger.warning('Shutdown interrupted by KeyboardInterrupt!')
+
+ except Exception:
+ logger.exception('Exception occurred while shutting down.')
+
+ finally:
+ logger.debug(f'{self.__class__.__name__} closed successfully.')
+
def has_pending_requests(self) -> bool:
"""Check if the scheduler has any pending requests.
@@ -80,8 +94,8 @@ def has_pending_requests(self) -> bool:
raise TypeError('self._rq must be an instance of the RequestQueue class')
try:
- is_finished = nested_event_loop.run_until_complete(self._rq.is_finished())
- except BaseException:
+ is_finished = self._async_thread.run_coro(self._rq.is_finished())
+ except Exception:
traceback.print_exc()
raise
@@ -98,29 +112,27 @@ def enqueue_request(self, request: Request) -> bool:
Returns:
True if the request was successfully enqueued, False otherwise.
"""
- call_id = crypto_random_object_id(8)
- Actor.log.debug(f'[{call_id}]: ApifyScheduler.enqueue_request was called (scrapy_request={request})...')
+ logger.debug(f'ApifyScheduler.enqueue_request was called (scrapy_request={request})...')
if not isinstance(self.spider, Spider):
raise TypeError('self.spider must be an instance of the Spider class')
apify_request = to_apify_request(request, spider=self.spider)
if apify_request is None:
- Actor.log.error(f'Request {request} was not enqueued because it could not be converted to Apify request.')
+            logger.error(f'Request {request} could not be converted to an Apify request.')
return False
- Actor.log.debug(f'[{call_id}]: scrapy_request was transformed to apify_request (apify_request={apify_request})')
-
+ logger.debug(f'Converted to apify_request: {apify_request}')
if not isinstance(self._rq, RequestQueue):
raise TypeError('self._rq must be an instance of the RequestQueue class')
try:
- result = nested_event_loop.run_until_complete(self._rq.add_request(apify_request))
- except BaseException:
+ result = self._async_thread.run_coro(self._rq.add_request(apify_request))
+ except Exception:
traceback.print_exc()
raise
- Actor.log.debug(f'[{call_id}]: rq.add_request.result={result}...')
+ logger.debug(f'rq.add_request result: {result}')
return bool(result.was_already_present)
def next_request(self) -> Request | None:
@@ -129,40 +141,31 @@ def next_request(self) -> Request | None:
Returns:
The next request, or None if there are no more requests.
"""
- call_id = crypto_random_object_id(8)
- Actor.log.debug(f'[{call_id}]: ApifyScheduler.next_request was called...')
-
+ logger.debug('next_request called...')
if not isinstance(self._rq, RequestQueue):
raise TypeError('self._rq must be an instance of the RequestQueue class')
- # Fetch the next request from the Request Queue
try:
- apify_request = nested_event_loop.run_until_complete(self._rq.fetch_next_request())
- except BaseException:
+ apify_request = self._async_thread.run_coro(self._rq.fetch_next_request())
+ except Exception:
traceback.print_exc()
raise
- Actor.log.debug(
- f'[{call_id}]: a new apify_request from the scheduler was fetched (apify_request={apify_request})'
- )
-
+ logger.debug(f'Fetched apify_request: {apify_request}')
if apify_request is None:
return None
if not isinstance(self.spider, Spider):
raise TypeError('self.spider must be an instance of the Spider class')
- # Let the Request Queue know that the request is being handled. Every request should be marked as handled,
- # retrying is handled by the Scrapy's RetryMiddleware.
+        # Let the request queue know that the request is being handled. Every request should
+        # be marked as handled; retrying is handled by Scrapy's RetryMiddleware.
try:
- nested_event_loop.run_until_complete(self._rq.mark_request_as_handled(apify_request))
- except BaseException:
+ self._async_thread.run_coro(self._rq.mark_request_as_handled(apify_request))
+ except Exception:
traceback.print_exc()
raise
scrapy_request = to_scrapy_request(apify_request, spider=self.spider)
- Actor.log.debug(
- f'[{call_id}]: apify_request was transformed to the scrapy_request which is gonna be returned '
- f'(scrapy_request={scrapy_request})',
- )
+ logger.debug(f'Converted to scrapy_request: {scrapy_request}')
return scrapy_request
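
The scheduler above replaces the removed module-level `nested_event_loop` with an `AsyncThread`, so Scrapy's synchronous callbacks (`open`, `close`, `has_pending_requests`, `enqueue_request`, `next_request`) can block on request-queue coroutines without touching the Twisted reactor's event loop. The SDK's `AsyncThread` implementation is not part of this diff; the sketch below only illustrates the underlying pattern (a background thread running its own asyncio loop, with `asyncio.run_coroutine_threadsafe` used for submission). The class name and signatures are illustrative, not the SDK API.

```python
# Minimal sketch of the pattern behind AsyncThread - illustrative only, not the SDK code.
from __future__ import annotations

import asyncio
import threading
from collections.abc import Coroutine
from typing import Any


class AsyncThreadSketch:
    """Run an asyncio event loop on a background thread and submit coroutines to it."""

    def __init__(self) -> None:
        self._loop = asyncio.new_event_loop()
        self._thread = threading.Thread(target=self._loop.run_forever, daemon=True)
        self._thread.start()

    def run_coro(self, coro: Coroutine[Any, Any, Any], timeout: float | None = 60) -> Any:
        # Submit the coroutine to the loop thread and block the caller until it finishes.
        future = asyncio.run_coroutine_threadsafe(coro, self._loop)
        return future.result(timeout=timeout)

    def close(self) -> None:
        # Stop the loop from within its own thread, then wait for the thread to exit.
        self._loop.call_soon_threadsafe(self._loop.stop)
        self._thread.join()
        self._loop.close()


async def _example() -> str:
    await asyncio.sleep(0.1)
    return 'done'


if __name__ == '__main__':
    async_thread = AsyncThreadSketch()
    print(async_thread.run_coro(_example()))  # -> done
    async_thread.close()
```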
diff --git a/src/apify/scrapy/utils.py b/src/apify/scrapy/utils.py
index 1f92d4ff..860c1c33 100644
--- a/src/apify/scrapy/utils.py
+++ b/src/apify/scrapy/utils.py
@@ -1,29 +1,16 @@
from __future__ import annotations
-import asyncio
from base64 import b64encode
from typing import TYPE_CHECKING
from urllib.parse import unquote
-from apify_shared.utils import ignore_docs
+from scrapy.utils.project import get_project_settings
+from scrapy.utils.python import to_bytes
-try:
- from scrapy.utils.project import get_project_settings
- from scrapy.utils.python import to_bytes
+if TYPE_CHECKING:
+ from scrapy.settings import Settings
- if TYPE_CHECKING:
- from scrapy.settings import Settings
-except ImportError as exc:
- raise ImportError(
- 'To use this module, you need to install the "scrapy" extra. For example, if you use pip, run '
- '"pip install apify[scrapy]".'
- ) from exc
-
-nested_event_loop: asyncio.AbstractEventLoop = asyncio.new_event_loop()
-
-
-@ignore_docs
def get_basic_auth_header(username: str, password: str, auth_encoding: str = 'latin-1') -> bytes:
"""Generate a basic authentication header for the given username and password."""
string = f'{unquote(username)}:{unquote(password)}'
@@ -31,18 +18,6 @@ def get_basic_auth_header(username: str, password: str, auth_encoding: str = 'la
return b'Basic ' + b64encode(user_pass)
-@ignore_docs
-def get_running_event_loop_id() -> int:
- """Get the ID of the currently running event loop.
-
- It could be useful mainly for debugging purposes.
-
- Returns:
- The ID of the event loop.
- """
- return id(asyncio.get_running_loop())
-
-
def apply_apify_settings(*, settings: Settings | None = None, proxy_config: dict | None = None) -> Settings:
"""Integrates Apify configuration into a Scrapy project settings.
@@ -65,10 +40,6 @@ def apply_apify_settings(*, settings: Settings | None = None, proxy_config: dict
# ensuring it is executed as the final step in the pipeline sequence
settings['ITEM_PIPELINES']['apify.scrapy.pipelines.ActorDatasetPushPipeline'] = 1000
- # Disable the default AjaxCrawlMiddleware since it can be problematic with Apify. It can return a new request
- # during process_response, but currently we have no way of detecting it and handling it properly.
- settings['DOWNLOADER_MIDDLEWARES']['scrapy.downloadermiddlewares.ajaxcrawl.AjaxCrawlMiddleware'] = None
-
# Replace the default HttpProxyMiddleware with ApifyHttpProxyMiddleware
settings['DOWNLOADER_MIDDLEWARES']['scrapy.downloadermiddlewares.httpproxy.HttpProxyMiddleware'] = None
settings['DOWNLOADER_MIDDLEWARES']['apify.scrapy.middlewares.ApifyHttpProxyMiddleware'] = 750
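
`apply_apify_settings` is typically called from the Actor's main coroutine before the crawler starts: per the hunk above, it appends `ActorDatasetPushPipeline` as the final item pipeline and swaps Scrapy's stock `HttpProxyMiddleware` for `ApifyHttpProxyMiddleware`. A rough usage sketch, assuming the `apify[scrapy]` extra is installed and a Scrapy project is on the path; the `proxy_config` shape mirrors the `proxyConfiguration` Actor input used in the integration test below.

```python
# Usage sketch for apply_apify_settings - the proxy_config value is an assumed example.
from scrapy.utils.project import get_project_settings

from apify.scrapy.utils import apply_apify_settings


def configure() -> None:
    settings = apply_apify_settings(
        settings=get_project_settings(),
        proxy_config={'useApifyProxy': True},
    )

    # ActorDatasetPushPipeline is registered with priority 1000, i.e. as the last pipeline step.
    print(settings['ITEM_PIPELINES'])
    # ApifyHttpProxyMiddleware (priority 750) replaces the disabled HttpProxyMiddleware.
    print(settings['DOWNLOADER_MIDDLEWARES'])


if __name__ == '__main__':
    configure()
```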
diff --git a/tests/integration/README.md b/tests/integration/README.md
index a3b2dbcf..81dad75e 100644
--- a/tests/integration/README.md
+++ b/tests/integration/README.md
@@ -94,7 +94,6 @@ async def test_something(
output_record = await actor.last_run().key_value_store().get_record('OUTPUT')
assert output_record is not None
assert output_record['value'] == expected_output
-
```
Or you can pass multiple source files with the `source_files` argument if you need something more complex:
diff --git a/tests/integration/conftest.py b/tests/integration/conftest.py
index 9a74924a..96052805 100644
--- a/tests/integration/conftest.py
+++ b/tests/integration/conftest.py
@@ -198,6 +198,7 @@ def __call__(
main_func: Callable | None = None,
main_py: str | None = None,
source_files: Mapping[str, str | bytes] | None = None,
+ additional_requirements: list[str] | None = None,
) -> Awaitable[ActorClientAsync]:
"""Create a temporary Actor from the given main function or source files.
@@ -211,6 +212,7 @@ def __call__(
main_func: The main function of the Actor.
main_py: The `src/main.py` file of the Actor.
source_files: A dictionary of the source files of the Actor.
+ additional_requirements: A list of additional requirements to be added to the `requirements.txt`.
Returns:
A resource client for the created Actor.
@@ -235,6 +237,7 @@ async def _make_actor(
main_func: Callable | None = None,
main_py: str | None = None,
source_files: Mapping[str, str | bytes] | None = None,
+ additional_requirements: list[str] | None = None,
) -> ActorClientAsync:
if not (main_func or main_py or source_files):
raise TypeError('One of `main_func`, `main_py` or `source_files` arguments must be specified')
@@ -270,6 +273,16 @@ async def _make_actor(
actor_source_files = actor_base_source_files.copy()
actor_source_files.update(source_files)
+ if additional_requirements:
+ # Get the current requirements.txt content (as a string).
+ req_content = actor_source_files.get('requirements.txt', '')
+ if isinstance(req_content, bytes):
+ req_content = req_content.decode('utf-8')
+ # Append the additional requirements, each on a new line.
+ additional_reqs = '\n'.join(additional_requirements)
+ req_content = req_content.strip() + '\n' + additional_reqs + '\n'
+ actor_source_files['requirements.txt'] = req_content
+
# Reformat the source files in a format that the Apify API understands.
source_files_for_api = []
for file_name, file_contents in actor_source_files.items():
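
The `additional_requirements` handling above simply appends the extra specifiers to whatever `requirements.txt` the Actor template already contains. A standalone illustration of that merge; the base content shown here is hypothetical.

```python
# Hypothetical base requirements.txt content - only the merge logic mirrors conftest.py.
base = b'apify<3.0.0\n'
additional = ['scrapy~=2.12.0']

req_content = base.decode('utf-8') if isinstance(base, bytes) else base
req_content = req_content.strip() + '\n' + '\n'.join(additional) + '\n'

print(req_content)
# apify<3.0.0
# scrapy~=2.12.0
```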
diff --git a/tests/integration/test_actor_scrapy.py b/tests/integration/test_actor_scrapy.py
new file mode 100644
index 00000000..5cad9ad6
--- /dev/null
+++ b/tests/integration/test_actor_scrapy.py
@@ -0,0 +1,48 @@
+from __future__ import annotations
+
+from pathlib import Path
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+ from .conftest import MakeActorFunction, RunActorFunction
+
+
+async def test_actor_scrapy_title_spider(
+ make_actor: MakeActorFunction,
+ run_actor: RunActorFunction,
+) -> None:
+ base_path = Path('docs/02_guides/code/scrapy_project')
+
+ actor_source_files = {
+ 'src/__init__.py': (base_path / 'src/__init__.py').read_text(),
+ 'src/__main__.py': (base_path / 'src/__main__.py').read_text(),
+ 'src/items.py': (base_path / 'src/items.py').read_text(),
+ 'src/main.py': (base_path / 'src/main.py').read_text(),
+ 'src/settings.py': (base_path / 'src/settings.py').read_text(),
+ 'src/spiders/__init__.py': (base_path / 'src/spiders/__init__.py').read_text(),
+ 'src/spiders/title.py': (base_path / 'src/spiders/title.py').read_text(),
+ }
+
+ actor = await make_actor(
+ 'actor-scrapy-title-spider',
+ source_files=actor_source_files,
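+        # Scrapy gets appended to the generated requirements.txt (see additional_requirements in conftest.py).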
+ additional_requirements=['scrapy~=2.12.0'],
+ )
+ run_result = await run_actor(
+ actor,
+ run_input={
+ 'startUrls': [{'url': 'https://crawlee.dev'}],
+ 'allowedDomains': ['crawlee.dev'],
+ 'proxyConfiguration': {'useApifyProxy': True},
+ },
+ )
+
+ assert run_result.status == 'SUCCEEDED'
+
+ items = await actor.last_run().dataset().list_items()
+
+ assert items.count >= 10
+
+ for item in items.items:
+ assert 'url' in item
+ assert 'title' in item
diff --git a/tests/unit/scrapy/utils/test_apply_apify_settings.py b/tests/unit/scrapy/utils/test_apply_apify_settings.py
index 64e67a24..6c5227c0 100644
--- a/tests/unit/scrapy/utils/test_apply_apify_settings.py
+++ b/tests/unit/scrapy/utils/test_apply_apify_settings.py
@@ -42,7 +42,6 @@ def test_updates_downloader_middlewares() -> None:
assert new_settings.get('DOWNLOADER_MIDDLEWARES') == {
'apify.scrapy.middlewares.ApifyHttpProxyMiddleware': 750,
- 'scrapy.downloadermiddlewares.ajaxcrawl.AjaxCrawlMiddleware': None,
'scrapy.downloadermiddlewares.httpcompression.HttpCompressionMiddleware': 543,
'scrapy.downloadermiddlewares.httpproxy.HttpProxyMiddleware': None,
'scrapy.downloadermiddlewares.robotstxt.RobotsTxtMiddleware': 123,