Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
36 changes: 24 additions & 12 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -128,23 +128,35 @@
- Made to handle `tens-of-thousands` connections / sec

```console
# On Macbook Pro 2015 / 2.8 GHz Intel Core i7
❯ hey -n 10000 -c 100 http://localhost:8899/
# On Macbook Pro 2019 / 2.4 GHz 8-Core Intel Core i9 / 32 GB RAM
❯ hey -n 10000 -c 100 http://localhost:8899/http-route-example

Summary:
Total: 0.6157 secs
Slowest: 0.1049 secs
Fastest: 0.0007 secs
Average: 0.0055 secs
Requests/sec: 16240.5444
Total: 0.3248 secs
Slowest: 0.1007 secs
Fastest: 0.0002 secs
Average: 0.0028 secs
Requests/sec: 30784.7958

Total data: 800000 bytes
Size/request: 80 bytes
Total data: 190000 bytes
Size/request: 19 bytes

Response time histogram:
0.001 [1] |
0.011 [9565] |■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■
0.022 [332] |■
0.000 [1] |
0.010 [9533] |■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■
0.020 [384] |■■

Latency distribution:
10% in 0.0004 secs
25% in 0.0007 secs
50% in 0.0013 secs
75% in 0.0029 secs
90% in 0.0057 secs
95% in 0.0097 secs
99% in 0.0185 secs

Status code distribution:
  [200] 10000 responses
```

- Lightweight
Expand Down
59 changes: 31 additions & 28 deletions examples/pubsub_eventing.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,29 +12,38 @@
import multiprocessing
import logging

from typing import Dict, Any
from typing import Dict, Any, Optional

from proxy.common.constants import DEFAULT_LOG_FORMAT
from proxy.core.event import EventManager, EventQueue, EventSubscriber, eventNames

# Enable debug logging to view core event logs
logging.basicConfig(level=logging.DEBUG)
logging.basicConfig(level=logging.DEBUG, format=DEFAULT_LOG_FORMAT)

logger = logging.getLogger(__name__)


main_publisher_request_id = '1234'
process_publisher_request_id = '12345'
num_events_received = [0, 0]

logger = logging.getLogger(__name__)

# Invoked on the subscriber's internal thread, not the main thread.
def on_event(payload: Dict[str, Any]) -> None:
    '''Subscriber callback: tally received events per publisher.'''
    global num_events_received
    # Slot 0 counts events published from the main thread,
    # slot 1 counts events from the separate publisher process.
    index = 0 if payload['request_id'] == '1234' else 1
    num_events_received[index] += 1


def publisher_process(
shutdown_event: multiprocessing.synchronize.Event,
dispatcher_queue: EventQueue,
) -> None:
logger.info('publisher starting')
logger.info('publisher started')
try:
while not shutdown_event.is_set():
dispatcher_queue.publish(
request_id=process_publisher_request_id,
request_id='12345',
event_name=eventNames.WORK_STARTED,
event_payload={'time': time.time()},
publisher_id='eventing_pubsub_process',
Expand All @@ -44,27 +53,19 @@ def publisher_process(
logger.info('publisher shutdown')


# Execute within a separate thread context
def on_event(payload: Dict[str, Any]) -> None:
'''Subscriber callback.'''
global num_events_received
if payload['request_id'] == main_publisher_request_id:
num_events_received[0] += 1
else:
num_events_received[1] += 1


if __name__ == '__main__':
start_time = time.time()

# Start eventing core
subscriber: Optional[EventSubscriber] = None
with EventManager() as event_manager:
assert event_manager.queue

# Create a subscriber.
# Internally, subscribe will start a separate thread
# to receive incoming published messages.
subscriber = EventSubscriber(event_manager.queue)
subscriber.subscribe(on_event)
subscriber = EventSubscriber(event_manager.queue, callback=on_event)
subscriber.setup()

# Start a publisher process to demonstrate safe exchange
# of messages between processes.
Expand All @@ -81,22 +82,24 @@ def on_event(payload: Dict[str, Any]) -> None:
try:
while True:
event_manager.queue.publish(
request_id=main_publisher_request_id,
request_id='1234',
event_name=eventNames.WORK_STARTED,
event_payload={'time': time.time()},
publisher_id='eventing_pubsub_main',
)
except KeyboardInterrupt:
logger.info('bye!!!')
finally:
# Stop publisher
# Stop publisher process
publisher_shutdown_event.set()
publisher.join()
# Stop subscriber thread
subscriber.unsubscribe()
logger.info(
'Received {0} events from main thread, {1} events from another process, in {2} seconds'.format(
num_events_received[0], num_events_received[1], time.time(
) - start_time,
),
)
logger.info(
'Received {0} events from main thread, {1} events from another process, in {2} seconds'.format(
num_events_received[0], num_events_received[1], time.time(
) - start_time,
),
)
if subscriber:
subscriber.shutdown(do_unsubscribe=False)
3 changes: 3 additions & 0 deletions proxy/common/flag.py
Original file line number Diff line number Diff line change
Expand Up @@ -233,6 +233,9 @@ def initialize(
IpAddress,
opts.get('hostname', ipaddress.ip_address(args.hostname)),
)
args.unix_socket_path = opts.get(
'unix_socket_path', args.unix_socket_path,
)
# AF_UNIX is not available on Windows
# See https://bugs.python.org/issue33408
if os.name != 'nt':
Expand Down
2 changes: 2 additions & 0 deletions proxy/core/acceptor/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,11 +13,13 @@
from .work import Work
from .threadless import Threadless
from .executors import ThreadlessPool
from .listener import Listener

# Public API of the acceptor sub-package; `Listener` is newly exported
# alongside the acceptor/executor machinery.
__all__ = [
    'Acceptor',
    'AcceptorPool',
    'Work',
    'Threadless',
    'ThreadlessPool',
    'Listener',
]
13 changes: 8 additions & 5 deletions proxy/core/acceptor/acceptor.py
Original file line number Diff line number Diff line change
Expand Up @@ -34,7 +34,7 @@ class Acceptor(multiprocessing.Process):
"""Work acceptor process.

On start-up, `Acceptor` accepts a file descriptor which will be used to
accept new work. File descriptor is accepted over a `work_queue` which is
accept new work. File descriptor is accepted over a `fd_queue` which is
closed immediately after receiving the descriptor.

`Acceptor` goes on to listen for new work over the received server socket.
Expand All @@ -55,7 +55,7 @@ class Acceptor(multiprocessing.Process):
def __init__(
self,
idd: int,
work_queue: connection.Connection,
fd_queue: connection.Connection,
flags: argparse.Namespace,
lock: multiprocessing.synchronize.Lock,
executor_queues: List[connection.Connection],
Expand All @@ -72,7 +72,7 @@ def __init__(
# to avoid concurrent accept over server socket
self.lock = lock
# Queue over which server socket fd is received on start-up
self.work_queue: connection.Connection = work_queue
self.fd_queue: connection.Connection = fd_queue
# Available executors
self.executor_queues = executor_queues
self.executor_pids = executor_pids
Expand Down Expand Up @@ -101,8 +101,11 @@ def run(self) -> None:
self.flags.log_format,
)
self.selector = selectors.DefaultSelector()
fileno = recv_handle(self.work_queue)
self.work_queue.close()
# TODO: Use selector on fd_queue so that we can
# dynamically accept from new fds.
fileno = recv_handle(self.fd_queue)
self.fd_queue.close()
# TODO: Convert to socks i.e. list of fds
self.sock = socket.fromfd(
fileno,
family=self.flags.family,
Expand Down
108 changes: 108 additions & 0 deletions proxy/core/acceptor/listener.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,108 @@
# -*- coding: utf-8 -*-
"""
proxy.py
~~~~~~~~
⚡⚡⚡ Fast, Lightweight, Pluggable, TLS interception capable proxy server focused on
Network monitoring, controls & Application development, testing, debugging.

:copyright: (c) 2013-present by Abhinav Singh and contributors.
:license: BSD, see LICENSE for more details.
"""
import os
import socket
import logging
import argparse

from typing import Optional, Any

from ...common.flag import flags
from ...common.constants import DEFAULT_BACKLOG, DEFAULT_IPV6_HOSTNAME, DEFAULT_PORT


flags.add_argument(
    '--backlog',
    type=int,
    default=DEFAULT_BACKLOG,
    help='Default: 100. Maximum number of pending connections to proxy server',
)

flags.add_argument(
    '--hostname',
    type=str,
    default=str(DEFAULT_IPV6_HOSTNAME),
    help='Default: ::1. Server IP address.',
)

flags.add_argument(
    '--port', type=int, default=DEFAULT_PORT,
    help='Default: 8899. Server port.',
)

flags.add_argument(
    '--unix-socket-path',
    type=str,
    default=None,
    # Fixed: help text previously referenced a nonexistent `--host`
    # flag; the flag defined above is `--hostname`.
    help='Default: None. Unix socket path to use. ' +
    'When provided --hostname and --port flags are ignored',
)

logger = logging.getLogger(__name__)


class Listener:
    """Binds and listens on a TCP port or a unix domain socket.

    When ``flags.unix_socket_path`` is set, a unix socket is used and
    ``--hostname`` / ``--port`` are ignored; otherwise a TCP socket is
    bound on ``(hostname, port)``.  Usable as a context manager:
    ``setup()`` binds and listens, ``shutdown()`` closes the socket
    (and unlinks the unix socket file, when one was created).
    """

    def __init__(self, flags: argparse.Namespace) -> None:
        self.flags = flags
        # Set after binding to a port.
        # Stored here separately because ephemeral ports can be used.
        self._port: Optional[int] = None
        self._socket: Optional[socket.socket] = None

    def __enter__(self) -> 'Listener':
        self.setup()
        return self

    def __exit__(self, *args: Any) -> None:
        self.shutdown()

    def fileno(self) -> Optional[int]:
        """Return the listening socket's fd, or None before setup()."""
        if not self._socket:
            return None
        return self._socket.fileno()

    def setup(self) -> None:
        """Bind, start listening and log the bound address.

        Previously the ``unix_socket_path`` condition was evaluated
        twice (once to bind, once to log); the two branches are merged
        here and logging uses lazy %-args instead of eager formatting.
        """
        if self.flags.unix_socket_path:
            self._listen_unix_socket()
            logger.info('Listening on %s', self.flags.unix_socket_path)
        else:
            self._listen_server_port()
            logger.info('Listening on %s:%s', self.flags.hostname, self._port)

    def shutdown(self) -> None:
        """Close the socket; must be called after a successful setup()."""
        assert self._socket
        self._socket.close()
        # Unlink the unix socket file we created on setup.
        if self.flags.unix_socket_path:
            os.remove(self.flags.unix_socket_path)

    def _bind_and_listen(self, address: Any) -> None:
        # Common create/bind/listen path shared by both transports.
        sock = socket.socket(self.flags.family, socket.SOCK_STREAM)
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        sock.bind(address)
        sock.listen(self.flags.backlog)
        # Non-blocking: accepts are driven by a selector elsewhere.
        sock.setblocking(False)
        self._socket = sock

    def _listen_unix_socket(self) -> None:
        # NOTE(review): assumes flags.family is AF_UNIX whenever
        # unix_socket_path is set — confirm against flag initialization.
        self._bind_and_listen(self.flags.unix_socket_path)

    def _listen_server_port(self) -> None:
        self._bind_and_listen((str(self.flags.hostname), self.flags.port))
        # Re-read the port from the socket: an ephemeral port (0)
        # resolves to a concrete value only after bind().
        assert self._socket
        self._port = self._socket.getsockname()[1]
Loading