|
5 | 5 | import time |
6 | 6 | import warnings |
7 | 7 | from abc import ABCMeta, abstractmethod |
| 8 | +from contextlib import nullcontext |
8 | 9 | from math import floor |
9 | 10 | from pathlib import Path |
10 | 11 | from queue import Empty, Queue |
|
18 | 19 | from clp_ffi_py.utils import serialize_dict_to_msgpack |
19 | 20 | from zstandard import FLUSH_FRAME, ZstdCompressionWriter, ZstdCompressor |
20 | 21 |
|
21 | | -from clp_logging.auto_generated_kv_pairs_utils import ( |
22 | | - AutoGeneratedKeyValuePairsBuffer, |
23 | | - create_loglib_generated_log_event_as_auto_generated_kv_pairs, |
24 | | -) |
| 22 | +from clp_logging.auto_generated_kv_pairs_utils import AutoGeneratedKeyValuePairsBuffer |
25 | 23 | from clp_logging.protocol import ( |
26 | 24 | BYTE_ORDER, |
27 | 25 | EOF_CHAR, |
|
31 | 29 | UINT_MAX, |
32 | 30 | ULONG_MAX, |
33 | 31 | ) |
| 32 | +from clp_logging.utils import Timestamp |
34 | 33 |
|
35 | 34 | # TODO: lock writes to zstream if GIL ever goes away |
36 | 35 | # Note: no need to quote "Queue[Tuple[int, bytes]]" in python 3.9 |
@@ -228,7 +227,7 @@ def __init__( |
228 | 227 | self.hard_timeout_thread: Optional[Timer] = None |
229 | 228 | self.soft_timeout_thread: Optional[Timer] = None |
230 | 229 |
|
231 | | - def set_ostream(self, ostream: Union[ZstdCompressionWriter, IO[bytes], Serializer]) -> None: |
| 230 | + def set_ostream(self, ostream: Union[ZstdCompressionWriter, IO[bytes]]) -> None: |
232 | 231 | self.ostream = ostream |
233 | 232 |
|
234 | 233 | def timeout(self) -> None: |
@@ -829,26 +828,21 @@ class ClpKeyValuePairStreamHandler(logging.Handler): |
829 | 828 | :param stream: A writable byte output stream to which the handler will write the serialized IR |
830 | 829 | byte sequences. |
831 | 830 | :param enable_compression: Whether to compress the serialized IR byte sequences using zstd. |
832 | | - :param loglevel_timeout: Customized timeout configuration. |
833 | 831 | """ |
834 | 832 |
|
835 | 833 | def __init__( |
836 | 834 | self, |
837 | 835 | stream: IO[bytes], |
838 | 836 | enable_compression: bool = True, |
839 | | - timezone: Optional[str] = None, |
840 | | - loglevel_timeout: Optional[CLPLogLevelTimeout] = None, |
841 | 837 | ) -> None: |
842 | 838 | super().__init__() |
843 | 839 |
|
844 | 840 | self._enable_compression: bool = enable_compression |
845 | | - self._tz: Optional[str] = timezone |
846 | | - self._loglevel_timeout: Optional[CLPLogLevelTimeout] = loglevel_timeout |
847 | 841 | self._serializer: Optional[Serializer] = None |
848 | 842 | self._formatter: Optional[logging.Formatter] = None |
849 | 843 | self._ostream: IO[bytes] = stream |
850 | 844 |
|
851 | | - self._auto_generated_kv_pairs_buf: AutoGeneratedKeyValuePairsBuffer = ( |
| 845 | + self._auto_gen_kv_pairs_buf: AutoGeneratedKeyValuePairsBuffer = ( |
852 | 846 | AutoGeneratedKeyValuePairsBuffer() |
853 | 847 | ) |
854 | 848 |
|
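For context, a minimal usage sketch of the simplified constructor above; the module path, output file name, and logger name are illustrative assumptions, not part of this change:

import logging

from clp_logging.handlers import ClpKeyValuePairStreamHandler  # assumed module path

out = open("example.clp.zst", "wb")  # illustrative output path
handler = ClpKeyValuePairStreamHandler(out, enable_compression=True)
logger = logging.getLogger("kv-example")
logger.addHandler(handler)
# Log messages must be Python dictionaries; they become the user-generated
# kv pairs of each serialized log event.
logger.warning({"event": "startup", "attempt": 1})
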
@@ -878,90 +872,95 @@ def emit(self, record: logging.LogRecord) -> None: |
878 | 872 | except Exception: |
879 | 873 | self.handleError(record) |
880 | 874 |
|
881 | | - # Added to `logging.StreamHandler` in Python 3.7 |
882 | 875 | # override |
883 | 876 | def setStream(self, stream: IO[bytes]) -> Optional[IO[bytes]]: |
| 877 | + """ |
| 878 | + Sets the instance's stream to the specified value, if it is different. The old stream is |
| 879 | + flushed before the new stream is set. |
| 880 | + NOTE: The old stream will be closed by calling this method. |
| 881 | +
|
| 882 | + :param stream: A writable byte output stream to which the handler will write the serialized |
| 883 | + IR byte sequences. |
| 884 | + :return: The old stream if the stream was changed, or `None` if it wasn't. |
| 885 | + """ |
| 886 | + |
| 887 | + # NOTE: This method mirrors CPython's `logging.StreamHandler.setStream` implementation. |
| 888 | + |
884 | 889 | if stream is self._ostream: |
885 | 890 | return None |
| 891 | + |
886 | 892 | old_stream: IO[bytes] = self._ostream |
887 | | - self._ostream = stream |
888 | | - # TODO: The following call will close the old stream. However, `logging.StreamHandler`'s |
889 | | - # implementation will only flush the stream but leave it opened. To support this behaviour, |
890 | | - # we need `clp_ffi_py.ir.Serializer` to allow closing the serializer without closing the |
891 | | - # underlying output stream. |
892 | | - self._init_new_serializer(stream) |
| 893 | + with self.lock if self.lock else nullcontext(): |
| 894 | + # TODO: The following call will close the old stream. However, `logging.StreamHandler`'s |
| 895 | + # implementation will only flush the stream but leave it open. To support this |
| 896 | + # behaviour, we need `clp_ffi_py.ir.Serializer` to allow closing the serializer without |
| 897 | + # closing the underlying output stream. |
| 898 | + self._init_new_serializer(stream) |
| 899 | + self._ostream = stream |
893 | 900 | return old_stream |
894 | 901 |
|
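Continuing the sketch above, the reworked setStream allows rotating the output without recreating the handler; the new path is illustrative:

# Swap to a new output stream. Per the docstring above, the old serializer is
# closed, which also closes the old stream, so no further cleanup is needed.
old = handler.setStream(open("rotated.clp.zst", "wb"))
# `old` is None when the given stream is already the handler's active stream.
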
895 | 902 | # override |
896 | 903 | def close(self) -> None: |
897 | | - if self._serializer is None: |
| 904 | + if self._is_closed(): |
898 | 905 | return |
899 | | - if self._loglevel_timeout: |
900 | | - self._loglevel_timeout.timeout() |
901 | | - # NOTE: Closing the serializer will ensure that any buffered results are flushed and the |
902 | | - # underlying output stream is properly closed. |
903 | | - self._serializer.close() |
904 | | - self._serializer = None |
| 906 | + self._close_serializer() |
905 | 907 | super().close() |
906 | 908 |
|
907 | 909 | def _is_closed(self) -> bool: |
908 | 910 | return self._serializer is None |
909 | 911 |
|
| 912 | + def _close_serializer(self) -> None: |
| 913 | + """ |
| 914 | + Closes the current serializer if it has been set. |
| 915 | +
|
| 916 | + NOTE: The underlying output stream will also be closed. |
| 917 | + """ |
| 918 | + if self._is_closed(): |
| 919 | + return |
| 920 | + assert self._serializer is not None |
| 921 | + self._serializer.close() |
| 922 | + self._serializer = None |
| 923 | + |
910 | 924 | def _init_new_serializer(self, stream: IO[bytes]) -> None: |
911 | 925 | """ |
912 | 926 | Initializes a new serializer that writes to the given stream. |
913 | 927 |
|
914 | 928 | :param stream: The stream that the underlying serializer will write to. |
915 | 929 | """ |
916 | | - cctx: ZstdCompressor = ZstdCompressor() |
| 930 | + self._close_serializer() |
917 | 931 | self._serializer = Serializer( |
918 | | - cctx.stream_writer(stream) if self._enable_compression else stream |
| 932 | + ZstdCompressor().stream_writer(stream) if self._enable_compression else stream |
919 | 933 | ) |
920 | 934 |
|
921 | 935 | def _write(self, record: logging.LogRecord) -> None: |
922 | 936 | """ |
923 | 937 | Writes the log event into the underlying serializer. |
924 | 938 |
|
925 | 939 | :param record: The log event to serialize. |
926 | | - :raise IOError: If the handler has been already closed. |
| 940 | + :raise RuntimeError: If the handler has already been closed. |
927 | 941 | :raise TypeError: If `record.msg` is not a Python dictionary. |
928 | 942 | """ |
929 | 943 | if self._is_closed(): |
930 | | - raise IOError("Stream already closed.") |
| 944 | + raise RuntimeError("Stream already closed.") |
931 | 945 |
|
932 | 946 | if not isinstance(record.msg, dict): |
933 | 947 | raise TypeError("The log msg must be a valid Python dictionary.") |
934 | 948 |
|
935 | | - curr_ts: int = floor(time.time() * 1000) |
936 | | - |
937 | | - if self._loglevel_timeout is not None: |
938 | | - self._loglevel_timeout.update(record.levelno, curr_ts, self._log_level_timeout_callback) |
939 | | - |
940 | 949 | self._serialize_kv_pair_log_event( |
941 | | - self._auto_generated_kv_pairs_buf.generate(curr_ts, self._tz, record), record.msg |
| 950 | + self._auto_gen_kv_pairs_buf.generate(Timestamp.now(), record), record.msg |
942 | 951 | ) |
943 | 952 |
|
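To illustrate the contract `_write` enforces, continuing the earlier sketch (any exception raised here is caught by `emit` and routed to `handleError`):

logger.warning({"user_id": 42, "action": "login"})  # serialized as a kv-pair log event
logger.warning("not a dict")                        # TypeError inside _write -> handleError
handler.close()                                     # closes the serializer and the stream
logger.warning({"too": "late"})                     # RuntimeError inside _write -> handleError
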
944 | 953 | def _serialize_kv_pair_log_event( |
945 | | - self, auto_generated_kv_pairs: Dict[str, Any], user_generated_kv_pairs: Dict[str, Any] |
| 954 | + self, auto_gen_kv_pairs: Dict[str, Any], user_gen_kv_pairs: Dict[str, Any] |
946 | 955 | ) -> None: |
947 | 956 | """ |
948 | | - :param auto_generated_kv_pairs: A dict of auto generated kv pairs. |
949 | | - :param user_generated_kv_pairs: A dict of user generated kv pairs. |
| 957 | + :param auto_gen_kv_pairs: A dict of auto-generated kv pairs. |
| 958 | + :param user_gen_kv_pairs: A dict of user-generated kv pairs. |
950 | 959 | """ |
951 | | - log_event: Dict[str, Any] = { |
952 | | - AUTO_GENERATED_KV_PAIRS_KEY: auto_generated_kv_pairs, |
953 | | - USER_GENERATED_KV_PAIRS_KEY: user_generated_kv_pairs, |
954 | | - } |
| 960 | + if self._is_closed(): |
| 961 | + raise RuntimeError("Stream already closed.") |
955 | 962 | assert self._serializer is not None |
956 | | - self._serializer.serialize_log_event_from_msgpack_map(serialize_dict_to_msgpack(log_event)) |
957 | | - |
958 | | - def _log_level_timeout_callback(self, msg: str) -> None: |
959 | | - """ |
960 | | - Callback for `CLPLogLevelTimeout` to log internal errors. |
961 | | -
|
962 | | - :param msg: The message sent from `CLPLogLevelTimeout`.` |
963 | | - """ |
964 | | - curr_ts: int = floor(time.time() * 1000) |
965 | | - self._serialize_kv_pair_log_event( |
966 | | - create_loglib_generated_log_event_as_auto_generated_kv_pairs(curr_ts, self._tz, msg), {} |
| 963 | + self._serializer.serialize_log_event_from_msgpack_map( |
| 964 | + serialize_dict_to_msgpack(auto_gen_kv_pairs), |
| 965 | + serialize_dict_to_msgpack(user_gen_kv_pairs), |
967 | 966 | ) |
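Conceptually, the two-argument call above serializes the auto-generated and user-generated maps as separate msgpack maps. A standalone sketch of that call pattern, assuming a BytesIO is accepted as the output stream and that `Serializer` comes from `clp_ffi_py.ir` (the key contents are illustrative):

import io

from clp_ffi_py.ir import Serializer  # assumed import path
from clp_ffi_py.utils import serialize_dict_to_msgpack

buf = io.BytesIO()
serializer = Serializer(buf)
auto_gen = {"timestamp": 1717000000000}        # illustrative auto-generated kv pairs
user_gen = {"event": "startup", "attempt": 1}  # the log record's dict msg
serializer.serialize_log_event_from_msgpack_map(
    serialize_dict_to_msgpack(auto_gen),
    serialize_dict_to_msgpack(user_gen),
)
serializer.close()  # flushes buffered IR and closes `buf`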