» Core Development > Code coverage > Lib/logging/handlers.py

Python code coverage for Lib/logging/handlers.py

# count | content
1n/a# Copyright 2001-2016 by Vinay Sajip. All Rights Reserved.
2n/a#
3n/a# Permission to use, copy, modify, and distribute this software and its
4n/a# documentation for any purpose and without fee is hereby granted,
5n/a# provided that the above copyright notice appear in all copies and that
6n/a# both that copyright notice and this permission notice appear in
7n/a# supporting documentation, and that the name of Vinay Sajip
8n/a# not be used in advertising or publicity pertaining to distribution
9n/a# of the software without specific, written prior permission.
10n/a# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
11n/a# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
12n/a# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
13n/a# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
14n/a# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
15n/a# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16n/a
17n/a"""
18n/aAdditional handlers for the logging package for Python. The core package is
19n/abased on PEP 282 and comments thereto in comp.lang.python.
20n/a
21n/aCopyright (C) 2001-2016 Vinay Sajip. All Rights Reserved.
22n/a
23n/aTo use, simply 'import logging.handlers' and log away!
24n/a"""
25n/a
26n/aimport logging, socket, os, pickle, struct, time, re
27n/afrom stat import ST_DEV, ST_INO, ST_MTIME
28n/aimport queue
29n/atry:
30n/a import threading
31n/aexcept ImportError: #pragma: no cover
32n/a threading = None
33n/a
34n/a#
35n/a# Some constants...
36n/a#
37n/a
# Default ports for the network logging handlers defined in this module.
DEFAULT_TCP_LOGGING_PORT = 9020
DEFAULT_UDP_LOGGING_PORT = 9021
DEFAULT_HTTP_LOGGING_PORT = 9022
DEFAULT_SOAP_LOGGING_PORT = 9023
# Standard syslog port, used as the default address for SysLogHandler.
SYSLOG_UDP_PORT = 514
SYSLOG_TCP_PORT = 514

_MIDNIGHT = 24 * 60 * 60 # number of seconds in a day
46n/a
class BaseRotatingHandler(logging.FileHandler):
    """
    Abstract base for file handlers that roll their log files over.

    Do not instantiate this class directly; use RotatingFileHandler or
    TimedRotatingFileHandler instead.
    """
    def __init__(self, filename, mode, encoding=None, delay=False):
        """
        Use the specified filename for streamed logging
        """
        logging.FileHandler.__init__(self, filename, mode, encoding, delay)
        self.mode = mode
        self.encoding = encoding
        # Optional hooks: callables that customise rollover naming and the
        # rotation action itself.  Both default to None (built-in behavior).
        self.namer = None
        self.rotator = None

    def emit(self, record):
        """
        Emit a record, rolling the log file over first when needed.

        Any exception raised during rollover or writing is routed through
        handleError() rather than propagated.
        """
        try:
            if self.shouldRollover(record):
                self.doRollover()
            logging.FileHandler.emit(self, record)
        except Exception:
            self.handleError(record)

    def rotation_filename(self, default_name):
        """
        Derive the destination filename used during rotation.

        If the 'namer' attribute is callable it is passed the default name
        and its result is used; otherwise (the default, None) the default
        name is returned unchanged.

        :param default_name: The default name for the log file.
        """
        namer = self.namer
        return namer(default_name) if callable(namer) else default_name

    def rotate(self, source, dest):
        """
        Perform the actual rotation of the current log.

        If the 'rotator' attribute is callable it is invoked with
        (source, dest); otherwise the source file is simply renamed
        to the destination.

        :param source: The source filename, e.g. 'test.log'.
        :param dest: The destination filename, e.g. 'test.log.1'.
        """
        rotator = self.rotator
        if callable(rotator):
            rotator(source, dest)
        elif os.path.exists(source):
            # Issue 18940: with delay=True the file may never have been
            # created, so only rename when it actually exists.
            os.rename(source, dest)
116n/a
class RotatingFileHandler(BaseRotatingHandler):
    """
    Handler that logs to a set of files, switching to a fresh file once
    the current one grows past a size threshold.
    """
    def __init__(self, filename, mode='a', maxBytes=0, backupCount=0, encoding=None, delay=False):
        """
        Open the specified file and use it as the stream for logging.

        By default the file grows without bound.  Give maxBytes and
        backupCount non-zero values to enable size-based rollover:
        rollover happens whenever the log file is nearly maxBytes long.
        With backupCount >= 1, backups named "<base>.1" .. "<base>.N"
        are kept; the active file is always "<base>", and on rollover
        it becomes "<base>.1" while older backups shift up by one.

        If maxBytes is zero, rollover never occurs.
        """
        # Rotation only makes sense in append mode: honouring e.g. 'w'
        # would truncate the log on every run of the application and lose
        # the output of previous runs.
        if maxBytes > 0:
            mode = 'a'
        BaseRotatingHandler.__init__(self, filename, mode, encoding, delay)
        self.maxBytes = maxBytes
        self.backupCount = backupCount

    def doRollover(self):
        """
        Do a rollover, as described in __init__().
        """
        if self.stream:
            self.stream.close()
            self.stream = None
        if self.backupCount > 0:
            # Shift existing backups up one slot, oldest first, dropping
            # whatever currently occupies the highest slot.
            for idx in range(self.backupCount - 1, 0, -1):
                src = self.rotation_filename("%s.%d" % (self.baseFilename, idx))
                dst = self.rotation_filename("%s.%d" % (self.baseFilename,
                                                        idx + 1))
                if os.path.exists(src):
                    if os.path.exists(dst):
                        os.remove(dst)
                    os.rename(src, dst)
            dst = self.rotation_filename(self.baseFilename + ".1")
            if os.path.exists(dst):
                os.remove(dst)
            self.rotate(self.baseFilename, dst)
        if not self.delay:
            self.stream = self._open()

    def shouldRollover(self, record):
        """
        Determine whether writing 'record' would push the file past
        maxBytes, i.e. whether rollover should occur first.
        """
        if self.stream is None:                 # delay was set...
            self.stream = self._open()
        if self.maxBytes > 0:                   # are we rolling over?
            msg = "%s\n" % self.format(record)
            # Seek to EOF explicitly: on Windows the position may not be
            # at the end even in append mode.
            self.stream.seek(0, 2)
            if self.stream.tell() + len(msg) >= self.maxBytes:
                return 1
        return 0
192n/a
class TimedRotatingFileHandler(BaseRotatingHandler):
    """
    Handler for logging to a file, rotating the log file at certain timed
    intervals.

    If backupCount is > 0, when rollover is done, no more than backupCount
    files are kept - the oldest ones are deleted.
    """
    def __init__(self, filename, when='h', interval=1, backupCount=0, encoding=None, delay=False, utc=False, atTime=None):
        """
        Open the specified file and use it as the stream for logging.

        Rollover occurs at intervals determined by 'when' and 'interval'
        (see the comment block below for the supported 'when' values).
        'utc' selects UTC instead of local time for timestamp math;
        'atTime' (a datetime.time) fixes the time of day at which
        midnight/weekly rollovers happen.
        """
        BaseRotatingHandler.__init__(self, filename, 'a', encoding, delay)
        self.when = when.upper()
        self.backupCount = backupCount
        self.utc = utc
        self.atTime = atTime
        # Calculate the real rollover interval, which is just the number of
        # seconds between rollovers. Also set the filename suffix used when
        # a rollover occurs. Current 'when' events supported:
        # S - Seconds
        # M - Minutes
        # H - Hours
        # D - Days
        # midnight - roll over at midnight
        # W{0-6} - roll over on a certain day; 0 - Monday
        #
        # Case of the 'when' specifier is not important; lower or upper case
        # will work.
        # extMatch is the regex used to recognise the timestamp suffix that
        # rollover appends to backup filenames (see getFilesToDelete()).
        if self.when == 'S':
            self.interval = 1 # one second
            self.suffix = "%Y-%m-%d_%H-%M-%S"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}-\d{2}(\.\w+)?$"
        elif self.when == 'M':
            self.interval = 60 # one minute
            self.suffix = "%Y-%m-%d_%H-%M"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}(\.\w+)?$"
        elif self.when == 'H':
            self.interval = 60 * 60 # one hour
            self.suffix = "%Y-%m-%d_%H"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}(\.\w+)?$"
        elif self.when == 'D' or self.when == 'MIDNIGHT':
            self.interval = 60 * 60 * 24 # one day
            self.suffix = "%Y-%m-%d"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}(\.\w+)?$"
        elif self.when.startswith('W'):
            self.interval = 60 * 60 * 24 * 7 # one week
            if len(self.when) != 2:
                raise ValueError("You must specify a day for weekly rollover from 0 to 6 (0 is Monday): %s" % self.when)
            if self.when[1] < '0' or self.when[1] > '6':
                raise ValueError("Invalid day specified for weekly rollover: %s" % self.when)
            self.dayOfWeek = int(self.when[1])
            self.suffix = "%Y-%m-%d"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}(\.\w+)?$"
        else:
            raise ValueError("Invalid rollover interval specified: %s" % self.when)

        self.extMatch = re.compile(self.extMatch, re.ASCII)
        self.interval = self.interval * interval # multiply by units requested
        # The following line added because the filename passed in could be a
        # path object (see Issue #27493), but self.baseFilename will be a string
        filename = self.baseFilename
        # Seed the first rollover time from the existing file's mtime when
        # possible, so an app restart does not reset the rollover schedule.
        if os.path.exists(filename):
            t = os.stat(filename)[ST_MTIME]
        else:
            t = int(time.time())
        self.rolloverAt = self.computeRollover(t)

    def computeRollover(self, currentTime):
        """
        Work out the rollover time based on the specified time.

        Returns an epoch timestamp (seconds) for the next rollover.
        """
        result = currentTime + self.interval
        # If we are rolling over at midnight or weekly, then the interval is already known.
        # What we need to figure out is WHEN the next interval is. In other words,
        # if you are rolling over at midnight, then your base interval is 1 day,
        # but you want to start that one day clock at midnight, not now. So, we
        # have to fudge the rolloverAt value in order to trigger the first rollover
        # at the right time. After that, the regular interval will take care of
        # the rest. Note that this code doesn't care about leap seconds. :)
        if self.when == 'MIDNIGHT' or self.when.startswith('W'):
            # This could be done with less code, but I wanted it to be clear
            if self.utc:
                t = time.gmtime(currentTime)
            else:
                t = time.localtime(currentTime)
            currentHour = t[3]
            currentMinute = t[4]
            currentSecond = t[5]
            currentDay = t[6]
            # r is the number of seconds left between now and the next rotation
            if self.atTime is None:
                rotate_ts = _MIDNIGHT
            else:
                rotate_ts = ((self.atTime.hour * 60 + self.atTime.minute)*60 +
                    self.atTime.second)

            r = rotate_ts - ((currentHour * 60 + currentMinute) * 60 +
                currentSecond)
            if r < 0:
                # Rotate time is before the current time (for example when
                # self.rotateAt is 13:45 and it now 14:15), rotation is
                # tomorrow.
                r += _MIDNIGHT
                currentDay = (currentDay + 1) % 7
            result = currentTime + r
            # If we are rolling over on a certain day, add in the number of days until
            # the next rollover, but offset by 1 since we just calculated the time
            # until the next day starts.  There are three cases:
            # Case 1) The day to rollover is today; in this case, do nothing
            # Case 2) The day to rollover is further in the interval (i.e., today is
            #         day 2 (Wednesday) and rollover is on day 6 (Sunday).  Days to
            #         next rollover is simply 6 - 2 - 1, or 3.
            # Case 3) The day to rollover is behind us in the interval (i.e., today
            #         is day 5 (Saturday) and rollover is on day 3 (Thursday).
            #         Days to rollover is 6 - 5 + 3, or 4.  In this case, it's the
            #         number of days left in the current week (1) plus the number
            #         of days in the next week until the rollover day (3).
            # The calculations described in 2) and 3) above need to have a day added.
            # This is because the above time calculation takes us to midnight on this
            # day, i.e. the start of the next day.
            if self.when.startswith('W'):
                day = currentDay # 0 is Monday
                if day != self.dayOfWeek:
                    if day < self.dayOfWeek:
                        daysToWait = self.dayOfWeek - day
                    else:
                        daysToWait = 6 - day + self.dayOfWeek + 1
                    newRolloverAt = result + (daysToWait * (60 * 60 * 24))
                    # For local time, compensate if a DST transition occurs
                    # between now and the computed rollover moment.
                    if not self.utc:
                        dstNow = t[-1]
                        dstAtRollover = time.localtime(newRolloverAt)[-1]
                        if dstNow != dstAtRollover:
                            if not dstNow:  # DST kicks in before next rollover, so we need to deduct an hour
                                addend = -3600
                            else:           # DST bows out before next rollover, so we need to add an hour
                                addend = 3600
                            newRolloverAt += addend
                    result = newRolloverAt
        return result

    def shouldRollover(self, record):
        """
        Determine if rollover should occur.

        record is not used, as we are just comparing times, but it is needed so
        the method signatures are the same
        """
        t = int(time.time())
        if t >= self.rolloverAt:
            return 1
        return 0

    def getFilesToDelete(self):
        """
        Determine the files to delete when rolling over.

        More specific than the earlier method, which just used glob.glob().
        Returns the oldest backups beyond backupCount, identified by the
        timestamp suffix matching self.extMatch.
        """
        dirName, baseName = os.path.split(self.baseFilename)
        fileNames = os.listdir(dirName)
        result = []
        prefix = baseName + "."
        plen = len(prefix)
        for fileName in fileNames:
            if fileName[:plen] == prefix:
                suffix = fileName[plen:]
                if self.extMatch.match(suffix):
                    result.append(os.path.join(dirName, fileName))
        # Lexicographic sort orders the timestamp suffixes chronologically,
        # so the files to delete are at the front of the sorted list.
        result.sort()
        if len(result) < self.backupCount:
            result = []
        else:
            result = result[:len(result) - self.backupCount]
        return result

    def doRollover(self):
        """
        do a rollover; in this case, a date/time stamp is appended to the filename
        when the rollover happens.  However, you want the file to be named for the
        start of the interval, not the current time.  If there is a backup count,
        then we have to get a list of matching filenames, sort them and remove
        the one with the oldest suffix.
        """
        if self.stream:
            self.stream.close()
            self.stream = None
        # get the time that this sequence started at and make it a TimeTuple
        currentTime = int(time.time())
        dstNow = time.localtime(currentTime)[-1]
        t = self.rolloverAt - self.interval
        if self.utc:
            timeTuple = time.gmtime(t)
        else:
            timeTuple = time.localtime(t)
            # Adjust the name's timestamp if a DST transition happened
            # during the interval being closed out.
            dstThen = timeTuple[-1]
            if dstNow != dstThen:
                if dstNow:
                    addend = 3600
                else:
                    addend = -3600
                timeTuple = time.localtime(t + addend)
        dfn = self.rotation_filename(self.baseFilename + "." +
                                     time.strftime(self.suffix, timeTuple))
        if os.path.exists(dfn):
            os.remove(dfn)
        self.rotate(self.baseFilename, dfn)
        if self.backupCount > 0:
            for s in self.getFilesToDelete():
                os.remove(s)
        if not self.delay:
            self.stream = self._open()
        newRolloverAt = self.computeRollover(currentTime)
        while newRolloverAt <= currentTime:
            newRolloverAt = newRolloverAt + self.interval
        #If DST changes and midnight or weekly rollover, adjust for this.
        if (self.when == 'MIDNIGHT' or self.when.startswith('W')) and not self.utc:
            dstAtRollover = time.localtime(newRolloverAt)[-1]
            if dstNow != dstAtRollover:
                if not dstNow:  # DST kicks in before next rollover, so we need to deduct an hour
                    addend = -3600
                else:           # DST bows out before next rollover, so we need to add an hour
                    addend = 3600
                newRolloverAt += addend
        self.rolloverAt = newRolloverAt
415n/a
class WatchedFileHandler(logging.FileHandler):
    """
    A FileHandler that watches the log file for external changes.

    Programs such as newsyslog and logrotate rotate log files out from
    under a running process.  Before each emit, this handler checks
    whether the file at baseFilename still has the device/inode of the
    stream it holds open; if not, the old stream is closed and the file
    reopened, so output keeps going to the current file.  Intended for
    Unix: under Windows open log files cannot be moved or renamed (the
    logging package opens them with exclusive locks), so the handler is
    unnecessary there, and ST_INO is always reported as zero anyway.

    Based on a suggestion and patch by Chad J. Schroeder.
    """
    def __init__(self, filename, mode='a', encoding=None, delay=False):
        logging.FileHandler.__init__(self, filename, mode, encoding, delay)
        self.dev, self.ino = -1, -1
        self._statstream()

    def _statstream(self):
        # Remember the device/inode of the currently open stream (if any)
        # so reopenIfNeeded() can detect replacement of the file.
        if self.stream:
            st = os.fstat(self.stream.fileno())
            self.dev, self.ino = st[ST_DEV], st[ST_INO]

    def reopenIfNeeded(self):
        """
        Reopen log file if needed.

        Checks if the underlying file has changed, and if it
        has, close the old stream and reopen the file to get the
        current stream.
        """
        # Reduce the chance of race conditions by stat'ing by path only
        # once and then fstat'ing our new fd if we opened a new log stream.
        # See issue #14632: Thanks to John Mulligan for the problem report
        # and patch.
        try:
            st = os.stat(self.baseFilename)
        except FileNotFoundError:
            st = None
        changed = not st or st[ST_DEV] != self.dev or st[ST_INO] != self.ino
        if changed and self.stream is not None:
            # the open stream no longer matches the path: swap it out
            self.stream.flush()
            self.stream.close()
            self.stream = None  # See Issue #21742: _open () might fail.
            self.stream = self._open()
            self._statstream()

    def emit(self, record):
        """
        Emit a record, reopening the underlying file first if it has been
        moved or replaced since the last emit.
        """
        self.reopenIfNeeded()
        logging.FileHandler.emit(self, record)
483n/a
484n/a
class SocketHandler(logging.Handler):
    """
    Handler that ships LogRecords, pickled, over a stream socket.

    The connection stays open across logging calls; if the peer resets
    it, reconnection is attempted on a later call (with exponential
    backoff).  What is sent is a pickle of the record's attribute dict
    (__dict__), so the receiving end does not need the logging package
    to process the event — use makeLogRecord to rebuild a LogRecord.
    """

    def __init__(self, host, port):
        """
        Initializes the handler with a specific host address and port.

        When the attribute *closeOnError* is set to True - if a socket error
        occurs, the socket is silently closed and then reopened on the next
        logging call.
        """
        logging.Handler.__init__(self)
        self.host = host
        self.port = port
        # port None means 'host' is a Unix domain socket path.
        self.address = host if port is None else (host, port)
        self.sock = None
        self.closeOnError = False
        self.retryTime = None
        # Exponential backoff parameters.
        self.retryStart = 1.0
        self.retryMax = 30.0
        self.retryFactor = 2.0

    def makeSocket(self, timeout=1):
        """
        A factory method which allows subclasses to define the precise
        type of socket they want.
        """
        if self.port is None:
            # Unix domain socket.
            result = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            result.settimeout(timeout)
            try:
                result.connect(self.address)
            except OSError:
                result.close() # Issue 19182
                raise
            return result
        return socket.create_connection(self.address, timeout=timeout)

    def createSocket(self):
        """
        Try to create a socket, using an exponential backoff with
        a max retry time. Thanks to Robert Olson for the original patch
        (SF #815911) which has been slightly refactored.
        """
        now = time.time()
        # Attempt when this is the first try after a disconnect
        # (retryTime is None) or when we've waited long enough.
        if self.retryTime is not None and now < self.retryTime:
            return
        try:
            self.sock = self.makeSocket()
            self.retryTime = None # next time, no delay before trying
        except OSError:
            # Creation failed: schedule the next permissible attempt.
            if self.retryTime is None:
                self.retryPeriod = self.retryStart
            else:
                self.retryPeriod = self.retryPeriod * self.retryFactor
                if self.retryPeriod > self.retryMax:
                    self.retryPeriod = self.retryMax
            self.retryTime = now + self.retryPeriod

    def send(self, s):
        """
        Send a pickled string to the socket.

        This function allows for partial sends which can happen when the
        network is busy.
        """
        if self.sock is None:
            self.createSocket()
        # self.sock may still be None: either the retry time has not been
        # reached yet, or reconnection was attempted and failed again.
        if self.sock:
            try:
                self.sock.sendall(s)
            except OSError: #pragma: no cover
                self.sock.close()
                self.sock = None # so we can call createSocket next time

    def makePickle(self, record):
        """
        Pickles the record in binary format with a length prefix, and
        returns it ready for transmission across the socket.
        """
        if record.exc_info:
            # Called for its side effect only: ensures the traceback text
            # lands in record.exc_text before pickling.
            self.format(record)
        # See issue #14436: If msg or args are objects, they may not be
        # available on the receiving end. So we convert the msg % args
        # to a string, save it as msg and zap the args.
        d = dict(record.__dict__)
        d['msg'] = record.getMessage()
        d['args'] = None
        d['exc_info'] = None
        # Issue #25685: delete 'message' if present: redundant with 'msg'
        d.pop('message', None)
        payload = pickle.dumps(d, 1)
        return struct.pack(">L", len(payload)) + payload

    def handleError(self, record):
        """
        Handle an error during logging.

        Most likely cause: connection lost.  With closeOnError set, just
        drop the socket so the next event reconnects; otherwise defer to
        the base class behavior.
        """
        if self.closeOnError and self.sock:
            self.sock.close()
            self.sock = None #try to reconnect next time
        else:
            logging.Handler.handleError(self, record)

    def emit(self, record):
        """
        Emit a record.

        Pickles the record and writes it to the socket in binary format.
        If there is an error with the socket, silently drop the packet.
        If there was a problem with the socket, re-establishes the
        socket.
        """
        try:
            self.send(self.makePickle(record))
        except Exception:
            self.handleError(record)

    def close(self):
        """
        Closes the socket.
        """
        self.acquire()
        try:
            sock, self.sock = self.sock, None
            if sock:
                sock.close()
            logging.Handler.close(self)
        finally:
            self.release()
651n/a
class DatagramHandler(SocketHandler):
    """
    SocketHandler variant that sends pickled LogRecords over UDP.

    As with SocketHandler, what travels is a pickle of the record's
    __dict__, so the receiver needs only makeLogRecord — not the full
    logging setup — to rebuild the record.
    """
    def __init__(self, host, port):
        """
        Initializes the handler with a specific host address and port.
        """
        SocketHandler.__init__(self, host, port)
        self.closeOnError = False

    def makeSocket(self):
        """
        The factory method of SocketHandler is here overridden to create
        a UDP socket (SOCK_DGRAM).
        """
        family = socket.AF_UNIX if self.port is None else socket.AF_INET
        return socket.socket(family, socket.SOCK_DGRAM)

    def send(self, s):
        """
        Send a pickled string to a socket.

        This function no longer allows for partial sends which can happen
        when the network is busy - UDP does not guarantee delivery and
        can deliver packets out of sequence.
        """
        if self.sock is None:
            self.createSocket()
        self.sock.sendto(s, self.address)
693n/a
694n/aclass SysLogHandler(logging.Handler):
695n/a """
696n/a A handler class which sends formatted logging records to a syslog
697n/a server. Based on Sam Rushing's syslog module:
698n/a http://www.nightmare.com/squirl/python-ext/misc/syslog.py
699n/a Contributed by Nicolas Untz (after which minor refactoring changes
700n/a have been made).
701n/a """
702n/a
703n/a # from <linux/sys/syslog.h>:
704n/a # ======================================================================
705n/a # priorities/facilities are encoded into a single 32-bit quantity, where
706n/a # the bottom 3 bits are the priority (0-7) and the top 28 bits are the
707n/a # facility (0-big number). Both the priorities and the facilities map
708n/a # roughly one-to-one to strings in the syslogd(8) source code. This
709n/a # mapping is included in this file.
710n/a #
711n/a # priorities (these are ordered)
712n/a
713n/a LOG_EMERG = 0 # system is unusable
714n/a LOG_ALERT = 1 # action must be taken immediately
715n/a LOG_CRIT = 2 # critical conditions
716n/a LOG_ERR = 3 # error conditions
717n/a LOG_WARNING = 4 # warning conditions
718n/a LOG_NOTICE = 5 # normal but significant condition
719n/a LOG_INFO = 6 # informational
720n/a LOG_DEBUG = 7 # debug-level messages
721n/a
722n/a # facility codes
723n/a LOG_KERN = 0 # kernel messages
724n/a LOG_USER = 1 # random user-level messages
725n/a LOG_MAIL = 2 # mail system
726n/a LOG_DAEMON = 3 # system daemons
727n/a LOG_AUTH = 4 # security/authorization messages
728n/a LOG_SYSLOG = 5 # messages generated internally by syslogd
729n/a LOG_LPR = 6 # line printer subsystem
730n/a LOG_NEWS = 7 # network news subsystem
731n/a LOG_UUCP = 8 # UUCP subsystem
732n/a LOG_CRON = 9 # clock daemon
733n/a LOG_AUTHPRIV = 10 # security/authorization messages (private)
734n/a LOG_FTP = 11 # FTP daemon
735n/a
736n/a # other codes through 15 reserved for system use
737n/a LOG_LOCAL0 = 16 # reserved for local use
738n/a LOG_LOCAL1 = 17 # reserved for local use
739n/a LOG_LOCAL2 = 18 # reserved for local use
740n/a LOG_LOCAL3 = 19 # reserved for local use
741n/a LOG_LOCAL4 = 20 # reserved for local use
742n/a LOG_LOCAL5 = 21 # reserved for local use
743n/a LOG_LOCAL6 = 22 # reserved for local use
744n/a LOG_LOCAL7 = 23 # reserved for local use
745n/a
746n/a priority_names = {
747n/a "alert": LOG_ALERT,
748n/a "crit": LOG_CRIT,
749n/a "critical": LOG_CRIT,
750n/a "debug": LOG_DEBUG,
751n/a "emerg": LOG_EMERG,
752n/a "err": LOG_ERR,
753n/a "error": LOG_ERR, # DEPRECATED
754n/a "info": LOG_INFO,
755n/a "notice": LOG_NOTICE,
756n/a "panic": LOG_EMERG, # DEPRECATED
757n/a "warn": LOG_WARNING, # DEPRECATED
758n/a "warning": LOG_WARNING,
759n/a }
760n/a
761n/a facility_names = {
762n/a "auth": LOG_AUTH,
763n/a "authpriv": LOG_AUTHPRIV,
764n/a "cron": LOG_CRON,
765n/a "daemon": LOG_DAEMON,
766n/a "ftp": LOG_FTP,
767n/a "kern": LOG_KERN,
768n/a "lpr": LOG_LPR,
769n/a "mail": LOG_MAIL,
770n/a "news": LOG_NEWS,
771n/a "security": LOG_AUTH, # DEPRECATED
772n/a "syslog": LOG_SYSLOG,
773n/a "user": LOG_USER,
774n/a "uucp": LOG_UUCP,
775n/a "local0": LOG_LOCAL0,
776n/a "local1": LOG_LOCAL1,
777n/a "local2": LOG_LOCAL2,
778n/a "local3": LOG_LOCAL3,
779n/a "local4": LOG_LOCAL4,
780n/a "local5": LOG_LOCAL5,
781n/a "local6": LOG_LOCAL6,
782n/a "local7": LOG_LOCAL7,
783n/a }
784n/a
785n/a #The map below appears to be trivially lowercasing the key. However,
786n/a #there's more to it than meets the eye - in some locales, lowercasing
787n/a #gives unexpected results. See SF #1524081: in the Turkish locale,
788n/a #"INFO".lower() != "info"
789n/a priority_map = {
790n/a "DEBUG" : "debug",
791n/a "INFO" : "info",
792n/a "WARNING" : "warning",
793n/a "ERROR" : "error",
794n/a "CRITICAL" : "critical"
795n/a }
796n/a
797n/a def __init__(self, address=('localhost', SYSLOG_UDP_PORT),
798n/a facility=LOG_USER, socktype=None):
799n/a """
800n/a Initialize a handler.
801n/a
802n/a If address is specified as a string, a UNIX socket is used. To log to a
803n/a local syslogd, "SysLogHandler(address="/dev/log")" can be used.
804n/a If facility is not specified, LOG_USER is used. If socktype is
805n/a specified as socket.SOCK_DGRAM or socket.SOCK_STREAM, that specific
806n/a socket type will be used. For Unix sockets, you can also specify a
807n/a socktype of None, in which case socket.SOCK_DGRAM will be used, falling
808n/a back to socket.SOCK_STREAM.
809n/a """
810n/a logging.Handler.__init__(self)
811n/a
812n/a self.address = address
813n/a self.facility = facility
814n/a self.socktype = socktype
815n/a
816n/a if isinstance(address, str):
817n/a self.unixsocket = True
818n/a self._connect_unixsocket(address)
819n/a else:
820n/a self.unixsocket = False
821n/a if socktype is None:
822n/a socktype = socket.SOCK_DGRAM
823n/a self.socket = socket.socket(socket.AF_INET, socktype)
824n/a if socktype == socket.SOCK_STREAM:
825n/a self.socket.connect(address)
826n/a self.socktype = socktype
827n/a self.formatter = None
828n/a
    def _connect_unixsocket(self, address):
        """
        Connect self.socket to the given AF_UNIX address.

        Tries the configured socket type first (SOCK_DGRAM when self.socktype
        is None). Only when the user did not request a specific type is a
        failed datagram connect retried once with SOCK_STREAM; otherwise the
        original OSError propagates.
        """
        use_socktype = self.socktype
        if use_socktype is None:
            use_socktype = socket.SOCK_DGRAM
        self.socket = socket.socket(socket.AF_UNIX, use_socktype)
        try:
            self.socket.connect(address)
            # it worked, so set self.socktype to the used type
            self.socktype = use_socktype
        except OSError:
            self.socket.close()
            if self.socktype is not None:
                # user didn't specify falling back, so fail
                raise
            # Retry with a stream socket (some syslog daemons only listen
            # on SOCK_STREAM Unix sockets).
            use_socktype = socket.SOCK_STREAM
            self.socket = socket.socket(socket.AF_UNIX, use_socktype)
            try:
                self.socket.connect(address)
                # it worked, so set self.socktype to the used type
                self.socktype = use_socktype
            except OSError:
                self.socket.close()
                raise
852n/a
853n/a def encodePriority(self, facility, priority):
854n/a """
855n/a Encode the facility and priority. You can pass in strings or
856n/a integers - if strings are passed, the facility_names and
857n/a priority_names mapping dictionaries are used to convert them to
858n/a integers.
859n/a """
860n/a if isinstance(facility, str):
861n/a facility = self.facility_names[facility]
862n/a if isinstance(priority, str):
863n/a priority = self.priority_names[priority]
864n/a return (facility << 3) | priority
865n/a
866n/a def close (self):
867n/a """
868n/a Closes the socket.
869n/a """
870n/a self.acquire()
871n/a try:
872n/a self.socket.close()
873n/a logging.Handler.close(self)
874n/a finally:
875n/a self.release()
876n/a
877n/a def mapPriority(self, levelName):
878n/a """
879n/a Map a logging level name to a key in the priority_names map.
880n/a This is useful in two scenarios: when custom levels are being
881n/a used, and in the case where you can't do a straightforward
882n/a mapping by lowercasing the logging level name because of locale-
883n/a specific issues (see SF #1524081).
884n/a """
885n/a return self.priority_map.get(levelName, "warning")
886n/a
    ident = ''          # string prepended to every message (e.g. a program tag)
    append_nul = True   # some old syslog daemons expect a NUL terminator

    def emit(self, record):
        """
        Emit a record.

        The record is formatted, and then sent to the syslog server. If
        exception information is present, it is NOT sent to the server.
        """
        try:
            msg = self.format(record)
            if self.ident:
                msg = self.ident + msg
            if self.append_nul:
                msg += '\000'

            # We need to convert record level to lowercase, maybe this will
            # change in the future.
            prio = '<%d>' % self.encodePriority(self.facility,
                                                self.mapPriority(record.levelname))
            prio = prio.encode('utf-8')
            # Message is a string. Convert to bytes as required by RFC 5424
            msg = msg.encode('utf-8')
            msg = prio + msg
            if self.unixsocket:
                try:
                    self.socket.send(msg)
                except OSError:
                    # The local syslog daemon may have restarted and invalidated
                    # our socket: reconnect once and retry the send.
                    self.socket.close()
                    self._connect_unixsocket(self.address)
                    self.socket.send(msg)
            elif self.socktype == socket.SOCK_DGRAM:
                self.socket.sendto(msg, self.address)
            else:
                self.socket.sendall(msg)
        except Exception:
            self.handleError(record)
925n/a
class SMTPHandler(logging.Handler):
    """
    A handler class which sends an SMTP email for each logging event.
    """
    def __init__(self, mailhost, fromaddr, toaddrs, subject,
                 credentials=None, secure=None, timeout=5.0):
        """
        Initialize the handler.

        Initialize the instance with the from and to addresses and subject
        line of the email. To specify a non-standard SMTP port, use the
        (host, port) tuple format for the mailhost argument. To specify
        authentication credentials, supply a (username, password) tuple
        for the credentials argument. To specify the use of a secure
        protocol (TLS), pass in a tuple for the secure argument. This will
        only be used when authentication credentials are supplied. The tuple
        will be either an empty tuple, or a single-value tuple with the name
        of a keyfile, or a 2-value tuple with the names of the keyfile and
        certificate file. (This tuple is passed to the `starttls` method).
        A timeout in seconds can be specified for the SMTP connection (the
        default is 5 seconds).
        """
        logging.Handler.__init__(self)
        if isinstance(mailhost, (list, tuple)):
            self.mailhost, self.mailport = mailhost
        else:
            self.mailhost, self.mailport = mailhost, None
        if isinstance(credentials, (list, tuple)):
            self.username, self.password = credentials
        else:
            # Define both attributes so code reading self.password does not
            # hit AttributeError when no credentials were supplied.
            self.username = None
            self.password = None
        self.fromaddr = fromaddr
        if isinstance(toaddrs, str):
            toaddrs = [toaddrs]  # accept a single bare address
        self.toaddrs = toaddrs
        self.subject = subject
        self.secure = secure
        self.timeout = timeout

    def getSubject(self, record):
        """
        Determine the subject for the email.

        If you want to specify a subject line which is record-dependent,
        override this method.
        """
        return self.subject

    def emit(self, record):
        """
        Emit a record.

        Format the record and send it to the specified addressees.
        Any failure is routed through handleError() rather than raised.
        """
        try:
            import smtplib
            from email.message import EmailMessage
            import email.utils

            port = self.mailport
            if not port:
                port = smtplib.SMTP_PORT
            smtp = smtplib.SMTP(self.mailhost, port, timeout=self.timeout)
            msg = EmailMessage()
            msg['From'] = self.fromaddr
            msg['To'] = ','.join(self.toaddrs)
            msg['Subject'] = self.getSubject(record)
            msg['Date'] = email.utils.localtime()
            msg.set_content(self.format(record))
            if self.username:
                if self.secure is not None:
                    # Upgrade the connection to TLS before authenticating.
                    smtp.ehlo()
                    smtp.starttls(*self.secure)
                    smtp.ehlo()
                smtp.login(self.username, self.password)
            smtp.send_message(msg)
            smtp.quit()
        except Exception:
            self.handleError(record)
1005n/a
class NTEventLogHandler(logging.Handler):
    """
    A handler class which sends events to the NT Event Log. Adds a
    registry entry for the specified application name. If no dllname is
    provided, win32service.pyd (which contains some basic message
    placeholders) is used. Note that use of these placeholders will make
    your event logs big, as the entire message source is held in the log.
    If you want slimmer logs, you have to pass in the name of your own DLL
    which contains the message definitions you want to use in the event log.
    """
    def __init__(self, appname, dllname=None, logtype="Application"):
        logging.Handler.__init__(self)
        try:
            import win32evtlogutil, win32evtlog
            self.appname = appname
            self._welu = win32evtlogutil
            if not dllname:
                # Default to win32service.pyd, two directories up from
                # win32evtlogutil's own location.
                base = os.path.dirname(os.path.dirname(self._welu.__file__))
                dllname = os.path.join(base, r'win32service.pyd')
            self.dllname = dllname
            self.logtype = logtype
            self._welu.AddSourceToRegistry(appname, dllname, logtype)
            self.deftype = win32evtlog.EVENTLOG_ERROR_TYPE
            self.typemap = {
                logging.DEBUG   : win32evtlog.EVENTLOG_INFORMATION_TYPE,
                logging.INFO    : win32evtlog.EVENTLOG_INFORMATION_TYPE,
                logging.WARNING : win32evtlog.EVENTLOG_WARNING_TYPE,
                logging.ERROR   : win32evtlog.EVENTLOG_ERROR_TYPE,
                logging.CRITICAL: win32evtlog.EVENTLOG_ERROR_TYPE,
            }
        except ImportError:
            # pywin32 is not installed; the handler becomes a no-op.
            print("The Python Win32 extensions for NT (service, event "
                  "logging) appear not to be available.")
            self._welu = None

    def getMessageID(self, record):
        """
        Return the message ID for the event record. If you are using your
        own messages, you could do this by having the msg passed to the
        logger being an ID rather than a formatting string. Then, in here,
        you could use a dictionary lookup to get the message ID. This
        version returns 1, which is the base message ID in win32service.pyd.
        """
        return 1

    def getEventCategory(self, record):
        """
        Return the event category for the record.

        Override this if you want to specify your own categories. This version
        returns 0.
        """
        return 0

    def getEventType(self, record):
        """
        Return the event type for the record.

        Override this if you want to specify your own types. This version does
        a mapping using the handler's typemap attribute, which is set up in
        __init__() to a dictionary which contains mappings for DEBUG, INFO,
        WARNING, ERROR and CRITICAL. If you are using your own levels you will
        either need to override this method or place a suitable dictionary in
        the handler's typemap attribute.
        """
        return self.typemap.get(record.levelno, self.deftype)

    def emit(self, record):
        """
        Emit a record.

        Determine the message ID, event category and event type, then log
        the formatted message in the NT event log. Does nothing when the
        Win32 extensions were unavailable at construction time.
        """
        if not self._welu:
            return
        try:
            msg_id = self.getMessageID(record)
            category = self.getEventCategory(record)
            ev_type = self.getEventType(record)
            message = self.format(record)
            self._welu.ReportEvent(self.appname, msg_id, category, ev_type,
                                   [message])
        except Exception:
            self.handleError(record)

    def close(self):
        """
        Clean up this handler.

        You can remove the application name from the registry as a
        source of event log entries. However, if you do this, you will
        not be able to see the events as you intended in the Event Log
        Viewer - it needs to be able to access the registry to get the
        DLL name.
        """
        #self._welu.RemoveSourceFromRegistry(self.appname, self.logtype)
        logging.Handler.close(self)
1103n/a
class HTTPHandler(logging.Handler):
    """
    A class which sends records to a Web server, using either GET or
    POST semantics.
    """
    def __init__(self, host, url, method="GET", secure=False, credentials=None,
                 context=None):
        """
        Initialize the instance with the host, the request URL, and the method
        ("GET" or "POST")
        """
        logging.Handler.__init__(self)
        method = method.upper()
        if method not in ("GET", "POST"):
            raise ValueError("method must be GET or POST")
        if context is not None and not secure:
            raise ValueError("context parameter only makes sense "
                             "with secure=True")
        self.host = host
        self.url = url
        self.method = method
        self.secure = secure
        self.credentials = credentials
        self.context = context

    def mapLogRecord(self, record):
        """
        Default implementation of mapping the log record into a dict
        that is sent as the CGI data. Overwrite in your class.
        Contributed by Franz Glasner.
        """
        return record.__dict__

    def emit(self, record):
        """
        Emit a record.

        Send the record to the Web server as a percent-encoded dictionary
        """
        try:
            import http.client, urllib.parse
            host = self.host
            if self.secure:
                conn = http.client.HTTPSConnection(host, context=self.context)
            else:
                conn = http.client.HTTPConnection(host)
            url = self.url
            data = urllib.parse.urlencode(self.mapLogRecord(record))
            if self.method == "GET":
                # Append the data to the query string, creating one if needed.
                sep = '&' if url.find('?') >= 0 else '?'
                url = url + "%c%s" % (sep, data)
            conn.putrequest(self.method, url)
            # support multiple hosts on one IP address...
            # need to strip optional :port from host, if present
            colon = host.find(":")
            if colon >= 0:
                host = host[:colon]
            conn.putheader("Host", host)
            if self.method == "POST":
                conn.putheader("Content-type",
                               "application/x-www-form-urlencoded")
                conn.putheader("Content-length", str(len(data)))
            if self.credentials:
                import base64
                raw = ('%s:%s' % self.credentials).encode('utf-8')
                auth = 'Basic ' + base64.b64encode(raw).strip().decode('ascii')
                conn.putheader('Authorization', auth)
            conn.endheaders()
            if self.method == "POST":
                conn.send(data.encode('utf-8'))
            conn.getresponse()  # can't do anything with the result
        except Exception:
            self.handleError(record)
1180n/a
class BufferingHandler(logging.Handler):
    """
    A handler class which buffers logging records in memory. Whenever each
    record is added to the buffer, a check is made to see if the buffer should
    be flushed. If it should, then flush() is expected to do what's needed.
    """
    def __init__(self, capacity):
        """
        Initialize the handler with the buffer size.
        """
        logging.Handler.__init__(self)
        self.capacity = capacity
        self.buffer = []

    def shouldFlush(self, record):
        """
        Should the handler flush its buffer?

        Returns true once the buffer has reached capacity. Override to
        implement a custom flushing strategy.
        """
        return len(self.buffer) >= self.capacity

    def emit(self, record):
        """
        Emit a record.

        Append the record to the buffer, then flush if shouldFlush()
        says the buffer is due.
        """
        self.buffer.append(record)
        if self.shouldFlush(record):
            self.flush()

    def flush(self):
        """
        Override to implement custom flushing behaviour.

        This version simply discards the buffered records, under the
        handler lock.
        """
        self.acquire()
        try:
            self.buffer = []
        finally:
            self.release()

    def close(self):
        """
        Close the handler.

        Flushes any buffered records, then chains to the parent close()
        even if flushing fails.
        """
        try:
            self.flush()
        finally:
            logging.Handler.close(self)
1237n/a
class MemoryHandler(BufferingHandler):
    """
    A handler class which buffers logging records in memory, periodically
    flushing them to a target handler. Flushing occurs whenever the buffer
    is full, or when an event of a certain severity or greater is seen.
    """
    def __init__(self, capacity, flushLevel=logging.ERROR, target=None,
                 flushOnClose=True):
        """
        Initialize the handler with the buffer size, the level at which
        flushing should occur and an optional target.

        Note that without a target being set either here or via setTarget(),
        a MemoryHandler is no use to anyone!

        The ``flushOnClose`` argument is ``True`` for backward compatibility
        reasons - the old behaviour is that when the handler is closed, the
        buffer is flushed, even if the flush level hasn't been exceeded nor the
        capacity exceeded. To prevent this, set ``flushOnClose`` to ``False``.
        """
        BufferingHandler.__init__(self, capacity)
        self.flushLevel = flushLevel
        self.target = target
        # See Issue #26559 for why this has been added
        self.flushOnClose = flushOnClose

    def shouldFlush(self, record):
        """
        Check for buffer full or a record at the flushLevel or higher.
        """
        if len(self.buffer) >= self.capacity:
            return True
        return record.levelno >= self.flushLevel

    def setTarget(self, target):
        """
        Set the target handler for this handler.
        """
        self.target = target

    def flush(self):
        """
        For a MemoryHandler, flushing means just sending the buffered
        records to the target, if there is one. Override if you want
        different behaviour.

        The record buffer is also cleared by this operation.
        """
        self.acquire()
        try:
            target = self.target
            if target:
                for buffered in self.buffer:
                    target.handle(buffered)
            self.buffer = []
        finally:
            self.release()

    def close(self):
        """
        Flush, if appropriately configured, set the target to None and lose the
        buffer.
        """
        try:
            if self.flushOnClose:
                self.flush()
        finally:
            self.acquire()
            try:
                self.target = None
                BufferingHandler.close(self)
            finally:
                self.release()
1309n/a
1310n/a
class QueueHandler(logging.Handler):
    """
    This handler sends events to a queue. Typically, it would be used together
    with a multiprocessing Queue to centralise logging to file in one process
    (in a multi-process application), so as to avoid file write contention
    between processes.

    This code is new in Python 3.2, but this class can be copy pasted into
    user code for use with earlier Python versions.
    """

    def __init__(self, queue):
        """
        Initialise an instance, using the passed queue.
        """
        logging.Handler.__init__(self)
        self.queue = queue

    def enqueue(self, record):
        """
        Enqueue a record.

        The base implementation uses put_nowait. You may want to override
        this method if you want to use blocking, timeouts or custom queue
        implementations.
        """
        self.queue.put_nowait(record)

    def prepare(self, record):
        """
        Prepares a record for queuing. The object returned by this method is
        enqueued.

        The base implementation formats the record to merge the message
        and arguments, and removes unpickleable items from the record
        in-place.

        You might want to override this method if you want to convert
        the record to a dict or JSON string, or send a modified copy
        of the record while leaving the original intact.
        """
        # Formatting renders any traceback into record.exc_text and the
        # merged message into record.message; substituting those for the
        # original msg/args/exc_info removes the attributes most likely
        # to be unpickleable.
        self.format(record)
        record.msg = record.message
        record.args = None
        record.exc_info = None
        return record

    def emit(self, record):
        """
        Emit a record.

        Writes the LogRecord to the queue, preparing it for pickling first.
        """
        try:
            prepared = self.prepare(record)
            self.enqueue(prepared)
        except Exception:
            self.handleError(record)
1374n/a
if threading:
    class QueueListener(object):
        """
        This class implements an internal threaded listener which watches for
        LogRecords being added to a queue, removes them and passes them to a
        list of handlers for processing.
        """
        # Placing this object on the queue tells the monitor thread to exit.
        _sentinel = None

        def __init__(self, queue, *handlers, respect_handler_level=False):
            """
            Initialise an instance with the specified queue and
            handlers.
            """
            self.queue = queue
            self.handlers = handlers
            self._thread = None
            self.respect_handler_level = respect_handler_level

        def dequeue(self, block):
            """
            Dequeue a record and return it, optionally blocking.

            The base implementation uses get. You may want to override this
            method if you want to use timeouts or work with custom queue
            implementations.
            """
            return self.queue.get(block)

        def start(self):
            """
            Start the listener.

            This starts up a background daemon thread to monitor the queue
            for LogRecords to process.
            """
            monitor = threading.Thread(target=self._monitor)
            monitor.daemon = True
            self._thread = monitor
            monitor.start()

        def prepare(self, record):
            """
            Prepare a record for handling.

            This method just returns the passed-in record. You may want to
            override this method if you need to do any custom marshalling or
            manipulation of the record before passing it to the handlers.
            """
            return record

        def handle(self, record):
            """
            Handle a record.

            This just loops through the handlers offering them the record
            to handle.
            """
            record = self.prepare(record)
            for handler in self.handlers:
                if self.respect_handler_level and record.levelno < handler.level:
                    # Skip handlers whose level filters out this record.
                    continue
                handler.handle(record)

        def _monitor(self):
            """
            Monitor the queue for records, and ask the handler
            to deal with them.

            This method runs on a separate, internal thread.
            The thread will terminate if it sees a sentinel object in the queue.
            """
            q = self.queue
            has_task_done = hasattr(q, 'task_done')
            while True:
                try:
                    record = self.dequeue(True)
                    if record is self._sentinel:
                        break
                    self.handle(record)
                    if has_task_done:
                        q.task_done()
                except queue.Empty:
                    break

        def enqueue_sentinel(self):
            """
            This is used to enqueue the sentinel record.

            The base implementation uses put_nowait. You may want to override
            this method if you want to use timeouts or work with custom queue
            implementations.
            """
            self.queue.put_nowait(self._sentinel)

        def stop(self):
            """
            Stop the listener.

            This asks the thread to terminate, and then waits for it to do so.
            Note that if you don't call this before your application exits,
            there may be some records still left on the queue, which won't be
            processed.
            """
            self.enqueue_sentinel()
            self._thread.join()
            self._thread = None