comparison env/lib/python3.7/site-packages/psutil/__init__.py @ 2:6af9afd405e9 draft

"planemo upload commit 0a63dd5f4d38a1f6944587f52a8cd79874177fc1"
author shellac
date Thu, 14 May 2020 14:56:58 -0400
parents 26e78fe6e8c4
children
1 # -*- coding: utf-8 -*-
2
3 # Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
4 # Use of this source code is governed by a BSD-style license that can be
5 # found in the LICENSE file.
6
7 """psutil is a cross-platform library for retrieving information on
8 running processes and system utilization (CPU, memory, disks, network,
9 sensors) in Python. Supported platforms:
10
11 - Linux
12 - Windows
13 - macOS
14 - FreeBSD
15 - OpenBSD
16 - NetBSD
17 - Sun Solaris
18 - AIX
19
20 Works with Python versions from 2.6 to 3.4+.
21 """
22
23 from __future__ import division
24
25 import collections
26 import contextlib
27 import datetime
28 import functools
29 import os
30 import signal
31 import subprocess
32 import sys
33 import threading
34 import time
35 try:
36 import pwd
37 except ImportError:
38 pwd = None
39
40 from . import _common
41 from ._common import AccessDenied
42 from ._common import deprecated_method
43 from ._common import Error
44 from ._common import memoize
45 from ._common import memoize_when_activated
46 from ._common import NoSuchProcess
47 from ._common import TimeoutExpired
48 from ._common import wrap_numbers as _wrap_numbers
49 from ._common import ZombieProcess
50 from ._compat import long
51 from ._compat import PermissionError
52 from ._compat import ProcessLookupError
53 from ._compat import PY3 as _PY3
54
55 from ._common import STATUS_DEAD
56 from ._common import STATUS_DISK_SLEEP
57 from ._common import STATUS_IDLE
58 from ._common import STATUS_LOCKED
59 from ._common import STATUS_PARKED
60 from ._common import STATUS_RUNNING
61 from ._common import STATUS_SLEEPING
62 from ._common import STATUS_STOPPED
63 from ._common import STATUS_TRACING_STOP
64 from ._common import STATUS_WAITING
65 from ._common import STATUS_WAKING
66 from ._common import STATUS_ZOMBIE
67
68 from ._common import CONN_CLOSE
69 from ._common import CONN_CLOSE_WAIT
70 from ._common import CONN_CLOSING
71 from ._common import CONN_ESTABLISHED
72 from ._common import CONN_FIN_WAIT1
73 from ._common import CONN_FIN_WAIT2
74 from ._common import CONN_LAST_ACK
75 from ._common import CONN_LISTEN
76 from ._common import CONN_NONE
77 from ._common import CONN_SYN_RECV
78 from ._common import CONN_SYN_SENT
79 from ._common import CONN_TIME_WAIT
80 from ._common import NIC_DUPLEX_FULL
81 from ._common import NIC_DUPLEX_HALF
82 from ._common import NIC_DUPLEX_UNKNOWN
83
84 from ._common import AIX
85 from ._common import BSD
86 from ._common import FREEBSD # NOQA
87 from ._common import LINUX
88 from ._common import MACOS
89 from ._common import NETBSD # NOQA
90 from ._common import OPENBSD # NOQA
91 from ._common import OSX # deprecated alias
92 from ._common import POSIX # NOQA
93 from ._common import SUNOS
94 from ._common import WINDOWS
95
96 if LINUX:
97 # This is public API and it will be retrieved from _pslinux.py
98 # via sys.modules.
99 PROCFS_PATH = "/proc"
100
101 from . import _pslinux as _psplatform
102
103 from ._pslinux import IOPRIO_CLASS_BE # NOQA
104 from ._pslinux import IOPRIO_CLASS_IDLE # NOQA
105 from ._pslinux import IOPRIO_CLASS_NONE # NOQA
106 from ._pslinux import IOPRIO_CLASS_RT # NOQA
107 # Linux >= 2.6.36
108 if _psplatform.HAS_PRLIMIT:
109 from ._psutil_linux import RLIM_INFINITY # NOQA
110 from ._psutil_linux import RLIMIT_AS # NOQA
111 from ._psutil_linux import RLIMIT_CORE # NOQA
112 from ._psutil_linux import RLIMIT_CPU # NOQA
113 from ._psutil_linux import RLIMIT_DATA # NOQA
114 from ._psutil_linux import RLIMIT_FSIZE # NOQA
115 from ._psutil_linux import RLIMIT_LOCKS # NOQA
116 from ._psutil_linux import RLIMIT_MEMLOCK # NOQA
117 from ._psutil_linux import RLIMIT_NOFILE # NOQA
118 from ._psutil_linux import RLIMIT_NPROC # NOQA
119 from ._psutil_linux import RLIMIT_RSS # NOQA
120 from ._psutil_linux import RLIMIT_STACK # NOQA
121 # Kinda ugly but considerably faster than using hasattr() and
122 # setattr() against the module object (we are at import time:
123 # speed matters).
124 from . import _psutil_linux
125 try:
126 RLIMIT_MSGQUEUE = _psutil_linux.RLIMIT_MSGQUEUE
127 except AttributeError:
128 pass
129 try:
130 RLIMIT_NICE = _psutil_linux.RLIMIT_NICE
131 except AttributeError:
132 pass
133 try:
134 RLIMIT_RTPRIO = _psutil_linux.RLIMIT_RTPRIO
135 except AttributeError:
136 pass
137 try:
138 RLIMIT_RTTIME = _psutil_linux.RLIMIT_RTTIME
139 except AttributeError:
140 pass
141 try:
142 RLIMIT_SIGPENDING = _psutil_linux.RLIMIT_SIGPENDING
143 except AttributeError:
144 pass
145
146 elif WINDOWS:
147 from . import _pswindows as _psplatform
148 from ._psutil_windows import ABOVE_NORMAL_PRIORITY_CLASS # NOQA
149 from ._psutil_windows import BELOW_NORMAL_PRIORITY_CLASS # NOQA
150 from ._psutil_windows import HIGH_PRIORITY_CLASS # NOQA
151 from ._psutil_windows import IDLE_PRIORITY_CLASS # NOQA
152 from ._psutil_windows import NORMAL_PRIORITY_CLASS # NOQA
153 from ._psutil_windows import REALTIME_PRIORITY_CLASS # NOQA
154 from ._pswindows import CONN_DELETE_TCB # NOQA
155 from ._pswindows import IOPRIO_VERYLOW # NOQA
156 from ._pswindows import IOPRIO_LOW # NOQA
157 from ._pswindows import IOPRIO_NORMAL # NOQA
158 from ._pswindows import IOPRIO_HIGH # NOQA
159
160 elif MACOS:
161 from . import _psosx as _psplatform
162
163 elif BSD:
164 from . import _psbsd as _psplatform
165
166 elif SUNOS:
167 from . import _pssunos as _psplatform
168 from ._pssunos import CONN_BOUND # NOQA
169 from ._pssunos import CONN_IDLE # NOQA
170
171 # This is public writable API which is read from _pslinux.py and
172 # _pssunos.py via sys.modules.
173 PROCFS_PATH = "/proc"
174
175 elif AIX:
176 from . import _psaix as _psplatform
177
178 # This is public API and it will be retrieved from _pslinux.py
179 # via sys.modules.
180 PROCFS_PATH = "/proc"
181
182 else: # pragma: no cover
183 raise NotImplementedError('platform %s is not supported' % sys.platform)
184
185
186 __all__ = [
187 # exceptions
188 "Error", "NoSuchProcess", "ZombieProcess", "AccessDenied",
189 "TimeoutExpired",
190
191 # constants
192 "version_info", "__version__",
193
194 "STATUS_RUNNING", "STATUS_IDLE", "STATUS_SLEEPING", "STATUS_DISK_SLEEP",
195 "STATUS_STOPPED", "STATUS_TRACING_STOP", "STATUS_ZOMBIE", "STATUS_DEAD",
196 "STATUS_WAKING", "STATUS_LOCKED", "STATUS_WAITING", "STATUS_LOCKED",
197 "STATUS_PARKED",
198
199 "CONN_ESTABLISHED", "CONN_SYN_SENT", "CONN_SYN_RECV", "CONN_FIN_WAIT1",
200 "CONN_FIN_WAIT2", "CONN_TIME_WAIT", "CONN_CLOSE", "CONN_CLOSE_WAIT",
201 "CONN_LAST_ACK", "CONN_LISTEN", "CONN_CLOSING", "CONN_NONE",
202
203 "AF_LINK",
204
205 "NIC_DUPLEX_FULL", "NIC_DUPLEX_HALF", "NIC_DUPLEX_UNKNOWN",
206
207 "POWER_TIME_UNKNOWN", "POWER_TIME_UNLIMITED",
208
209 "BSD", "FREEBSD", "LINUX", "NETBSD", "OPENBSD", "MACOS", "OSX", "POSIX",
210 "SUNOS", "WINDOWS", "AIX",
211
212 # classes
213 "Process", "Popen",
214
215 # functions
216 "pid_exists", "pids", "process_iter", "wait_procs", # proc
217 "virtual_memory", "swap_memory", # memory
218 "cpu_times", "cpu_percent", "cpu_times_percent", "cpu_count", # cpu
219 "cpu_stats", # "cpu_freq", "getloadavg"
220 "net_io_counters", "net_connections", "net_if_addrs", # network
221 "net_if_stats",
222 "disk_io_counters", "disk_partitions", "disk_usage", # disk
223 # "sensors_temperatures", "sensors_battery", "sensors_fans" # sensors
224 "users", "boot_time", # others
225 ]
226
227
228 __all__.extend(_psplatform.__extra__all__)
229 __author__ = "Giampaolo Rodola'"
230 __version__ = "5.7.0"
231 version_info = tuple([int(num) for num in __version__.split('.')])
232
233 _timer = getattr(time, 'monotonic', time.time)
234 AF_LINK = _psplatform.AF_LINK
235 POWER_TIME_UNLIMITED = _common.POWER_TIME_UNLIMITED
236 POWER_TIME_UNKNOWN = _common.POWER_TIME_UNKNOWN
237 _TOTAL_PHYMEM = None
238 _LOWEST_PID = None
239
240 # Sanity check in case the user messed up with psutil installation
241 # or did something weird with sys.path. In this case we might end
242 # up importing a python module using a C extension module which
243 # was compiled for a different version of psutil.
244 # We want to prevent that by failing sooner rather than later.
245 # See: https://github.com/giampaolo/psutil/issues/564
246 if (int(__version__.replace('.', '')) !=
247 getattr(_psplatform.cext, 'version', None)):
248 msg = "version conflict: %r C extension module was built for another " \
249 "version of psutil" % getattr(_psplatform.cext, "__file__")
250 if hasattr(_psplatform.cext, 'version'):
251 msg += " (%s instead of %s)" % (
252 '.'.join([x for x in str(_psplatform.cext.version)]), __version__)
253 else:
254 msg += " (different than %s)" % __version__
255 msg += "; you may try to 'pip uninstall psutil', manually remove %s" % (
256 getattr(_psplatform.cext, "__file__",
257 "the existing psutil install directory"))
258 msg += " or clean the virtual env somehow, then reinstall"
259 raise ImportError(msg)
260
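# Editor's note: a minimal, standalone sketch of the comparison the sanity
# check above performs. The value 570 is hypothetical and stands for a C
# extension built from psutil 5.7.0; the real value comes from cext.version.
example_py_version = "5.7.0"
example_cext_version = 570
# The dots are stripped so both sides can be compared as plain integers.
assert int(example_py_version.replace('.', '')) == example_cext_version
# The error message rebuilds a dotted string from that integer, e.g. 570 -> "5.7.0":
assert '.'.join([x for x in str(example_cext_version)]) == example_py_version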
261
262 # =====================================================================
263 # --- Utils
264 # =====================================================================
265
266
267 if hasattr(_psplatform, 'ppid_map'):
268 # Faster version (Windows and Linux).
269 _ppid_map = _psplatform.ppid_map
270 else:
271 def _ppid_map():
272 """Return a {pid: ppid, ...} dict for all running processes in
273 one shot. Used to speed up Process.children().
274 """
275 ret = {}
276 for pid in pids():
277 try:
278 ret[pid] = _psplatform.Process(pid).ppid()
279 except (NoSuchProcess, ZombieProcess):
280 pass
281 return ret
282
283
284 def _assert_pid_not_reused(fun):
285 """Decorator which raises NoSuchProcess in case a process is no
286 longer running or its PID has been reused.
287 """
288 @functools.wraps(fun)
289 def wrapper(self, *args, **kwargs):
290 if not self.is_running():
291 raise NoSuchProcess(self.pid, self._name)
292 return fun(self, *args, **kwargs)
293 return wrapper
294
295
296 def _pprint_secs(secs):
297 """Format seconds in a human readable form."""
298 now = time.time()
299 secs_ago = int(now - secs)
300 if secs_ago < 60 * 60 * 24:
301 fmt = "%H:%M:%S"
302 else:
303 fmt = "%Y-%m-%d %H:%M:%S"
304 return datetime.datetime.fromtimestamp(secs).strftime(fmt)
305
306
307 # =====================================================================
308 # --- Process class
309 # =====================================================================
310
311
312 class Process(object):
313 """Represents an OS process with the given PID.
314 If PID is omitted current process PID (os.getpid()) is used.
315 Raise NoSuchProcess if PID does not exist.
316
317 Note that most of the methods of this class do not make sure
318 the PID of the process being queried has been reused over time.
319 That means you might end up retrieving information referring
320 to another process if the original one this instance
321 refers to has gone away in the meantime.
322
323 The only exceptions for which process identity is pre-emptively
324 checked and guaranteed are:
325
326 - parent()
327 - children()
328 - nice() (set)
329 - ionice() (set)
330 - rlimit() (set)
331 - cpu_affinity (set)
332 - suspend()
333 - resume()
334 - send_signal()
335 - terminate()
336 - kill()
337
338 To prevent this problem for all other methods you can:
339 - use is_running() before querying the process
340 - if you're continuously iterating over a set of Process
341 instances use process_iter() which pre-emptively checks
342 process identity for every yielded instance
343 """
344
345 def __init__(self, pid=None):
346 self._init(pid)
347
348 def _init(self, pid, _ignore_nsp=False):
349 if pid is None:
350 pid = os.getpid()
351 else:
352 if not _PY3 and not isinstance(pid, (int, long)):
353 raise TypeError('pid must be an integer (got %r)' % pid)
354 if pid < 0:
355 raise ValueError('pid must be a positive integer (got %s)'
356 % pid)
357 self._pid = pid
358 self._name = None
359 self._exe = None
360 self._create_time = None
361 self._gone = False
362 self._hash = None
363 self._lock = threading.RLock()
364 # used for caching on Windows only (on POSIX ppid may change)
365 self._ppid = None
366 # platform-specific modules define an _psplatform.Process
367 # implementation class
368 self._proc = _psplatform.Process(pid)
369 self._last_sys_cpu_times = None
370 self._last_proc_cpu_times = None
371 # cache creation time for later use in is_running() method
372 try:
373 self.create_time()
374 except AccessDenied:
375 # We should never get here as AFAIK we're able to get
376 # process creation time on all platforms even as a
377 # limited user.
378 pass
379 except ZombieProcess:
380 # Zombies can still be queried by this class (although
381 # not always) and pids() returns them, so just go on.
382 pass
383 except NoSuchProcess:
384 if not _ignore_nsp:
385 msg = 'no process found with pid %s' % pid
386 raise NoSuchProcess(pid, None, msg)
387 else:
388 self._gone = True
389 # This pair is supposed to identify a Process instance
390 # uniquely over time (the PID alone is not enough as
391 # it might refer to a process whose PID has been reused).
392 # This will be used later in __eq__() and is_running().
393 self._ident = (self.pid, self._create_time)
394
395 def __str__(self):
396 try:
397 info = collections.OrderedDict()
398 except AttributeError:
399 info = {} # Python 2.6
400 info["pid"] = self.pid
401 try:
402 info["name"] = self.name()
403 if self._create_time:
404 info['started'] = _pprint_secs(self._create_time)
405 except ZombieProcess:
406 info["status"] = "zombie"
407 except NoSuchProcess:
408 info["status"] = "terminated"
409 except AccessDenied:
410 pass
411 return "%s.%s(%s)" % (
412 self.__class__.__module__,
413 self.__class__.__name__,
414 ", ".join(["%s=%r" % (k, v) for k, v in info.items()]))
415
416 __repr__ = __str__
417
418 def __eq__(self, other):
419 # Test for equality with another Process object based
420 # on PID and creation time.
421 if not isinstance(other, Process):
422 return NotImplemented
423 return self._ident == other._ident
424
425 def __ne__(self, other):
426 return not self == other
427
428 def __hash__(self):
429 if self._hash is None:
430 self._hash = hash(self._ident)
431 return self._hash
432
433 @property
434 def pid(self):
435 """The process PID."""
436 return self._pid
437
438 # --- utility methods
439
440 @contextlib.contextmanager
441 def oneshot(self):
442 """Utility context manager which considerably speeds up the
443 retrieval of multiple process information at the same time.
444
445 Internally different process info (e.g. name, ppid, uids,
446 gids, ...) may be fetched by using the same routine, but
447 only one information is returned and the others are discarded.
448 When using this context manager the internal routine is
449 executed once (in the example below on name()) and the
450 other info are cached.
451
452 The cache is cleared when exiting the context manager block.
453 The advice is to use this every time you retrieve more than
454 one information about the process. If you're lucky, you'll
455 get a hell of a speedup.
456
457 >>> import psutil
458 >>> p = psutil.Process()
459 >>> with p.oneshot():
460 ... p.name() # collect multiple info
461 ... p.cpu_times() # return cached value
462 ... p.cpu_percent() # return cached value
463 ... p.create_time() # return cached value
464 ...
465 >>>
466 """
467 with self._lock:
468 if hasattr(self, "_cache"):
469 # NOOP: this covers the use case where the user enters the
470 # context twice:
471 #
472 # >>> with p.oneshot():
473 # ... with p.oneshot():
474 # ...
475 #
476 # Also, since as_dict() internally uses oneshot()
477 # I expect that the code below will be a pretty common
478 # "mistake" that the user will make, so let's guard
479 # against that:
480 #
481 # >>> with p.oneshot():
482 # ... p.as_dict()
483 # ...
484 yield
485 else:
486 try:
487 # cached in case cpu_percent() is used
488 self.cpu_times.cache_activate(self)
489 # cached in case memory_percent() is used
490 self.memory_info.cache_activate(self)
491 # cached in case parent() is used
492 self.ppid.cache_activate(self)
493 # cached in case username() is used
494 if POSIX:
495 self.uids.cache_activate(self)
496 # specific implementation cache
497 self._proc.oneshot_enter()
498 yield
499 finally:
500 self.cpu_times.cache_deactivate(self)
501 self.memory_info.cache_deactivate(self)
502 self.ppid.cache_deactivate(self)
503 if POSIX:
504 self.uids.cache_deactivate(self)
505 self._proc.oneshot_exit()
506
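# Editor's usage sketch (standalone, assumes psutil is installed): the same
# queries issued with and without oneshot(); timings are illustrative only.
import time
import psutil

p = psutil.Process()
t0 = time.time()
for _ in range(200):
    p.name()
    p.cpu_times()
    p.create_time()
print("plain loop:   %.4f secs" % (time.time() - t0))
t0 = time.time()
for _ in range(200):
    with p.oneshot():
        p.name()
        p.cpu_times()
        p.create_time()
print("oneshot loop: %.4f secs" % (time.time() - t0))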
507 def as_dict(self, attrs=None, ad_value=None):
508 """Utility method returning process information as a
509 dictionary.
510 If *attrs* is specified it must be a list of strings
511 reflecting available Process class' attribute names
512 (e.g. ['cpu_times', 'name']) else all public (read
513 only) attributes are assumed.
514 *ad_value* is the value which gets assigned in case
515 AccessDenied or ZombieProcess exception is raised when
516 retrieving that particular process information.
517 """
518 valid_names = _as_dict_attrnames
519 if attrs is not None:
520 if not isinstance(attrs, (list, tuple, set, frozenset)):
521 raise TypeError("invalid attrs type %s" % type(attrs))
522 attrs = set(attrs)
523 invalid_names = attrs - valid_names
524 if invalid_names:
525 raise ValueError("invalid attr name%s %s" % (
526 "s" if len(invalid_names) > 1 else "",
527 ", ".join(map(repr, invalid_names))))
528
529 retdict = dict()
530 ls = attrs or valid_names
531 with self.oneshot():
532 for name in ls:
533 try:
534 if name == 'pid':
535 ret = self.pid
536 else:
537 meth = getattr(self, name)
538 ret = meth()
539 except (AccessDenied, ZombieProcess):
540 ret = ad_value
541 except NotImplementedError:
542 # in case of not implemented functionality (may happen
543 # on old or exotic systems) we want to crash only if
544 # the user explicitly asked for that particular attr
545 if attrs:
546 raise
547 continue
548 retdict[name] = ret
549 return retdict
550
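# Editor's usage sketch (standalone, assumes psutil is installed): fetch a
# handful of cheap attributes; AccessDenied/ZombieProcess fall back to None.
import psutil

p = psutil.Process()
info = p.as_dict(attrs=['pid', 'name', 'username', 'create_time'], ad_value=None)
print(info['pid'], info['name'], info['username'])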
551 def parent(self):
552 """Return the parent process as a Process object pre-emptively
553 checking whether PID has been reused.
554 If no parent is known return None.
555 """
556 lowest_pid = _LOWEST_PID if _LOWEST_PID is not None else pids()[0]
557 if self.pid == lowest_pid:
558 return None
559 ppid = self.ppid()
560 if ppid is not None:
561 ctime = self.create_time()
562 try:
563 parent = Process(ppid)
564 if parent.create_time() <= ctime:
565 return parent
566 # ...else ppid has been reused by another process
567 except NoSuchProcess:
568 pass
569
570 def parents(self):
571 """Return the parents of this process as a list of Process
572 instances. If no parents are known return an empty list.
573 """
574 parents = []
575 proc = self.parent()
576 while proc is not None:
577 parents.append(proc)
578 proc = proc.parent()
579 return parents
580
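# Editor's usage sketch (standalone, assumes psutil is installed): walk the
# ancestry of the current process; parent() is None once the top is reached.
import psutil

p = psutil.Process()
print("parent:", p.parent())
for ancestor in p.parents():
    print(ancestor.pid, ancestor.name())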
581 def is_running(self):
582 """Return whether this process is running.
583 It also checks if PID has been reused by another process in
584 which case return False.
585 """
586 if self._gone:
587 return False
588 try:
589 # Checking if PID is alive is not enough as the PID might
590 # have been reused by another process: we also want to
591 # verify process identity.
592 # Process identity / uniqueness over time is guaranteed by
593 # (PID + creation time) and that is verified in __eq__.
594 return self == Process(self.pid)
595 except ZombieProcess:
596 # We should never get here as it's already handled in
597 # Process.__init__; here just for extra safety.
598 return True
599 except NoSuchProcess:
600 self._gone = True
601 return False
602
603 # --- actual API
604
605 @memoize_when_activated
606 def ppid(self):
607 """The process parent PID.
608 On Windows the return value is cached after first call.
609 """
610 # On POSIX we don't want to cache the ppid as it may unexpectedly
611 # change to 1 (init) in case this process turns into a zombie:
612 # https://github.com/giampaolo/psutil/issues/321
613 # http://stackoverflow.com/questions/356722/
614
615 # XXX should we check creation time here rather than in
616 # Process.parent()?
617 if POSIX:
618 return self._proc.ppid()
619 else: # pragma: no cover
620 self._ppid = self._ppid or self._proc.ppid()
621 return self._ppid
622
623 def name(self):
624 """The process name. The return value is cached after first call."""
625 # Process name is only cached on Windows as on POSIX it may
626 # change, see:
627 # https://github.com/giampaolo/psutil/issues/692
628 if WINDOWS and self._name is not None:
629 return self._name
630 name = self._proc.name()
631 if POSIX and len(name) >= 15:
632 # On UNIX the name gets truncated to the first 15 characters.
633 # If it matches the first part of the cmdline we return that
634 # one instead because it's usually more descriptive.
635 # Examples are "gnome-keyring-d" vs. "gnome-keyring-daemon".
636 try:
637 cmdline = self.cmdline()
638 except AccessDenied:
639 pass
640 else:
641 if cmdline:
642 extended_name = os.path.basename(cmdline[0])
643 if extended_name.startswith(name):
644 name = extended_name
645 self._name = name
646 self._proc._name = name
647 return name
648
649 def exe(self):
650 """The process executable as an absolute path.
651 May also be an empty string.
652 The return value is cached after first call.
653 """
654 def guess_it(fallback):
655 # try to guess exe from cmdline[0] in absence of a native
656 # exe representation
657 cmdline = self.cmdline()
658 if cmdline and hasattr(os, 'access') and hasattr(os, 'X_OK'):
659 exe = cmdline[0] # the possible exe
660 # Attempt to guess only in case of an absolute path.
661 # It is not safe otherwise as the process might have
662 # changed cwd.
663 if (os.path.isabs(exe) and
664 os.path.isfile(exe) and
665 os.access(exe, os.X_OK)):
666 return exe
667 if isinstance(fallback, AccessDenied):
668 raise fallback
669 return fallback
670
671 if self._exe is None:
672 try:
673 exe = self._proc.exe()
674 except AccessDenied as err:
675 return guess_it(fallback=err)
676 else:
677 if not exe:
678 # underlying implementation can legitimately return an
679 # empty string; if that's the case we don't want to
680 # raise AD while guessing from the cmdline
681 try:
682 exe = guess_it(fallback=exe)
683 except AccessDenied:
684 pass
685 self._exe = exe
686 return self._exe
687
688 def cmdline(self):
689 """The command line this process has been called with."""
690 return self._proc.cmdline()
691
692 def status(self):
693 """The process current status as a STATUS_* constant."""
694 try:
695 return self._proc.status()
696 except ZombieProcess:
697 return STATUS_ZOMBIE
698
699 def username(self):
700 """The name of the user that owns the process.
701 On UNIX this is calculated by using *real* process uid.
702 """
703 if POSIX:
704 if pwd is None:
705 # might happen if python was installed from sources
706 raise ImportError(
707 "requires pwd module shipped with standard python")
708 real_uid = self.uids().real
709 try:
710 return pwd.getpwuid(real_uid).pw_name
711 except KeyError:
712 # the uid can't be resolved by the system
713 return str(real_uid)
714 else:
715 return self._proc.username()
716
717 def create_time(self):
718 """The process creation time as a floating point number
719 expressed in seconds since the epoch, in UTC.
720 The return value is cached after first call.
721 """
722 if self._create_time is None:
723 self._create_time = self._proc.create_time()
724 return self._create_time
725
726 def cwd(self):
727 """Process current working directory as an absolute path."""
728 return self._proc.cwd()
729
730 def nice(self, value=None):
731 """Get or set process niceness (priority)."""
732 if value is None:
733 return self._proc.nice_get()
734 else:
735 if not self.is_running():
736 raise NoSuchProcess(self.pid, self._name)
737 self._proc.nice_set(value)
738
739 if POSIX:
740
741 @memoize_when_activated
742 def uids(self):
743 """Return process UIDs as a (real, effective, saved)
744 namedtuple.
745 """
746 return self._proc.uids()
747
748 def gids(self):
749 """Return process GIDs as a (real, effective, saved)
750 namedtuple.
751 """
752 return self._proc.gids()
753
754 def terminal(self):
755 """The terminal associated with this process, if any,
756 else None.
757 """
758 return self._proc.terminal()
759
760 def num_fds(self):
761 """Return the number of file descriptors opened by this
762 process (POSIX only).
763 """
764 return self._proc.num_fds()
765
766 # Linux, BSD, AIX and Windows only
767 if hasattr(_psplatform.Process, "io_counters"):
768
769 def io_counters(self):
770 """Return process I/O statistics as a
771 (read_count, write_count, read_bytes, write_bytes)
772 namedtuple.
773 Those are the number of read/write calls performed and the
774 amount of bytes read and written by the process.
775 """
776 return self._proc.io_counters()
777
778 # Linux and Windows
779 if hasattr(_psplatform.Process, "ionice_get"):
780
781 def ionice(self, ioclass=None, value=None):
782 """Get or set process I/O niceness (priority).
783
784 On Linux *ioclass* is one of the IOPRIO_CLASS_* constants.
785 *value* is a number which goes from 0 to 7. The higher the
786 value, the lower the I/O priority of the process.
787
788 On Windows only *ioclass* is used and it can be set to 2
789 (normal), 1 (low) or 0 (very low).
790
791 Available on Linux and Windows > Vista only.
792 """
793 if ioclass is None:
794 if value is not None:
795 raise ValueError("'ioclass' argument must be specified")
796 return self._proc.ionice_get()
797 else:
798 return self._proc.ionice_set(ioclass, value)
799
800 # Linux only
801 if hasattr(_psplatform.Process, "rlimit"):
802
803 def rlimit(self, resource, limits=None):
804 """Get or set process resource limits as a (soft, hard)
805 tuple.
806
807 *resource* is one of the RLIMIT_* constants.
808 *limits* is supposed to be a (soft, hard) tuple.
809
810 See "man prlimit" for further info.
811 Available on Linux only.
812 """
813 if limits is None:
814 return self._proc.rlimit(resource)
815 else:
816 return self._proc.rlimit(resource, limits)
817
818 # Windows, Linux and FreeBSD only
819 if hasattr(_psplatform.Process, "cpu_affinity_get"):
820
821 def cpu_affinity(self, cpus=None):
822 """Get or set process CPU affinity.
823 If specified, *cpus* must be a list of CPUs for which you
824 want to set the affinity (e.g. [0, 1]).
825 If an empty list is passed, all eligible CPUs are assumed
826 (and set).
827 (Windows, Linux and BSD only).
828 """
829 if cpus is None:
830 return list(set(self._proc.cpu_affinity_get()))
831 else:
832 if not cpus:
833 if hasattr(self._proc, "_get_eligible_cpus"):
834 cpus = self._proc._get_eligible_cpus()
835 else:
836 cpus = tuple(range(len(cpu_times(percpu=True))))
837 self._proc.cpu_affinity_set(list(set(cpus)))
838
839 # Linux, FreeBSD, SunOS
840 if hasattr(_psplatform.Process, "cpu_num"):
841
842 def cpu_num(self):
843 """Return what CPU this process is currently running on.
844 The returned number should be <= psutil.cpu_count()
845 and <= len(psutil.cpu_percent(percpu=True)).
846 It may be used in conjunction with
847 psutil.cpu_percent(percpu=True) to observe the system
848 workload distributed across CPUs.
849 """
850 return self._proc.cpu_num()
851
852 # Linux, macOS, Windows, Solaris, AIX
853 if hasattr(_psplatform.Process, "environ"):
854
855 def environ(self):
856 """The environment variables of the process as a dict. Note: this
857 might not reflect changes made after the process started. """
858 return self._proc.environ()
859
860 if WINDOWS:
861
862 def num_handles(self):
863 """Return the number of handles opened by this process
864 (Windows only).
865 """
866 return self._proc.num_handles()
867
868 def num_ctx_switches(self):
869 """Return the number of voluntary and involuntary context
870 switches performed by this process.
871 """
872 return self._proc.num_ctx_switches()
873
874 def num_threads(self):
875 """Return the number of threads used by this process."""
876 return self._proc.num_threads()
877
878 if hasattr(_psplatform.Process, "threads"):
879
880 def threads(self):
881 """Return threads opened by process as a list of
882 (id, user_time, system_time) namedtuples representing
883 thread id and thread CPU times (user/system).
884 On OpenBSD this method requires root access.
885 """
886 return self._proc.threads()
887
888 @_assert_pid_not_reused
889 def children(self, recursive=False):
890 """Return the children of this process as a list of Process
891 instances, pre-emptively checking whether PID has been reused.
892 If *recursive* is True also return grandchildren and further descendants.
893
894 Example (A == this process):
895
896 A ─┐
897    │
898    ├─ B (child) ─┐
899    │             └─ X (grandchild) ─┐
900    │                                └─ Y (great grandchild)
901    ├─ C (child)
902    └─ D (child)
903
904 >>> import psutil
905 >>> p = psutil.Process()
906 >>> p.children()
907 B, C, D
908 >>> p.children(recursive=True)
909 B, X, Y, C, D
910
911 Note that in the example above if process X disappears
912 process Y won't be listed as the reference to process A
913 is lost.
914 """
915 ppid_map = _ppid_map()
916 ret = []
917 if not recursive:
918 for pid, ppid in ppid_map.items():
919 if ppid == self.pid:
920 try:
921 child = Process(pid)
922 # if child happens to be older than its parent
923 # (self) it means child's PID has been reused
924 if self.create_time() <= child.create_time():
925 ret.append(child)
926 except (NoSuchProcess, ZombieProcess):
927 pass
928 else:
929 # Construct a {pid: [child pids]} dict
930 reverse_ppid_map = collections.defaultdict(list)
931 for pid, ppid in ppid_map.items():
932 reverse_ppid_map[ppid].append(pid)
933 # Recursively traverse that dict, starting from self.pid,
934 # such that we only call Process() on actual children
935 seen = set()
936 stack = [self.pid]
937 while stack:
938 pid = stack.pop()
939 if pid in seen:
940 # Since pids can be reused while the ppid_map is
941 # constructed, there may be rare instances where
942 # there's a cycle in the recorded process "tree".
943 continue
944 seen.add(pid)
945 for child_pid in reverse_ppid_map[pid]:
946 try:
947 child = Process(child_pid)
948 # if child happens to be older than its parent
949 # (self) it means child's PID has been reused
950 intime = self.create_time() <= child.create_time()
951 if intime:
952 ret.append(child)
953 stack.append(child_pid)
954 except (NoSuchProcess, ZombieProcess):
955 pass
956 return ret
957
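# Editor's usage sketch (standalone, assumes psutil is installed): list the
# direct children vs. the whole subtree of the current process.
import psutil

p = psutil.Process()
print([c.pid for c in p.children()])                 # B, C, D in the example above
print([c.pid for c in p.children(recursive=True)])   # B, X, Y, C, D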
958 def cpu_percent(self, interval=None):
959 """Return a float representing the current process CPU
960 utilization as a percentage.
961
962 When *interval* is 0.0 or None (default) compares process times
963 to system CPU times elapsed since last call, returning
964 immediately (non-blocking). That means that the first time
965 this is called it will return a meaningful 0.0 value.
966
967 When *interval* is > 0.0 compares process times to system CPU
968 times elapsed before and after the interval (blocking).
969
970 In this case it is recommended for accuracy that this function
971 be called with at least 0.1 seconds between calls.
972
973 A value > 100.0 can be returned in case of processes running
974 multiple threads on different CPU cores.
975
976 The returned value is explicitly NOT split evenly between
977 all available logical CPUs. This means that a busy loop process
978 running on a system with 2 logical CPUs will be reported as
979 having 100% CPU utilization instead of 50%.
980
981 Examples:
982
983 >>> import psutil
984 >>> p = psutil.Process(os.getpid())
985 >>> # blocking
986 >>> p.cpu_percent(interval=1)
987 2.0
988 >>> # non-blocking (percentage since last call)
989 >>> p.cpu_percent(interval=None)
990 2.9
991 >>>
992 """
993 blocking = interval is not None and interval > 0.0
994 if interval is not None and interval < 0:
995 raise ValueError("interval is not positive (got %r)" % interval)
996 num_cpus = cpu_count() or 1
997
998 def timer():
999 return _timer() * num_cpus
1000
1001 if blocking:
1002 st1 = timer()
1003 pt1 = self._proc.cpu_times()
1004 time.sleep(interval)
1005 st2 = timer()
1006 pt2 = self._proc.cpu_times()
1007 else:
1008 st1 = self._last_sys_cpu_times
1009 pt1 = self._last_proc_cpu_times
1010 st2 = timer()
1011 pt2 = self._proc.cpu_times()
1012 if st1 is None or pt1 is None:
1013 self._last_sys_cpu_times = st2
1014 self._last_proc_cpu_times = pt2
1015 return 0.0
1016
1017 delta_proc = (pt2.user - pt1.user) + (pt2.system - pt1.system)
1018 delta_time = st2 - st1
1019 # reset values for next call in case of interval == None
1020 self._last_sys_cpu_times = st2
1021 self._last_proc_cpu_times = pt2
1022
1023 try:
1024 # This is the utilization split evenly between all CPUs.
1025 # E.g. a busy loop process on a 2-CPU-cores system at this
1026 # point is reported as 50% instead of 100%.
1027 overall_cpus_percent = ((delta_proc / delta_time) * 100)
1028 except ZeroDivisionError:
1029 # interval was too low
1030 return 0.0
1031 else:
1032 # Note 1:
1033 # in order to emulate "top" we multiply the value for the num
1034 # of CPU cores. This way the busy process will be reported as
1035 # having 100% (or more) usage.
1036 #
1037 # Note 2:
1038 # taskmgr.exe on Windows differs in that it will show 50%
1039 # instead.
1040 #
1041 # Note 3:
1042 # a percentage > 100 is legitimate as it can result from a
1043 # process with multiple threads running on different CPU
1044 # cores (top does the same), see:
1045 # http://stackoverflow.com/questions/1032357
1046 # https://github.com/giampaolo/psutil/issues/474
1047 single_cpu_percent = overall_cpus_percent * num_cpus
1048 return round(single_cpu_percent, 1)
1049
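# Editor's usage sketch (standalone, assumes psutil is installed): prime the
# counters with a non-blocking call, then sample over blocking intervals.
import psutil

p = psutil.Process()
p.cpu_percent(interval=None)             # first non-blocking call returns 0.0
for _ in range(3):
    print(p.cpu_percent(interval=0.5))   # percentage over each 0.5s window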
1050 @memoize_when_activated
1051 def cpu_times(self):
1052 """Return a (user, system, children_user, children_system)
1053 namedtuple representing the accumulated process time, in
1054 seconds.
1055 This is similar to os.times() but per-process.
1056 On macOS and Windows children_user and children_system are
1057 always set to 0.
1058 """
1059 return self._proc.cpu_times()
1060
1061 @memoize_when_activated
1062 def memory_info(self):
1063 """Return a namedtuple with variable fields depending on the
1064 platform, representing memory information about the process.
1065
1066 The "portable" fields available on all plaforms are `rss` and `vms`.
1067
1068 All numbers are expressed in bytes.
1069 """
1070 return self._proc.memory_info()
1071
1072 @deprecated_method(replacement="memory_info")
1073 def memory_info_ex(self):
1074 return self.memory_info()
1075
1076 def memory_full_info(self):
1077 """This method returns the same information as memory_info(),
1078 plus, on some platforms (Linux, macOS, Windows), also provides
1079 additional metrics (USS, PSS and swap).
1080 The additional metrics provide a better representation of actual
1081 process memory usage.
1082
1083 Namely USS is the memory which is unique to a process and which
1084 would be freed if the process was terminated right now.
1085
1086 It does so by passing through the whole process address space.
1087 As such it usually requires higher user privileges than
1088 memory_info() and is considerably slower.
1089 """
1090 return self._proc.memory_full_info()
1091
1092 def memory_percent(self, memtype="rss"):
1093 """Compare process memory to total physical system memory and
1094 calculate process memory utilization as a percentage.
1095 *memtype* argument is a string that dictates what type of
1096 process memory you want to compare against (defaults to "rss").
1097 The list of available strings can be obtained like this:
1098
1099 >>> psutil.Process().memory_info()._fields
1100 ('rss', 'vms', 'shared', 'text', 'lib', 'data', 'dirty', 'uss', 'pss')
1101 """
1102 valid_types = list(_psplatform.pfullmem._fields)
1103 if memtype not in valid_types:
1104 raise ValueError("invalid memtype %r; valid types are %r" % (
1105 memtype, tuple(valid_types)))
1106 fun = self.memory_info if memtype in _psplatform.pmem._fields else \
1107 self.memory_full_info
1108 metrics = fun()
1109 value = getattr(metrics, memtype)
1110
1111 # use cached value if available
1112 total_phymem = _TOTAL_PHYMEM or virtual_memory().total
1113 if not total_phymem > 0:
1114 # we should never get here
1115 raise ValueError(
1116 "can't calculate process memory percent because "
1117 "total physical system memory is not positive (%r)"
1118 % total_phymem)
1119 return (value / float(total_phymem)) * 100
1120
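# Editor's usage sketch (standalone, assumes psutil is installed): RSS of this
# process as raw bytes and as a percentage of total physical memory.
import psutil

p = psutil.Process()
print(p.memory_info().rss)                         # bytes
print(round(p.memory_percent(memtype="rss"), 2))   # percent of total physical RAM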
1121 if hasattr(_psplatform.Process, "memory_maps"):
1122 def memory_maps(self, grouped=True):
1123 """Return process' mapped memory regions as a list of namedtuples
1124 whose fields are variable depending on the platform.
1125
1126 If *grouped* is True the mapped regions with the same 'path'
1127 are grouped together and the different memory fields are summed.
1128
1129 If *grouped* is False every mapped region is shown as a single
1130 entity and the namedtuple will also include the mapped region's
1131 address space ('addr') and permission set ('perms').
1132 """
1133 it = self._proc.memory_maps()
1134 if grouped:
1135 d = {}
1136 for tupl in it:
1137 path = tupl[2]
1138 nums = tupl[3:]
1139 try:
1140 d[path] = map(lambda x, y: x + y, d[path], nums)
1141 except KeyError:
1142 d[path] = nums
1143 nt = _psplatform.pmmap_grouped
1144 return [nt(path, *d[path]) for path in d] # NOQA
1145 else:
1146 nt = _psplatform.pmmap_ext
1147 return [nt(*x) for x in it]
1148
1149 def open_files(self):
1150 """Return files opened by process as a list of
1151 (path, fd) namedtuples including the absolute file name
1152 and file descriptor number.
1153 """
1154 return self._proc.open_files()
1155
1156 def connections(self, kind='inet'):
1157 """Return socket connections opened by process as a list of
1158 (fd, family, type, laddr, raddr, status) namedtuples.
1159 The *kind* parameter filters for connections that match the
1160 following criteria:
1161
1162 +------------+----------------------------------------------------+
1163 | Kind Value | Connections using |
1164 +------------+----------------------------------------------------+
1165 | inet | IPv4 and IPv6 |
1166 | inet4 | IPv4 |
1167 | inet6 | IPv6 |
1168 | tcp | TCP |
1169 | tcp4 | TCP over IPv4 |
1170 | tcp6 | TCP over IPv6 |
1171 | udp | UDP |
1172 | udp4 | UDP over IPv4 |
1173 | udp6 | UDP over IPv6 |
1174 | unix | UNIX socket (both UDP and TCP protocols) |
1175 | all | the sum of all the possible families and protocols |
1176 +------------+----------------------------------------------------+
1177 """
1178 return self._proc.connections(kind)
1179
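# Editor's usage sketch (standalone, assumes psutil is installed): show only
# the TCP sockets of this process that are in LISTEN state, if any.
import psutil

p = psutil.Process()
for conn in p.connections(kind='tcp'):
    if conn.status == psutil.CONN_LISTEN:
        print(conn.fd, conn.laddr, conn.status)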
1180 # --- signals
1181
1182 if POSIX:
1183 def _send_signal(self, sig):
1184 assert not self.pid < 0, self.pid
1185 if self.pid == 0:
1186 # see "man 2 kill"
1187 raise ValueError(
1188 "preventing sending signal to process with PID 0 as it "
1189 "would affect every process in the process group of the "
1190 "calling process (os.getpid()) instead of PID 0")
1191 try:
1192 os.kill(self.pid, sig)
1193 except ProcessLookupError:
1194 if OPENBSD and pid_exists(self.pid):
1195 # We do this because os.kill() lies in case of
1196 # zombie processes.
1197 raise ZombieProcess(self.pid, self._name, self._ppid)
1198 else:
1199 self._gone = True
1200 raise NoSuchProcess(self.pid, self._name)
1201 except PermissionError:
1202 raise AccessDenied(self.pid, self._name)
1203
1204 @_assert_pid_not_reused
1205 def send_signal(self, sig):
1206 """Send a signal *sig* to process pre-emptively checking
1207 whether PID has been reused (see signal module constants).
1208 On Windows only SIGTERM is valid and is treated as an alias
1209 for kill().
1210 """
1211 if POSIX:
1212 self._send_signal(sig)
1213 else: # pragma: no cover
1214 self._proc.send_signal(sig)
1215
1216 @_assert_pid_not_reused
1217 def suspend(self):
1218 """Suspend process execution with SIGSTOP pre-emptively checking
1219 whether PID has been reused.
1220 On Windows this has the effect of suspending all process threads.
1221 """
1222 if POSIX:
1223 self._send_signal(signal.SIGSTOP)
1224 else: # pragma: no cover
1225 self._proc.suspend()
1226
1227 @_assert_pid_not_reused
1228 def resume(self):
1229 """Resume process execution with SIGCONT pre-emptively checking
1230 whether PID has been reused.
1231 On Windows this has the effect of resuming all process threads.
1232 """
1233 if POSIX:
1234 self._send_signal(signal.SIGCONT)
1235 else: # pragma: no cover
1236 self._proc.resume()
1237
1238 @_assert_pid_not_reused
1239 def terminate(self):
1240 """Terminate the process with SIGTERM pre-emptively checking
1241 whether PID has been reused.
1242 On Windows this is an alias for kill().
1243 """
1244 if POSIX:
1245 self._send_signal(signal.SIGTERM)
1246 else: # pragma: no cover
1247 self._proc.kill()
1248
1249 @_assert_pid_not_reused
1250 def kill(self):
1251 """Kill the current process with SIGKILL pre-emptively checking
1252 whether PID has been reused.
1253 """
1254 if POSIX:
1255 self._send_signal(signal.SIGKILL)
1256 else: # pragma: no cover
1257 self._proc.kill()
1258
1259 def wait(self, timeout=None):
1260 """Wait for process to terminate and, if process is a children
1261 of os.getpid(), also return its exit code, else None.
1262 On Windows there's no such limitation (exit code is always
1263 returned).
1264
1265 If the process is already terminated immediately return None
1266 instead of raising NoSuchProcess.
1267
1268 If *timeout* (in seconds) is specified and process is still
1269 alive raise TimeoutExpired.
1270
1271 To wait for multiple Process(es) use psutil.wait_procs().
1272 """
1273 if timeout is not None and not timeout >= 0:
1274 raise ValueError("timeout must be a positive integer")
1275 return self._proc.wait(timeout)
1276
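# Editor's usage sketch (standalone; POSIX assumed for the "sleep" command):
# the usual terminate-then-wait pattern, escalating to kill() on timeout.
import subprocess
import psutil

child = subprocess.Popen(["sleep", "30"])
p = psutil.Process(child.pid)
p.terminate()
try:
    p.wait(timeout=3)      # reaps the child so no zombie is left behind
except psutil.TimeoutExpired:
    p.kill()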
1277
1278 # =====================================================================
1279 # --- Popen class
1280 # =====================================================================
1281
1282
1283 class Popen(Process):
1284 """A more convenient interface to stdlib subprocess.Popen class.
1285 It starts a subprocess and deals with it exactly as when using
1286 the subprocess.Popen class, but in addition it also provides all the
1287 properties and methods of the psutil.Process class in a unified
1288 interface:
1289
1290 >>> import psutil
1291 >>> from subprocess import PIPE
1292 >>> p = psutil.Popen(["python", "-c", "print 'hi'"], stdout=PIPE)
1293 >>> p.name()
1294 'python'
1295 >>> p.uids()
1296 user(real=1000, effective=1000, saved=1000)
1297 >>> p.username()
1298 'giampaolo'
1299 >>> p.communicate()
1300 ('hi\n', None)
1301 >>> p.terminate()
1302 >>> p.wait(timeout=2)
1303 0
1304 >>>
1305
1306 For method names common to both classes such as kill(), terminate()
1307 and wait(), the psutil.Process implementation takes precedence.
1308
1309 Unlike subprocess.Popen this class pre-emptively checks whether PID
1310 has been reused on send_signal(), terminate() and kill() so that
1311 you don't accidentally terminate another process, fixing
1312 http://bugs.python.org/issue6973.
1313
1314 For a complete documentation refer to:
1315 http://docs.python.org/3/library/subprocess.html
1316 """
1317
1318 def __init__(self, *args, **kwargs):
1319 # Explicitly avoid raising NoSuchProcess in case the process
1320 # spawned by subprocess.Popen terminates too quickly, see:
1321 # https://github.com/giampaolo/psutil/issues/193
1322 self.__subproc = subprocess.Popen(*args, **kwargs)
1323 self._init(self.__subproc.pid, _ignore_nsp=True)
1324
1325 def __dir__(self):
1326 return sorted(set(dir(Popen) + dir(subprocess.Popen)))
1327
1328 def __enter__(self):
1329 if hasattr(self.__subproc, '__enter__'):
1330 self.__subproc.__enter__()
1331 return self
1332
1333 def __exit__(self, *args, **kwargs):
1334 if hasattr(self.__subproc, '__exit__'):
1335 return self.__subproc.__exit__(*args, **kwargs)
1336 else:
1337 if self.stdout:
1338 self.stdout.close()
1339 if self.stderr:
1340 self.stderr.close()
1341 try:
1342 # Flushing a BufferedWriter may raise an error.
1343 if self.stdin:
1344 self.stdin.close()
1345 finally:
1346 # Wait for the process to terminate, to avoid zombies.
1347 self.wait()
1348
1349 def __getattribute__(self, name):
1350 try:
1351 return object.__getattribute__(self, name)
1352 except AttributeError:
1353 try:
1354 return object.__getattribute__(self.__subproc, name)
1355 except AttributeError:
1356 raise AttributeError("%s instance has no attribute '%s'"
1357 % (self.__class__.__name__, name))
1358
1359 def wait(self, timeout=None):
1360 if self.__subproc.returncode is not None:
1361 return self.__subproc.returncode
1362 ret = super(Popen, self).wait(timeout)
1363 self.__subproc.returncode = ret
1364 return ret
1365
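# Editor's usage sketch (standalone, assumes Python 3 and psutil installed):
# psutil.Popen used as a context manager, mixing both APIs.
import sys
import psutil
from subprocess import PIPE

with psutil.Popen([sys.executable, "-c", "print('hi')"], stdout=PIPE) as p:
    print(p.name(), p.username())      # psutil.Process API
    print(p.communicate()[0])          # subprocess.Popen API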
1366
1367 # The valid attr names which can be processed by Process.as_dict().
1368 _as_dict_attrnames = set(
1369 [x for x in dir(Process) if not x.startswith('_') and x not in
1370 ['send_signal', 'suspend', 'resume', 'terminate', 'kill', 'wait',
1371 'is_running', 'as_dict', 'parent', 'parents', 'children', 'rlimit',
1372 'memory_info_ex', 'oneshot']])
1373
1374
1375 # =====================================================================
1376 # --- system processes related functions
1377 # =====================================================================
1378
1379
1380 def pids():
1381 """Return a list of current running PIDs."""
1382 global _LOWEST_PID
1383 ret = sorted(_psplatform.pids())
1384 _LOWEST_PID = ret[0]
1385 return ret
1386
1387
1388 def pid_exists(pid):
1389 """Return True if given PID exists in the current process list.
1390 This is faster than doing "pid in psutil.pids()" and
1391 should be preferred.
1392 """
1393 if pid < 0:
1394 return False
1395 elif pid == 0 and POSIX:
1396 # On POSIX we use os.kill() to determine PID existence.
1397 # According to "man 2 kill" PID 0 has a special meaning
1398 # though: it refers to <<every process in the process
1399 # group of the calling process>> and that is not what we want
1400 # to do here.
1401 return pid in pids()
1402 else:
1403 return _psplatform.pid_exists(pid)
1404
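# Editor's usage sketch (standalone, assumes psutil is installed):
import os
import psutil

print(psutil.pid_exists(os.getpid()))   # True: the current process exists
print(psutil.pid_exists(-1))            # False: negative PIDs never exist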
1405
1406 _pmap = {}
1407 _lock = threading.Lock()
1408
1409
1410 def process_iter(attrs=None, ad_value=None):
1411 """Return a generator yielding a Process instance for all
1412 running processes.
1413
1414 Every new Process instance is only created once and then cached
1415 into an internal table which is updated every time this is used.
1416
1417 Cached Process instances are checked for identity so that you're
1418 safe in case a PID has been reused by another process, in which
1419 case the cached instance is updated.
1420
1421 The sorting order in which processes are yielded is based on
1422 their PIDs.
1423
1424 *attrs* and *ad_value* have the same meaning as in
1425 Process.as_dict(). If *attrs* is specified as_dict() is called
1426 and the resulting dict is stored as an 'info' attribute attached
1427 to the returned Process instance.
1428 If *attrs* is an empty list it will retrieve all process info
1429 (slow).
1430 """
1431 def add(pid):
1432 proc = Process(pid)
1433 if attrs is not None:
1434 proc.info = proc.as_dict(attrs=attrs, ad_value=ad_value)
1435 with _lock:
1436 _pmap[proc.pid] = proc
1437 return proc
1438
1439 def remove(pid):
1440 with _lock:
1441 _pmap.pop(pid, None)
1442
1443 a = set(pids())
1444 b = set(_pmap.keys())
1445 new_pids = a - b
1446 gone_pids = b - a
1447 for pid in gone_pids:
1448 remove(pid)
1449
1450 with _lock:
1451 ls = sorted(list(_pmap.items()) +
1452 list(dict.fromkeys(new_pids).items()))
1453
1454 for pid, proc in ls:
1455 try:
1456 if proc is None: # new process
1457 yield add(pid)
1458 else:
1459 # use is_running() to check whether PID has been reused by
1460 # another process in which case yield a new Process instance
1461 if proc.is_running():
1462 if attrs is not None:
1463 proc.info = proc.as_dict(
1464 attrs=attrs, ad_value=ad_value)
1465 yield proc
1466 else:
1467 yield add(pid)
1468 except NoSuchProcess:
1469 remove(pid)
1470 except AccessDenied:
1471 # Process creation time can't be determined hence there's
1472 # no way to tell whether the pid of the cached process
1473 # has been reused. Just return the cached version.
1474 if proc is None and pid in _pmap:
1475 try:
1476 yield _pmap[pid]
1477 except KeyError:
1478 # If we get here it is likely that 2 threads were
1479 # using process_iter().
1480 pass
1481 else:
1482 raise
1483
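# Editor's usage sketch (standalone, assumes psutil is installed): iterate all
# processes; 'info' is attached because attrs is given, AccessDenied -> None.
import psutil

for proc in psutil.process_iter(attrs=['pid', 'name', 'username'], ad_value=None):
    print(proc.info)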
1484
1485 def wait_procs(procs, timeout=None, callback=None):
1486 """Convenience function which waits for a list of processes to
1487 terminate.
1488
1489 Return a (gone, alive) tuple indicating which processes
1490 are gone and which ones are still alive.
1491
1492 The gone ones will have a new *returncode* attribute indicating
1493 process exit status (may be None).
1494
1495 *callback* is a function which gets called every time a process
1496 terminates (a Process instance is passed as callback argument).
1497
1498 Function will return as soon as all processes terminate or when
1499 *timeout* occurs.
1500 Unlike Process.wait(), it will not raise TimeoutExpired if
1501 *timeout* occurs.
1502
1503 Typical use case is:
1504
1505 - send SIGTERM to a list of processes
1506 - give them some time to terminate
1507 - send SIGKILL to those ones which are still alive
1508
1509 Example:
1510
1511 >>> def on_terminate(proc):
1512 ... print("process {} terminated".format(proc))
1513 ...
1514 >>> for p in procs:
1515 ... p.terminate()
1516 ...
1517 >>> gone, alive = wait_procs(procs, timeout=3, callback=on_terminate)
1518 >>> for p in alive:
1519 ... p.kill()
1520 """
1521 def check_gone(proc, timeout):
1522 try:
1523 returncode = proc.wait(timeout=timeout)
1524 except TimeoutExpired:
1525 pass
1526 else:
1527 if returncode is not None or not proc.is_running():
1528 # Set new Process instance attribute.
1529 proc.returncode = returncode
1530 gone.add(proc)
1531 if callback is not None:
1532 callback(proc)
1533
1534 if timeout is not None and not timeout >= 0:
1535 msg = "timeout must be a positive integer, got %s" % timeout
1536 raise ValueError(msg)
1537 gone = set()
1538 alive = set(procs)
1539 if callback is not None and not callable(callback):
1540 raise TypeError("callback %r is not a callable" % callable)
1541 if timeout is not None:
1542 deadline = _timer() + timeout
1543
1544 while alive:
1545 if timeout is not None and timeout <= 0:
1546 break
1547 for proc in alive:
1548 # Make sure that every complete iteration (all processes)
1549 # will last max 1 sec.
1550 # We do this because we don't want to wait too long on a
1551 # single process: in case it terminates too late other
1552 # processes may disappear in the meantime and their PID
1553 # reused.
1554 max_timeout = 1.0 / len(alive)
1555 if timeout is not None:
1556 timeout = min((deadline - _timer()), max_timeout)
1557 if timeout <= 0:
1558 break
1559 check_gone(proc, timeout)
1560 else:
1561 check_gone(proc, max_timeout)
1562 alive = alive - gone
1563
1564 if alive:
1565 # Last attempt over processes survived so far.
1566 # timeout == 0 won't make this function wait any further.
1567 for proc in alive:
1568 check_gone(proc, 0)
1569 alive = alive - gone
1570
1571 return (list(gone), list(alive))
1572
1573
1574 # =====================================================================
1575 # --- CPU related functions
1576 # =====================================================================
1577
1578
1579 def cpu_count(logical=True):
1580 """Return the number of logical CPUs in the system (same as
1581 os.cpu_count() in Python 3.4).
1582
1583 If *logical* is False return the number of physical cores only
1584 (e.g. hyper thread CPUs are excluded).
1585
1586 Return None if undetermined.
1587
1588 The return value is cached after first call.
1589 If desired, the cache can be cleared like this:
1590
1591 >>> psutil.cpu_count.cache_clear()
1592 """
1593 if logical:
1594 ret = _psplatform.cpu_count_logical()
1595 else:
1596 ret = _psplatform.cpu_count_physical()
1597 if ret is not None and ret < 1:
1598 ret = None
1599 return ret
1600
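# Editor's usage sketch (standalone, assumes psutil is installed):
import psutil

print(psutil.cpu_count())                 # logical CPUs
print(psutil.cpu_count(logical=False))    # physical cores; may be None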
1601
1602 def cpu_times(percpu=False):
1603 """Return system-wide CPU times as a namedtuple.
1604 Every CPU time represents the seconds the CPU has spent in the
1605 given mode. The availability of the namedtuple's fields varies depending on the
1606 platform:
1607
1608 - user
1609 - system
1610 - idle
1611 - nice (UNIX)
1612 - iowait (Linux)
1613 - irq (Linux, FreeBSD)
1614 - softirq (Linux)
1615 - steal (Linux >= 2.6.11)
1616 - guest (Linux >= 2.6.24)
1617 - guest_nice (Linux >= 3.2.0)
1618
1619 When *percpu* is True return a list of namedtuples for each CPU.
1620 First element of the list refers to first CPU, second element
1621 to second CPU and so on.
1622 The order of the list is consistent across calls.
1623 """
1624 if not percpu:
1625 return _psplatform.cpu_times()
1626 else:
1627 return _psplatform.per_cpu_times()
1628
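# Editor's usage sketch (standalone, assumes psutil is installed): system-wide
# vs. per-CPU times; only user/system/idle exist on every platform.
import psutil

t = psutil.cpu_times()
print(t.user, t.system, t.idle)
print(len(psutil.cpu_times(percpu=True)))   # one namedtuple per logical CPU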
1629
1630 try:
1631 _last_cpu_times = cpu_times()
1632 except Exception:
1633 # Don't want to crash at import time.
1634 _last_cpu_times = None
1635
1636 try:
1637 _last_per_cpu_times = cpu_times(percpu=True)
1638 except Exception:
1639 # Don't want to crash at import time.
1640 _last_per_cpu_times = None
1641
1642
1643 def _cpu_tot_time(times):
1644 """Given a cpu_time() ntuple calculates the total CPU time
1645 (including idle time).
1646 """
1647 tot = sum(times)
1648 if LINUX:
1649 # On Linux guest times are already accounted in "user" or
1650 # "nice" times, so we subtract them from total.
1651 # Htop does the same. References:
1652 # https://github.com/giampaolo/psutil/pull/940
1653 # http://unix.stackexchange.com/questions/178045
1654 # https://github.com/torvalds/linux/blob/
1655 # 447976ef4fd09b1be88b316d1a81553f1aa7cd07/kernel/sched/
1656 # cputime.c#L158
1657 tot -= getattr(times, "guest", 0) # Linux 2.6.24+
1658 tot -= getattr(times, "guest_nice", 0) # Linux 3.2.0+
1659 return tot
1660
1661
1662 def _cpu_busy_time(times):
1663 """Given a cpu_time() ntuple calculates the busy CPU time.
1664 We do so by subtracting all idle CPU times.
1665 """
1666 busy = _cpu_tot_time(times)
1667 busy -= times.idle
1668 # Linux: "iowait" is time during which the CPU does not do anything
1669 # (waits for IO to complete). On Linux IO wait is *not* accounted
1670 # in "idle" time so we subtract it. Htop does the same.
1671 # References:
1672 # https://github.com/torvalds/linux/blob/
1673 # 447976ef4fd09b1be88b316d1a81553f1aa7cd07/kernel/sched/cputime.c#L244
1674 busy -= getattr(times, "iowait", 0)
1675 return busy
1676
1677
1678 def _cpu_times_deltas(t1, t2):
1679 assert t1._fields == t2._fields, (t1, t2)
1680 field_deltas = []
1681 for field in _psplatform.scputimes._fields:
1682 field_delta = getattr(t2, field) - getattr(t1, field)
1683 # CPU times are always supposed to increase over time
1684 # or at least remain the same and that's because time
1685 # cannot go backwards.
1686 # Surprisingly sometimes this might not be the case (at
1687 # least on Windows and Linux), see:
1688 # https://github.com/giampaolo/psutil/issues/392
1689 # https://github.com/giampaolo/psutil/issues/645
1690 # https://github.com/giampaolo/psutil/issues/1210
1691 # Trim negative deltas to zero to ignore decreasing fields.
1692 # top does the same. Reference:
1693 # https://gitlab.com/procps-ng/procps/blob/v3.3.12/top/top.c#L5063
1694 field_delta = max(0, field_delta)
1695 field_deltas.append(field_delta)
1696 return _psplatform.scputimes(*field_deltas)
1697
1698
1699 def cpu_percent(interval=None, percpu=False):
1700 """Return a float representing the current system-wide CPU
1701 utilization as a percentage.
1702
1703 When *interval* is > 0.0 compares system CPU times elapsed before
1704 and after the interval (blocking).
1705
1706 When *interval* is 0.0 or None compares system CPU times elapsed
1707 since last call or module import, returning immediately (non
1708 blocking). That means the first time this is called it will
1709 return a meaningless 0.0 value which you should ignore.
1710 In this case it is recommended for accuracy that this function be
1711 called with at least 0.1 seconds between calls.
1712
1713 When *percpu* is True returns a list of floats representing the
1714 utilization as a percentage for each CPU.
1715 First element of the list refers to first CPU, second element
1716 to second CPU and so on.
1717 The order of the list is consistent across calls.
1718
1719 Examples:
1720
1721 >>> # blocking, system-wide
1722 >>> psutil.cpu_percent(interval=1)
1723 2.0
1724 >>>
1725 >>> # blocking, per-cpu
1726 >>> psutil.cpu_percent(interval=1, percpu=True)
1727 [2.0, 1.0]
1728 >>>
1729 >>> # non-blocking (percentage since last call)
1730 >>> psutil.cpu_percent(interval=None)
1731 2.9
1732 >>>
1733 """
1734 global _last_cpu_times
1735 global _last_per_cpu_times
1736 blocking = interval is not None and interval > 0.0
1737 if interval is not None and interval < 0:
1738 raise ValueError("interval is not positive (got %r)" % interval)
1739
1740 def calculate(t1, t2):
1741 times_delta = _cpu_times_deltas(t1, t2)
1742
1743 all_delta = _cpu_tot_time(times_delta)
1744 busy_delta = _cpu_busy_time(times_delta)
1745
1746 try:
1747 busy_perc = (busy_delta / all_delta) * 100
1748 except ZeroDivisionError:
1749 return 0.0
1750 else:
1751 return round(busy_perc, 1)
1752
1753 # system-wide usage
1754 if not percpu:
1755 if blocking:
1756 t1 = cpu_times()
1757 time.sleep(interval)
1758 else:
1759 t1 = _last_cpu_times
1760 if t1 is None:
1761 # Something bad happened at import time. We'll
1762 # get a meaningful result on the next call. See:
1763 # https://github.com/giampaolo/psutil/pull/715
1764 t1 = cpu_times()
1765 _last_cpu_times = cpu_times()
1766 return calculate(t1, _last_cpu_times)
1767 # per-cpu usage
1768 else:
1769 ret = []
1770 if blocking:
1771 tot1 = cpu_times(percpu=True)
1772 time.sleep(interval)
1773 else:
1774 tot1 = _last_per_cpu_times
1775 if tot1 is None:
1776 # Something bad happened at import time. We'll
1777 # get a meaningful result on the next call. See:
1778 # https://github.com/giampaolo/psutil/pull/715
1779 tot1 = cpu_times(percpu=True)
1780 _last_per_cpu_times = cpu_times(percpu=True)
1781 for t1, t2 in zip(tot1, _last_per_cpu_times):
1782 ret.append(calculate(t1, t2))
1783 return ret
1784
1785
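# A minimal usage sketch (illustrative, not part of the psutil API; the
# helper name and the 0.5s pause are made up): poll CPU usage with
# interval=None in a loop, discarding the meaningless first reading as the
# docstring above recommends.
def _example_cpu_poll(samples=5, pause=0.5):  # pragma: no cover
    cpu_percent(interval=None)  # first call only sets the baseline
    readings = []
    for _ in range(samples):
        time.sleep(pause)  # keep >= 0.1s between calls for meaningful numbers
        readings.append(cpu_percent(interval=None))
    return readings

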
1786 # Use separate global vars for cpu_times_percent() so that it's
1787 # independent from cpu_percent() and they can both be used within
1788 # the same program.
1789 _last_cpu_times_2 = _last_cpu_times
1790 _last_per_cpu_times_2 = _last_per_cpu_times
1791
1792
1793 def cpu_times_percent(interval=None, percpu=False):
1794 """Same as cpu_percent() but provides utilization percentages
1795 for each specific CPU time as is returned by cpu_times().
1796 For instance, on Linux we'll get:
1797
1798 >>> cpu_times_percent()
1799 cpupercent(user=4.8, nice=0.0, system=4.8, idle=90.5, iowait=0.0,
1800 irq=0.0, softirq=0.0, steal=0.0, guest=0.0, guest_nice=0.0)
1801 >>>
1802
1803 *interval* and *percpu* arguments have the same meaning as in
1804 cpu_percent().
1805 """
1806 global _last_cpu_times_2
1807 global _last_per_cpu_times_2
1808 blocking = interval is not None and interval > 0.0
1809 if interval is not None and interval < 0:
1810 raise ValueError("interval is not positive (got %r)" % interval)
1811
1812 def calculate(t1, t2):
1813 nums = []
1814 times_delta = _cpu_times_deltas(t1, t2)
1815 all_delta = _cpu_tot_time(times_delta)
1816 # "scale" is the value to multiply each delta with to get percentages.
1817 # We use "max" to avoid division by zero: if all_delta is 0 then all
1818 # fields are 0, so the percentages will be 0 too (note that CPU times
1819 # are floats expressed in seconds, not integers).
1820 scale = 100.0 / max(1, all_delta)
1821 for field_delta in times_delta:
1822 field_perc = field_delta * scale
1823 field_perc = round(field_perc, 1)
1824 # make sure we don't return negative values or values over 100%
1825 field_perc = min(max(0.0, field_perc), 100.0)
1826 nums.append(field_perc)
1827 return _psplatform.scputimes(*nums)
1828
1829 # system-wide usage
1830 if not percpu:
1831 if blocking:
1832 t1 = cpu_times()
1833 time.sleep(interval)
1834 else:
1835 t1 = _last_cpu_times_2
1836 if t1 is None:
1837 # Something bad happened at import time. We'll
1838 # get a meaningful result on the next call. See:
1839 # https://github.com/giampaolo/psutil/pull/715
1840 t1 = cpu_times()
1841 _last_cpu_times_2 = cpu_times()
1842 return calculate(t1, _last_cpu_times_2)
1843 # per-cpu usage
1844 else:
1845 ret = []
1846 if blocking:
1847 tot1 = cpu_times(percpu=True)
1848 time.sleep(interval)
1849 else:
1850 tot1 = _last_per_cpu_times_2
1851 if tot1 is None:
1852 # Something bad happened at import time. We'll
1853 # get a meaningful result on the next call. See:
1854 # https://github.com/giampaolo/psutil/pull/715
1855 tot1 = cpu_times(percpu=True)
1856 _last_per_cpu_times_2 = cpu_times(percpu=True)
1857 for t1, t2 in zip(tot1, _last_per_cpu_times_2):
1858 ret.append(calculate(t1, t2))
1859 return ret
1860
1861
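# Illustrative sketch (not upstream code; the helper name is made up): take
# one blocking reading and report a few fields. "iowait" and "steal" exist
# only on Linux, hence the getattr() fallbacks; "busy" mirrors the logic of
# _cpu_busy_time() above.
def _example_cpu_times_report(interval=1):  # pragma: no cover
    snap = cpu_times_percent(interval=interval)
    busy = max(0.0, 100.0 - snap.idle - getattr(snap, "iowait", 0.0))
    return {
        "busy": round(busy, 1),
        "user": snap.user,
        "system": snap.system,
        "iowait": getattr(snap, "iowait", None),  # None outside Linux
        "steal": getattr(snap, "steal", None),    # None outside Linux
    }

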
1862 def cpu_stats():
1863 """Return CPU statistics."""
1864 return _psplatform.cpu_stats()
1865
1866
1867 if hasattr(_psplatform, "cpu_freq"):
1868
1869 def cpu_freq(percpu=False):
1870 """Return CPU frequency as a namedtuple including current,
1871 min and max frequencies expressed in MHz.
1872 
1873 If *percpu* is True and the system supports per-cpu frequency
1874 retrieval (Linux only) a list of frequencies is returned for
1875 each CPU. If not, a list with a single element is returned.
1876 """
1877 ret = _psplatform.cpu_freq()
1878 if percpu:
1879 return ret
1880 else:
1881 num_cpus = float(len(ret))
1882 if num_cpus == 0:
1883 return None
1884 elif num_cpus == 1:
1885 return ret[0]
1886 else:
1887 currs, mins, maxs = 0.0, 0.0, 0.0
1888 set_none = False
1889 for cpu in ret:
1890 currs += cpu.current
1891 # On Linux if /proc/cpuinfo is used min/max are set
1892 # to None.
1893 if LINUX and cpu.min is None:
1894 set_none = True
1895 continue
1896 mins += cpu.min
1897 maxs += cpu.max
1898
1899 current = currs / num_cpus
1900
1901 if set_none:
1902 min_ = max_ = None
1903 else:
1904 min_ = mins / num_cpus
1905 max_ = maxs / num_cpus
1906
1907 return _common.scpufreq(current, min_, max_)
1908
1909 __all__.append("cpu_freq")
1910
1911
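# Illustrative usage sketch (not upstream code; the helper name is made up).
# cpu_freq() is only defined when the platform module provides it, so the
# call is guarded; min/max may be 0.0, or None on some Linux setups.
def _example_cpu_freq_summary():  # pragma: no cover
    if not hasattr(_psplatform, "cpu_freq"):
        return None
    freq = cpu_freq()
    if freq is None:
        return None
    return "current=%.1f MHz (min=%s, max=%s)" % (
        freq.current, freq.min, freq.max)

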
1912 if hasattr(os, "getloadavg") or hasattr(_psplatform, "getloadavg"):
1913 # Perform this hasattr check once at import time to either use the
1914 # platform-based code or proxy straight from the os module.
1915 if hasattr(os, "getloadavg"):
1916 getloadavg = os.getloadavg
1917 else:
1918 getloadavg = _psplatform.getloadavg
1919
1920 __all__.append("getloadavg")
1921
1922
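# Illustrative sketch (not upstream code; the helper name is made up):
# getloadavg() returns the 1, 5 and 15 minute load averages; dividing by the
# number of logical CPUs gives a rough per-core figure, a common way to read
# these numbers.
def _example_normalized_load():  # pragma: no cover
    if "getloadavg" not in __all__:
        return None
    ncpus = cpu_count() or 1
    return tuple(round(load / ncpus, 2) for load in getloadavg())

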
1923 # =====================================================================
1924 # --- system memory related functions
1925 # =====================================================================
1926
1927
1928 def virtual_memory():
1929 """Return statistics about system memory usage as a namedtuple
1930 including the following fields, expressed in bytes:
1931
1932 - total:
1933 total physical memory available.
1934
1935 - available:
1936 the memory that can be given instantly to processes without the
1937 system going into swap.
1938 This is calculated by summing different memory values depending
1939 on the platform and it is supposed to be used to monitor actual
1940 memory usage in a cross-platform fashion.
1941
1942 - percent:
1943 the percentage usage calculated as (total - available) / total * 100
1944
1945 - used:
1946 memory used, calculated differently depending on the platform and
1947 designed for informational purposes only:
1948 macOS: active + wired
1949 BSD: active + wired + cached
1950 Linux: total - free
1951
1952 - free:
1953 memory not being used at all (zeroed) that is readily available;
1954 note that this doesn't reflect the actual memory available
1955 (use 'available' instead)
1956
1957 Platform-specific fields:
1958
1959 - active (UNIX):
1960 memory currently in use or very recently used, and so it is in RAM.
1961
1962 - inactive (UNIX):
1963 memory that is marked as not used.
1964
1965 - buffers (BSD, Linux):
1966 cache for things like file system metadata.
1967
1968 - cached (BSD, macOS):
1969 cache for various things.
1970
1971 - wired (macOS, BSD):
1972 memory that is marked to always stay in RAM. It is never moved to disk.
1973
1974 - shared (BSD):
1975 memory that may be simultaneously accessed by multiple processes.
1976
1977 The sum of 'used' and 'available' does not necessarily equal total.
1978 On Windows 'available' and 'free' are the same.
1979 """
1980 global _TOTAL_PHYMEM
1981 ret = _psplatform.virtual_memory()
1982 # cached for later use in Process.memory_percent()
1983 _TOTAL_PHYMEM = ret.total
1984 return ret
1985
1986
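# Illustrative sketch (not upstream code; the helper name and the 100 MiB
# threshold are made up): the docstring above recommends 'available' (not
# 'free') for cross-platform monitoring, so a low-memory check looks like:
def _example_memory_low(threshold=100 * 1024 * 1024):  # pragma: no cover
    mem = virtual_memory()
    return mem.available < threshold, mem.percent

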
1987 def swap_memory():
1988 """Return system swap memory statistics as a namedtuple including
1989 the following fields:
1990
1991 - total: total swap memory in bytes
1992 - used: used swap memory in bytes
1993 - free: free swap memory in bytes
1994 - percent: the percentage usage
1995 - sin: no. of bytes the system has swapped in from disk (cumulative)
1996 - sout: no. of bytes the system has swapped out from disk (cumulative)
1997
1998 'sin' and 'sout' on Windows are meaningless and always set to 0.
1999 """
2000 return _psplatform.swap_memory()
2001
2002
2003 # =====================================================================
2004 # --- disks/partitions related functions
2005 # =====================================================================
2006
2007
2008 def disk_usage(path):
2009 """Return disk usage statistics about the given *path* as a
2010 namedtuple including total, used and free space expressed in bytes
2011 plus the percentage usage.
2012 """
2013 return _psplatform.disk_usage(path)
2014
2015
2016 def disk_partitions(all=False):
2017 """Return mounted partitions as a list of
2018 (device, mountpoint, fstype, opts) namedtuple.
2019 The 'opts' field is a raw, comma-separated string indicating mount
2020 options, which may vary depending on the platform.
2021 
2022 If the *all* parameter is False return physical devices only and
2023 ignore all others.
2024 """
2025 return _psplatform.disk_partitions(all)
2026
2027
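# Illustrative sketch (not upstream code; the helper name is made up):
# combine disk_partitions() and disk_usage() to report the usage percentage
# per mountpoint, skipping mountpoints that cannot be accessed (e.g. an
# empty CD-ROM drive on Windows raises OSError).
def _example_disk_report():  # pragma: no cover
    report = {}
    for part in disk_partitions(all=False):
        try:
            usage = disk_usage(part.mountpoint)
        except OSError:
            continue
        report[part.mountpoint] = usage.percent
    return report

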
2028 def disk_io_counters(perdisk=False, nowrap=True):
2029 """Return system disk I/O statistics as a namedtuple including
2030 the following fields:
2031
2032 - read_count: number of reads
2033 - write_count: number of writes
2034 - read_bytes: number of bytes read
2035 - write_bytes: number of bytes written
2036 - read_time: time spent reading from disk (in ms)
2037 - write_time: time spent writing to disk (in ms)
2038
2039 Platform specific:
2040
2041 - busy_time: (Linux, FreeBSD) time spent doing actual I/Os (in ms)
2042 - read_merged_count (Linux): number of merged reads
2043 - write_merged_count (Linux): number of merged writes
2044
2045 If *perdisk* is True return the same information for every
2046 physical disk installed on the system as a dictionary
2047 with partition names as the keys and the namedtuple
2048 described above as the values.
2049
2050 If *nowrap* is True it detects and adjusts the numbers which
2051 overflow and wrap (restart from 0), adding "old value" to "new
2052 value" so that the returned numbers will always be increasing or
2053 remain the same, but never decrease.
2054 "disk_io_counters.cache_clear()" can be used to invalidate the
2055 cache.
2056
2057 On recent Windows versions 'diskperf -y' command may need to be
2058 executed first otherwise this function won't find any disk.
2059 """
2060 kwargs = dict(perdisk=perdisk) if LINUX else {}
2061 rawdict = _psplatform.disk_io_counters(**kwargs)
2062 if not rawdict:
2063 return {} if perdisk else None
2064 if nowrap:
2065 rawdict = _wrap_numbers(rawdict, 'psutil.disk_io_counters')
2066 nt = getattr(_psplatform, "sdiskio", _common.sdiskio)
2067 if perdisk:
2068 for disk, fields in rawdict.items():
2069 rawdict[disk] = nt(*fields)
2070 return rawdict
2071 else:
2072 return nt(*[sum(x) for x in zip(*rawdict.values())])
2073
2074
2075 disk_io_counters.cache_clear = functools.partial(
2076 _wrap_numbers.cache_clear, 'psutil.disk_io_counters')
2077 disk_io_counters.cache_clear.__doc__ = "Clears nowrap argument cache"
2078
2079
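# Illustrative sketch (not upstream code; the helper name and the 1 second
# interval are made up): the counters are cumulative, so a throughput figure
# requires two snapshots separated by an interval; with nowrap=True (the
# default) the deltas cannot go negative when a kernel counter wraps.
def _example_disk_throughput(interval=1.0):  # pragma: no cover
    before = disk_io_counters()
    time.sleep(interval)
    after = disk_io_counters()
    if before is None or after is None:
        return None
    return {
        "read_bytes_per_sec":
            (after.read_bytes - before.read_bytes) / interval,
        "write_bytes_per_sec":
            (after.write_bytes - before.write_bytes) / interval,
    }

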
2080 # =====================================================================
2081 # --- network related functions
2082 # =====================================================================
2083
2084
2085 def net_io_counters(pernic=False, nowrap=True):
2086 """Return network I/O statistics as a namedtuple including
2087 the following fields:
2088
2089 - bytes_sent: number of bytes sent
2090 - bytes_recv: number of bytes received
2091 - packets_sent: number of packets sent
2092 - packets_recv: number of packets received
2093 - errin: total number of errors while receiving
2094 - errout: total number of errors while sending
2095 - dropin: total number of incoming packets which were dropped
2096 - dropout: total number of outgoing packets which were dropped
2097 (always 0 on macOS and BSD)
2098
2099 If *pernic* is True return the same information for every
2100 network interface installed on the system as a dictionary
2101 with network interface names as the keys and the namedtuple
2102 described above as the values.
2103
2104 If *nowrap* is True it detects and adjusts the numbers which
2105 overflow and wrap (restart from 0), adding "old value" to "new
2106 value" so that the returned numbers will always be increasing or
2107 remain the same, but never decrease.
2108 "net_io_counters.cache_clear()" can be used to invalidate the
2109 cache.
2110 """
2111 rawdict = _psplatform.net_io_counters()
2112 if not rawdict:
2113 return {} if pernic else None
2114 if nowrap:
2115 rawdict = _wrap_numbers(rawdict, 'psutil.net_io_counters')
2116 if pernic:
2117 for nic, fields in rawdict.items():
2118 rawdict[nic] = _common.snetio(*fields)
2119 return rawdict
2120 else:
2121 return _common.snetio(*[sum(x) for x in zip(*rawdict.values())])
2122
2123
2124 net_io_counters.cache_clear = functools.partial(
2125 _wrap_numbers.cache_clear, 'psutil.net_io_counters')
2126 net_io_counters.cache_clear.__doc__ = "Clears nowrap argument cache"
2127
2128
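# Illustrative sketch (not upstream code; the helper name and interval are
# made up): with pernic=True the same cumulative counters are returned per
# interface, which makes it easy to spot the busiest NIC between snapshots.
def _example_busiest_nic(interval=1.0):  # pragma: no cover
    before = net_io_counters(pernic=True)
    time.sleep(interval)
    after = net_io_counters(pernic=True)
    deltas = {}
    for nic in after:
        if nic in before:
            deltas[nic] = (
                (after[nic].bytes_sent - before[nic].bytes_sent) +
                (after[nic].bytes_recv - before[nic].bytes_recv))
    return max(deltas, key=deltas.get) if deltas else None

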
2129 def net_connections(kind='inet'):
2130 """Return system-wide socket connections as a list of
2131 (fd, family, type, laddr, raddr, status, pid) namedtuples.
2132 In case of limited privileges 'fd' and 'pid' may be set to -1
2133 and None respectively.
2134 The *kind* parameter filters for connections that fit the
2135 following criteria:
2136
2137 +------------+----------------------------------------------------+
2138 | Kind Value | Connections using |
2139 +------------+----------------------------------------------------+
2140 | inet | IPv4 and IPv6 |
2141 | inet4 | IPv4 |
2142 | inet6 | IPv6 |
2143 | tcp | TCP |
2144 | tcp4 | TCP over IPv4 |
2145 | tcp6 | TCP over IPv6 |
2146 | udp | UDP |
2147 | udp4 | UDP over IPv4 |
2148 | udp6 | UDP over IPv6 |
2149 | unix | UNIX socket (both UDP and TCP protocols) |
2150 | all | the sum of all the possible families and protocols |
2151 +------------+----------------------------------------------------+
2152
2153 On macOS this function requires root privileges.
2154 """
2155 return _psplatform.net_connections(kind)
2156
2157
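# Illustrative sketch (not upstream code; the helper name is made up): list
# the locally listening TCP ports together with the owning PID, which may be
# None when running without sufficient privileges.
def _example_listening_ports():  # pragma: no cover
    ports = []
    for conn in net_connections(kind="tcp"):
        if conn.status == CONN_LISTEN and conn.laddr:
            ports.append((conn.laddr.port, conn.pid))
    return sorted(ports)

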
2158 def net_if_addrs():
2159 """Return the addresses associated with each NIC (network interface
2160 card) installed on the system as a dictionary whose keys are the
2161 NIC names and whose values are lists of namedtuples, one for each
2162 address assigned to the NIC. Each namedtuple includes 5 fields:
2163
2164 - family: can be either socket.AF_INET, socket.AF_INET6 or
2165 psutil.AF_LINK, which refers to a MAC address.
2166 - address: the primary address; it is always set.
2167 - netmask: the netmask; 'netmask', 'broadcast' and 'ptp' may be None.
2168 - ptp: stands for "point to point" and references the
2169 destination address on a point to point interface
2170 (typically a VPN).
2171 - broadcast: the broadcast address; 'broadcast' and 'ptp' are
mutually exclusive.
2172
2173 Note: you can have more than one address of the same family
2174 associated with each interface.
2175 """
2176 has_enums = sys.version_info >= (3, 4)
2177 if has_enums:
2178 import socket
2179 rawlist = _psplatform.net_if_addrs()
2180 rawlist.sort(key=lambda x: x[1]) # sort by family
2181 ret = collections.defaultdict(list)
2182 for name, fam, addr, mask, broadcast, ptp in rawlist:
2183 if has_enums:
2184 try:
2185 fam = socket.AddressFamily(fam)
2186 except ValueError:
2187 if WINDOWS and fam == -1:
2188 fam = _psplatform.AF_LINK
2189 elif (hasattr(_psplatform, "AF_LINK") and
2190 _psplatform.AF_LINK == fam):
2191 # Linux defines AF_LINK as an alias for AF_PACKET.
2192 # We re-set the family here so that repr(family)
2193 # will show AF_LINK rather than AF_PACKET
2194 fam = _psplatform.AF_LINK
2195 if fam == _psplatform.AF_LINK:
2196 # The underlying C function may return an incomplete MAC
2197 # address in which case we fill it with null bytes, see:
2198 # https://github.com/giampaolo/psutil/issues/786
2199 separator = ":" if POSIX else "-"
2200 while addr.count(separator) < 5:
2201 addr += "%s00" % separator
2202 ret[name].append(_common.snicaddr(fam, addr, mask, broadcast, ptp))
2203 return dict(ret)
2204
2205
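# Illustrative sketch (not upstream code; the helper name is made up):
# extract the IPv4 addresses of every interface; entries whose family is
# AF_LINK carry the MAC address instead and are filtered out here.
def _example_ipv4_addresses():  # pragma: no cover
    import socket
    result = {}
    for nic, addrs in net_if_addrs().items():
        result[nic] = [
            addr.address for addr in addrs if addr.family == socket.AF_INET]
    return result

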
2206 def net_if_stats():
2207 """Return information about each NIC (network interface card)
2208 installed on the system as a dictionary whose keys are the
2209 NIC names and whose values are namedtuples with the following fields:
2210
2211 - isup: whether the interface is up (bool)
2212 - duplex: can be either NIC_DUPLEX_FULL, NIC_DUPLEX_HALF or
2213 NIC_DUPLEX_UNKNOWN
2214 - speed: the NIC speed expressed in megabits per second (Mbps); if
2215 it can't be determined (e.g. 'localhost') it will be set to 0.
2216 - mtu: the maximum transmission unit expressed in bytes.
2217 """
2218 return _psplatform.net_if_stats()
2219
2220
2221 # =====================================================================
2222 # --- sensors
2223 # =====================================================================
2224
2225
2226 # Linux, macOS
2227 if hasattr(_psplatform, "sensors_temperatures"):
2228
2229 def sensors_temperatures(fahrenheit=False):
2230 """Return hardware temperatures. Each entry is a namedtuple
2231 representing a certain hardware sensor (it may be a CPU, a
2232 hard disk or something else, depending on the OS and its
2233 configuration).
2234 All temperatures are expressed in Celsius unless *fahrenheit*
2235 is set to True.
2236 """
2237 def convert(n):
2238 if n is not None:
2239 return (float(n) * 9 / 5) + 32 if fahrenheit else n
2240
2241 ret = collections.defaultdict(list)
2242 rawdict = _psplatform.sensors_temperatures()
2243
2244 for name, values in rawdict.items():
2245 while values:
2246 label, current, high, critical = values.pop(0)
2247 current = convert(current)
2248 high = convert(high)
2249 critical = convert(critical)
2250
2251 if high and not critical:
2252 critical = high
2253 elif critical and not high:
2254 high = critical
2255
2256 ret[name].append(
2257 _common.shwtemp(label, current, high, critical))
2258
2259 return dict(ret)
2260
2261 __all__.append("sensors_temperatures")
2262
2263
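# Illustrative sketch (not upstream code; the helper name is made up):
# sensors_temperatures() only exists on platforms that expose it (see the
# hasattr guard above), so the example checks __all__ first and then flags
# sensors reading at or above their 'high' threshold.
def _example_hot_sensors():  # pragma: no cover
    if "sensors_temperatures" not in __all__:
        return []
    hot = []
    for name, entries in sensors_temperatures().items():
        for entry in entries:
            if (entry.current is not None and entry.high is not None and
                    entry.current >= entry.high):
                hot.append((name, entry.label, entry.current))
    return hot

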
2264 # Linux, macOS
2265 if hasattr(_psplatform, "sensors_fans"):
2266
2267 def sensors_fans():
2268 """Return fan speeds. Each entry is a namedtuple
2269 representing a certain hardware sensor.
2270 All speeds are expressed in RPM (revolutions per minute).
2271 """
2272 return _psplatform.sensors_fans()
2273
2274 __all__.append("sensors_fans")
2275
2276
2277 # Linux, Windows, FreeBSD, macOS
2278 if hasattr(_psplatform, "sensors_battery"):
2279
2280 def sensors_battery():
2281 """Return battery information. If no battery is installed
2282 returns None.
2283
2284 - percent: battery power left as a percentage.
2285 - secsleft: a rough approximation of how many seconds are left
2286 before the battery runs out of power. May be
2287 POWER_TIME_UNLIMITED or POWER_TIME_UNKNOWN.
2288 - power_plugged: True if the AC power cable is connected.
2289 """
2290 return _psplatform.sensors_battery()
2291
2292 __all__.append("sensors_battery")
2293
2294
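# Illustrative sketch (not upstream code; the helper name is made up): report
# battery charge and a human readable time estimate. When AC power is plugged
# in, or secsleft holds one of the sentinel values mentioned in the docstring
# above (which are negative), no estimate is shown.
def _example_battery_status():  # pragma: no cover
    if "sensors_battery" not in __all__:
        return None
    batt = sensors_battery()
    if batt is None:
        return None  # no battery installed
    if batt.power_plugged or batt.secsleft < 0:
        left = "n/a"
    else:
        left = "%d min" % (batt.secsleft // 60)
    return "%.0f%% (%s left)" % (batt.percent, left)

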
2295 # =====================================================================
2296 # --- other system related functions
2297 # =====================================================================
2298
2299
2300 def boot_time():
2301 """Return the system boot time expressed in seconds since the epoch."""
2302 # Note: we are not caching this because it is subject to
2303 # system clock updates.
2304 return _psplatform.boot_time()
2305
2306
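# Illustrative sketch (not upstream code; the helper name is made up):
# boot_time() is an epoch timestamp, so system uptime is simply
# "now minus boot time".
def _example_uptime_seconds():  # pragma: no cover
    return time.time() - boot_time()

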
2307 def users():
2308 """Return users currently connected on the system as a list of
2309 namedtuples including the following fields.
2310
2311 - user: the name of the user
2312 - terminal: the tty or pseudo-tty associated with the user, if any.
2313 - host: the host name associated with the entry, if any.
2314 - started: the creation time as a floating point number expressed in
2315 seconds since the epoch.
2316 """
2317 return _psplatform.users()
2318
2319
2320 # =====================================================================
2321 # --- Windows services
2322 # =====================================================================
2323
2324
2325 if WINDOWS:
2326
2327 def win_service_iter():
2328 """Return a generator yielding a WindowsService instance for all
2329 Windows services installed.
2330 """
2331 return _psplatform.win_service_iter()
2332
2333 def win_service_get(name):
2334 """Get a Windows service by *name*.
2335 Raise NoSuchProcess if no service with that name exists.
2336 """
2337 return _psplatform.win_service_get(name)
2338
2339
2340 # =====================================================================
2341
2342
2343 def test(): # pragma: no cover
2344 from ._common import bytes2human
2345 from ._compat import get_terminal_size
2346
2347 today_day = datetime.date.today()
2348 templ = "%-10s %5s %5s %7s %7s %5s %6s %6s %6s %s"
2349 attrs = ['pid', 'memory_percent', 'name', 'cmdline', 'cpu_times',
2350 'create_time', 'memory_info', 'status', 'nice', 'username']
2351 print(templ % ("USER", "PID", "%MEM", "VSZ", "RSS", "NICE",
2352 "STATUS", "START", "TIME", "CMDLINE"))
2353 for p in process_iter(attrs, ad_value=None):
2354 if p.info['create_time']:
2355 ctime = datetime.datetime.fromtimestamp(p.info['create_time'])
2356 if ctime.date() == today_day:
2357 ctime = ctime.strftime("%H:%M")
2358 else:
2359 ctime = ctime.strftime("%b%d")
2360 else:
2361 ctime = ''
2362 if p.info['cpu_times']:
2363 cputime = time.strftime("%M:%S",
2364 time.localtime(sum(p.info['cpu_times'])))
2365 else:
2366 cputime = ''
2367
2368 user = p.info['username'] or ''
2369 if not user and POSIX:
2370 try:
2371 user = p.uids()[0]
2372 except Error:
2373 pass
2374 if user and WINDOWS and '\\' in user:
2375 user = user.split('\\')[1]
2376 user = user[:9]
2377 vms = bytes2human(p.info['memory_info'].vms) if \
2378 p.info['memory_info'] is not None else ''
2379 rss = bytes2human(p.info['memory_info'].rss) if \
2380 p.info['memory_info'] is not None else ''
2381 memp = round(p.info['memory_percent'], 1) if \
2382 p.info['memory_percent'] is not None else ''
2383 nice = int(p.info['nice']) if p.info['nice'] else ''
2384 if p.info['cmdline']:
2385 cmdline = ' '.join(p.info['cmdline'])
2386 else:
2387 cmdline = p.info['name']
2388 status = p.info['status'][:5] if p.info['status'] else ''
2389
2390 line = templ % (
2391 user[:10],
2392 p.info['pid'],
2393 memp,
2394 vms,
2395 rss,
2396 nice,
2397 status,
2398 ctime,
2399 cputime,
2400 cmdline)
2401 print(line[:get_terminal_size()[0]])
2402
2403
2404 del memoize, memoize_when_activated, division, deprecated_method
2405 if sys.version_info[0] < 3:
2406 del num, x
2407
2408 if __name__ == "__main__":
2409 test()