comparison of planemo/lib/python3.7/site-packages/psutil/__init__.py @ 1:56ad4e20f292 (draft)
"planemo upload commit 6eee67778febed82ddd413c3ca40b3183a3898f1"
author:   guerler
date:     Fri, 31 Jul 2020 00:32:28 -0400
parents:
children:
comparing 0:d30785e31577 with 1:56ad4e20f292
1 # -*- coding: utf-8 -*- | |
2 | |
3 # Copyright (c) 2009, Giampaolo Rodola'. All rights reserved. | |
4 # Use of this source code is governed by a BSD-style license that can be | |
5 # found in the LICENSE file. | |
6 | |
7 """psutil is a cross-platform library for retrieving information on | |
8 running processes and system utilization (CPU, memory, disks, network, | |
9 sensors) in Python. Supported platforms: | |
10 | |
11 - Linux | |
12 - Windows | |
13 - macOS | |
14 - FreeBSD | |
15 - OpenBSD | |
16 - NetBSD | |
17 - Sun Solaris | |
18 - AIX | |
19 | |
20 Works with Python versions from 2.6 to 3.4+. | |
21 """ | |
22 | |
23 from __future__ import division | |
24 import collections | |
25 import contextlib | |
26 import datetime | |
27 import functools | |
28 import os | |
29 import signal | |
30 import subprocess | |
31 import sys | |
32 import threading | |
33 import time | |
34 try: | |
35 import pwd | |
36 except ImportError: | |
37 pwd = None | |
38 | |
39 from . import _common | |
40 from ._common import AccessDenied | |
41 from ._common import Error | |
42 from ._common import memoize_when_activated | |
43 from ._common import NoSuchProcess | |
44 from ._common import TimeoutExpired | |
45 from ._common import wrap_numbers as _wrap_numbers | |
46 from ._common import ZombieProcess | |
47 from ._compat import long | |
48 from ._compat import PermissionError | |
49 from ._compat import ProcessLookupError | |
50 from ._compat import PY3 as _PY3 | |
51 | |
52 from ._common import CONN_CLOSE | |
53 from ._common import CONN_CLOSE_WAIT | |
54 from ._common import CONN_CLOSING | |
55 from ._common import CONN_ESTABLISHED | |
56 from ._common import CONN_FIN_WAIT1 | |
57 from ._common import CONN_FIN_WAIT2 | |
58 from ._common import CONN_LAST_ACK | |
59 from ._common import CONN_LISTEN | |
60 from ._common import CONN_NONE | |
61 from ._common import CONN_SYN_RECV | |
62 from ._common import CONN_SYN_SENT | |
63 from ._common import CONN_TIME_WAIT | |
64 from ._common import NIC_DUPLEX_FULL | |
65 from ._common import NIC_DUPLEX_HALF | |
66 from ._common import NIC_DUPLEX_UNKNOWN | |
67 from ._common import POWER_TIME_UNKNOWN | |
68 from ._common import POWER_TIME_UNLIMITED | |
69 from ._common import STATUS_DEAD | |
70 from ._common import STATUS_DISK_SLEEP | |
71 from ._common import STATUS_IDLE | |
72 from ._common import STATUS_LOCKED | |
73 from ._common import STATUS_PARKED | |
74 from ._common import STATUS_RUNNING | |
75 from ._common import STATUS_SLEEPING | |
76 from ._common import STATUS_STOPPED | |
77 from ._common import STATUS_TRACING_STOP | |
78 from ._common import STATUS_WAITING | |
79 from ._common import STATUS_WAKING | |
80 from ._common import STATUS_ZOMBIE | |
81 | |
82 from ._common import AIX | |
83 from ._common import BSD | |
84 from ._common import FREEBSD # NOQA | |
85 from ._common import LINUX | |
86 from ._common import MACOS | |
87 from ._common import NETBSD # NOQA | |
88 from ._common import OPENBSD # NOQA | |
89 from ._common import OSX # deprecated alias | |
90 from ._common import POSIX # NOQA | |
91 from ._common import SUNOS | |
92 from ._common import WINDOWS | |
93 | |
94 if LINUX: | |
95 # This is public API and it will be retrieved from _pslinux.py | |
96 # via sys.modules. | |
97 PROCFS_PATH = "/proc" | |
98 | |
99 from . import _pslinux as _psplatform | |
100 | |
101 from ._pslinux import IOPRIO_CLASS_BE # NOQA | |
102 from ._pslinux import IOPRIO_CLASS_IDLE # NOQA | |
103 from ._pslinux import IOPRIO_CLASS_NONE # NOQA | |
104 from ._pslinux import IOPRIO_CLASS_RT # NOQA | |
105 # Linux >= 2.6.36 | |
106 if _psplatform.HAS_PRLIMIT: | |
107 from ._psutil_linux import RLIM_INFINITY # NOQA | |
108 from ._psutil_linux import RLIMIT_AS # NOQA | |
109 from ._psutil_linux import RLIMIT_CORE # NOQA | |
110 from ._psutil_linux import RLIMIT_CPU # NOQA | |
111 from ._psutil_linux import RLIMIT_DATA # NOQA | |
112 from ._psutil_linux import RLIMIT_FSIZE # NOQA | |
113 from ._psutil_linux import RLIMIT_LOCKS # NOQA | |
114 from ._psutil_linux import RLIMIT_MEMLOCK # NOQA | |
115 from ._psutil_linux import RLIMIT_NOFILE # NOQA | |
116 from ._psutil_linux import RLIMIT_NPROC # NOQA | |
117 from ._psutil_linux import RLIMIT_RSS # NOQA | |
118 from ._psutil_linux import RLIMIT_STACK # NOQA | |
119 # Kinda ugly but considerably faster than using hasattr() and | |
120 # setattr() against the module object (we are at import time: | |
121 # speed matters). | |
122 from . import _psutil_linux | |
123 try: | |
124 RLIMIT_MSGQUEUE = _psutil_linux.RLIMIT_MSGQUEUE | |
125 except AttributeError: | |
126 pass | |
127 try: | |
128 RLIMIT_NICE = _psutil_linux.RLIMIT_NICE | |
129 except AttributeError: | |
130 pass | |
131 try: | |
132 RLIMIT_RTPRIO = _psutil_linux.RLIMIT_RTPRIO | |
133 except AttributeError: | |
134 pass | |
135 try: | |
136 RLIMIT_RTTIME = _psutil_linux.RLIMIT_RTTIME | |
137 except AttributeError: | |
138 pass | |
139 try: | |
140 RLIMIT_SIGPENDING = _psutil_linux.RLIMIT_SIGPENDING | |
141 except AttributeError: | |
142 pass | |
143 | |
144 elif WINDOWS: | |
145 from . import _pswindows as _psplatform | |
146 from ._psutil_windows import ABOVE_NORMAL_PRIORITY_CLASS # NOQA | |
147 from ._psutil_windows import BELOW_NORMAL_PRIORITY_CLASS # NOQA | |
148 from ._psutil_windows import HIGH_PRIORITY_CLASS # NOQA | |
149 from ._psutil_windows import IDLE_PRIORITY_CLASS # NOQA | |
150 from ._psutil_windows import NORMAL_PRIORITY_CLASS # NOQA | |
151 from ._psutil_windows import REALTIME_PRIORITY_CLASS # NOQA | |
152 from ._pswindows import CONN_DELETE_TCB # NOQA | |
153 from ._pswindows import IOPRIO_VERYLOW # NOQA | |
154 from ._pswindows import IOPRIO_LOW # NOQA | |
155 from ._pswindows import IOPRIO_NORMAL # NOQA | |
156 from ._pswindows import IOPRIO_HIGH # NOQA | |
157 | |
158 elif MACOS: | |
159 from . import _psosx as _psplatform | |
160 | |
161 elif BSD: | |
162 from . import _psbsd as _psplatform | |
163 | |
164 elif SUNOS: | |
165 from . import _pssunos as _psplatform | |
166 from ._pssunos import CONN_BOUND # NOQA | |
167 from ._pssunos import CONN_IDLE # NOQA | |
168 | |
169 # This is public writable API which is read from _pslinux.py and | |
170 # _pssunos.py via sys.modules. | |
171 PROCFS_PATH = "/proc" | |
172 | |
173 elif AIX: | |
174 from . import _psaix as _psplatform | |
175 | |
176 # This is public API and it will be retrieved from _psaix.py | |
177 # via sys.modules. | |
178 PROCFS_PATH = "/proc" | |
179 | |
180 else: # pragma: no cover | |
181 raise NotImplementedError('platform %s is not supported' % sys.platform) | |
182 | |
183 | |
184 __all__ = [ | |
185 # exceptions | |
186 "Error", "NoSuchProcess", "ZombieProcess", "AccessDenied", | |
187 "TimeoutExpired", | |
188 | |
189 # constants | |
190 "version_info", "__version__", | |
191 | |
192 "STATUS_RUNNING", "STATUS_IDLE", "STATUS_SLEEPING", "STATUS_DISK_SLEEP", | |
193 "STATUS_STOPPED", "STATUS_TRACING_STOP", "STATUS_ZOMBIE", "STATUS_DEAD", | |
194 "STATUS_WAKING", "STATUS_LOCKED", "STATUS_WAITING", "STATUS_LOCKED", | |
195 "STATUS_PARKED", | |
196 | |
197 "CONN_ESTABLISHED", "CONN_SYN_SENT", "CONN_SYN_RECV", "CONN_FIN_WAIT1", | |
198 "CONN_FIN_WAIT2", "CONN_TIME_WAIT", "CONN_CLOSE", "CONN_CLOSE_WAIT", | |
199 "CONN_LAST_ACK", "CONN_LISTEN", "CONN_CLOSING", "CONN_NONE", | |
200 | |
201 "AF_LINK", | |
202 | |
203 "NIC_DUPLEX_FULL", "NIC_DUPLEX_HALF", "NIC_DUPLEX_UNKNOWN", | |
204 | |
205 "POWER_TIME_UNKNOWN", "POWER_TIME_UNLIMITED", | |
206 | |
207 "BSD", "FREEBSD", "LINUX", "NETBSD", "OPENBSD", "MACOS", "OSX", "POSIX", | |
208 "SUNOS", "WINDOWS", "AIX", | |
209 | |
210 # classes | |
211 "Process", "Popen", | |
212 | |
213 # functions | |
214 "pid_exists", "pids", "process_iter", "wait_procs", # proc | |
215 "virtual_memory", "swap_memory", # memory | |
216 "cpu_times", "cpu_percent", "cpu_times_percent", "cpu_count", # cpu | |
217 "cpu_stats", # "cpu_freq", "getloadavg" | |
218 "net_io_counters", "net_connections", "net_if_addrs", # network | |
219 "net_if_stats", | |
220 "disk_io_counters", "disk_partitions", "disk_usage", # disk | |
221 # "sensors_temperatures", "sensors_battery", "sensors_fans" # sensors | |
222 "users", "boot_time", # others | |
223 ] | |
224 | |
225 AF_LINK = _psplatform.AF_LINK | |
226 | |
227 __all__.extend(_psplatform.__extra__all__) | |
228 __author__ = "Giampaolo Rodola'" | |
229 __version__ = "5.7.2" | |
230 version_info = tuple([int(num) for num in __version__.split('.')]) | |
231 | |
232 _timer = getattr(time, 'monotonic', time.time) | |
233 _TOTAL_PHYMEM = None | |
234 _LOWEST_PID = None | |
235 _SENTINEL = object() | |
236 | |
237 # Sanity check in case the user messed up with psutil installation | |
238 # or did something weird with sys.path. In this case we might end | |
239 # up importing a python module using a C extension module which | |
240 # was compiled for a different version of psutil. | |
241 # We want to prevent that by failing sooner rather than later. | |
242 # See: https://github.com/giampaolo/psutil/issues/564 | |
243 if (int(__version__.replace('.', '')) != | |
244 getattr(_psplatform.cext, 'version', None)): | |
245 msg = "version conflict: %r C extension module was built for another " \ | |
246 "version of psutil" % getattr(_psplatform.cext, "__file__") | |
247 if hasattr(_psplatform.cext, 'version'): | |
248 msg += " (%s instead of %s)" % ( | |
249 '.'.join([x for x in str(_psplatform.cext.version)]), __version__) | |
250 else: | |
251 msg += " (different than %s)" % __version__ | |
252 msg += "; you may try to 'pip uninstall psutil', manually remove %s" % ( | |
253 getattr(_psplatform.cext, "__file__", | |
254 "the existing psutil install directory")) | |
255 msg += " or clean the virtual env somehow, then reinstall" | |
256 raise ImportError(msg) | |
257 | |
258 | |
259 # ===================================================================== | |
260 # --- Utils | |
261 # ===================================================================== | |
262 | |
263 | |
264 if hasattr(_psplatform, 'ppid_map'): | |
265 # Faster version (Windows and Linux). | |
266 _ppid_map = _psplatform.ppid_map | |
267 else: | |
268 def _ppid_map(): | |
269 """Return a {pid: ppid, ...} dict for all running processes in | |
270 one shot. Used to speed up Process.children(). | |
271 """ | |
272 ret = {} | |
273 for pid in pids(): | |
274 try: | |
275 ret[pid] = _psplatform.Process(pid).ppid() | |
276 except (NoSuchProcess, ZombieProcess): | |
277 pass | |
278 return ret | |
279 | |
280 | |
281 def _assert_pid_not_reused(fun): | |
282 """Decorator which raises NoSuchProcess in case a process is no | |
283 longer running or its PID has been reused. | |
284 """ | |
285 @functools.wraps(fun) | |
286 def wrapper(self, *args, **kwargs): | |
287 if not self.is_running(): | |
288 raise NoSuchProcess(self.pid, self._name) | |
289 return fun(self, *args, **kwargs) | |
290 return wrapper | |
291 | |
292 | |
293 def _pprint_secs(secs): | |
294 """Format seconds in a human readable form.""" | |
295 now = time.time() | |
296 secs_ago = int(now - secs) | |
297 if secs_ago < 60 * 60 * 24: | |
298 fmt = "%H:%M:%S" | |
299 else: | |
300 fmt = "%Y-%m-%d %H:%M:%S" | |
301 return datetime.datetime.fromtimestamp(secs).strftime(fmt) | |
302 | |
303 | |
304 # ===================================================================== | |
305 # --- Process class | |
306 # ===================================================================== | |
307 | |
308 | |
309 class Process(object): | |
310 """Represents an OS process with the given PID. | |
311 If PID is omitted, the current process PID (os.getpid()) is used. | |
312 Raise NoSuchProcess if PID does not exist. | |
313 | |
314 Note that most of the methods of this class do not check whether | |
315 the PID of the process being queried has been reused over time. | |
316 That means you might end up retrieving information referring | |
317 to another process in case the original one this instance | |
318 refers to is gone in the meantime. | |
319 | |
320 The only exceptions for which process identity is pre-emptively | |
321 checked and guaranteed are: | |
322 | |
323 - parent() | |
324 - children() | |
325 - nice() (set) | |
326 - ionice() (set) | |
327 - rlimit() (set) | |
328 - cpu_affinity (set) | |
329 - suspend() | |
330 - resume() | |
331 - send_signal() | |
332 - terminate() | |
333 - kill() | |
334 | |
335 To prevent this problem for all other methods you can: | |
336 - use is_running() before querying the process | |
337 - if you're continuously iterating over a set of Process | |
338 instances use process_iter() which pre-emptively checks | |
339 process identity for every yielded instance | |
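
    A minimal usage sketch (the PID below is hypothetical):

    >>> import psutil
    >>> p = psutil.Process()        # the current process
    >>> p = psutil.Process(7055)    # a specific PID; raises NoSuchProcess if absent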
340 """ | |
341 | |
342 def __init__(self, pid=None): | |
343 self._init(pid) | |
344 | |
345 def _init(self, pid, _ignore_nsp=False): | |
346 if pid is None: | |
347 pid = os.getpid() | |
348 else: | |
349 if not _PY3 and not isinstance(pid, (int, long)): | |
350 raise TypeError('pid must be an integer (got %r)' % pid) | |
351 if pid < 0: | |
352 raise ValueError('pid must be a positive integer (got %s)' | |
353 % pid) | |
354 self._pid = pid | |
355 self._name = None | |
356 self._exe = None | |
357 self._create_time = None | |
358 self._gone = False | |
359 self._hash = None | |
360 self._lock = threading.RLock() | |
361 # used for caching on Windows only (on POSIX ppid may change) | |
362 self._ppid = None | |
363 # platform-specific modules define an _psplatform.Process | |
364 # implementation class | |
365 self._proc = _psplatform.Process(pid) | |
366 self._last_sys_cpu_times = None | |
367 self._last_proc_cpu_times = None | |
368 self._exitcode = _SENTINEL | |
369 # cache creation time for later use in is_running() method | |
370 try: | |
371 self.create_time() | |
372 except AccessDenied: | |
373 # We should never get here as AFAIK we're able to get | |
374 # process creation time on all platforms even as a | |
375 # limited user. | |
376 pass | |
377 except ZombieProcess: | |
378 # Zombies can still be queried by this class (although | |
379 # not always) and pids() returns them, so just go on. | |
380 pass | |
381 except NoSuchProcess: | |
382 if not _ignore_nsp: | |
383 msg = 'no process found with pid %s' % pid | |
384 raise NoSuchProcess(pid, None, msg) | |
385 else: | |
386 self._gone = True | |
387 # This pair is supposed to identify a Process instance | |
388 # uniquely over time (the PID alone is not enough as | |
389 # it might refer to a process whose PID has been reused). | |
390 # This will be used later in __eq__() and is_running(). | |
391 self._ident = (self.pid, self._create_time) | |
392 | |
393 def __str__(self): | |
394 try: | |
395 info = collections.OrderedDict() | |
396 except AttributeError: | |
397 info = {} # Python 2.6 | |
398 info["pid"] = self.pid | |
399 if self._name: | |
400 info['name'] = self._name | |
401 with self.oneshot(): | |
402 try: | |
403 info["name"] = self.name() | |
404 info["status"] = self.status() | |
405 except ZombieProcess: | |
406 info["status"] = "zombie" | |
407 except NoSuchProcess: | |
408 info["status"] = "terminated" | |
409 except AccessDenied: | |
410 pass | |
411 if self._exitcode not in (_SENTINEL, None): | |
412 info["exitcode"] = self._exitcode | |
413 if self._create_time: | |
414 info['started'] = _pprint_secs(self._create_time) | |
415 return "%s.%s(%s)" % ( | |
416 self.__class__.__module__, | |
417 self.__class__.__name__, | |
418 ", ".join(["%s=%r" % (k, v) for k, v in info.items()])) | |
419 | |
420 __repr__ = __str__ | |
421 | |
422 def __eq__(self, other): | |
423 # Test for equality with another Process object based | |
424 # on PID and creation time. | |
425 if not isinstance(other, Process): | |
426 return NotImplemented | |
427 return self._ident == other._ident | |
428 | |
429 def __ne__(self, other): | |
430 return not self == other | |
431 | |
432 def __hash__(self): | |
433 if self._hash is None: | |
434 self._hash = hash(self._ident) | |
435 return self._hash | |
436 | |
437 @property | |
438 def pid(self): | |
439 """The process PID.""" | |
440 return self._pid | |
441 | |
442 # --- utility methods | |
443 | |
444 @contextlib.contextmanager | |
445 def oneshot(self): | |
446 """Utility context manager which considerably speeds up the | |
447 retrieval of multiple process information at the same time. | |
448 | |
449 Internally different process info (e.g. name, ppid, uids, | |
450 gids, ...) may be fetched by using the same routine, but | |
451 only one piece of information is returned; the others are discarded. | |
452 When using this context manager the internal routine is | |
453 executed once (in the example below on name()) and the | |
454 other info are cached. | |
455 | |
456 The cache is cleared when exiting the context manager block. | |
457 The advice is to use this every time you retrieve more than | |
458 one piece of information about the process. If you're lucky, you'll | |
459 get a hell of a speedup. | |
460 | |
461 >>> import psutil | |
462 >>> p = psutil.Process() | |
463 >>> with p.oneshot(): | |
464 ... p.name() # collect multiple info | |
465 ... p.cpu_times() # return cached value | |
466 ... p.cpu_percent() # return cached value | |
467 ... p.create_time() # return cached value | |
468 ... | |
469 >>> | |
470 """ | |
471 with self._lock: | |
472 if hasattr(self, "_cache"): | |
473 # NOOP: this covers the use case where the user enters the | |
474 # context twice: | |
475 # | |
476 # >>> with p.oneshot(): | |
477 # ... with p.oneshot(): | |
478 # ... | |
479 # | |
480 # Also, since as_dict() internally uses oneshot() | |
481 # I expect that the code below will be a pretty common | |
482 # "mistake" that the user will make, so let's guard | |
483 # against that: | |
484 # | |
485 # >>> with p.oneshot(): | |
486 # ... p.as_dict() | |
487 # ... | |
488 yield | |
489 else: | |
490 try: | |
491 # cached in case cpu_percent() is used | |
492 self.cpu_times.cache_activate(self) | |
493 # cached in case memory_percent() is used | |
494 self.memory_info.cache_activate(self) | |
495 # cached in case parent() is used | |
496 self.ppid.cache_activate(self) | |
497 # cached in case username() is used | |
498 if POSIX: | |
499 self.uids.cache_activate(self) | |
500 # specific implementation cache | |
501 self._proc.oneshot_enter() | |
502 yield | |
503 finally: | |
504 self.cpu_times.cache_deactivate(self) | |
505 self.memory_info.cache_deactivate(self) | |
506 self.ppid.cache_deactivate(self) | |
507 if POSIX: | |
508 self.uids.cache_deactivate(self) | |
509 self._proc.oneshot_exit() | |
510 | |
511 def as_dict(self, attrs=None, ad_value=None): | |
512 """Utility method returning process information as a | |
513 hashable dictionary. | |
514 If *attrs* is specified it must be a list of strings | |
515 reflecting available Process class' attribute names | |
516 (e.g. ['cpu_times', 'name']) else all public (read | |
517 only) attributes are assumed. | |
518 *ad_value* is the value which gets assigned in case | |
519 AccessDenied or ZombieProcess exception is raised when | |
520 retrieving that particular process information. | |
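
        A minimal sketch (values and dict key order are illustrative):

        >>> import psutil
        >>> psutil.Process().as_dict(attrs=['pid', 'name', 'username'])
        {'pid': 1234, 'name': 'python', 'username': 'user'}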
521 """ | |
522 valid_names = _as_dict_attrnames | |
523 if attrs is not None: | |
524 if not isinstance(attrs, (list, tuple, set, frozenset)): | |
525 raise TypeError("invalid attrs type %s" % type(attrs)) | |
526 attrs = set(attrs) | |
527 invalid_names = attrs - valid_names | |
528 if invalid_names: | |
529 raise ValueError("invalid attr name%s %s" % ( | |
530 "s" if len(invalid_names) > 1 else "", | |
531 ", ".join(map(repr, invalid_names)))) | |
532 | |
533 retdict = dict() | |
534 ls = attrs or valid_names | |
535 with self.oneshot(): | |
536 for name in ls: | |
537 try: | |
538 if name == 'pid': | |
539 ret = self.pid | |
540 else: | |
541 meth = getattr(self, name) | |
542 ret = meth() | |
543 except (AccessDenied, ZombieProcess): | |
544 ret = ad_value | |
545 except NotImplementedError: | |
546 # in case of not implemented functionality (may happen | |
547 # on old or exotic systems) we want to crash only if | |
548 # the user explicitly asked for that particular attr | |
549 if attrs: | |
550 raise | |
551 continue | |
552 retdict[name] = ret | |
553 return retdict | |
554 | |
555 def parent(self): | |
556 """Return the parent process as a Process object pre-emptively | |
557 checking whether PID has been reused. | |
558 If no parent is known return None. | |
559 """ | |
560 lowest_pid = _LOWEST_PID if _LOWEST_PID is not None else pids()[0] | |
561 if self.pid == lowest_pid: | |
562 return None | |
563 ppid = self.ppid() | |
564 if ppid is not None: | |
565 ctime = self.create_time() | |
566 try: | |
567 parent = Process(ppid) | |
568 if parent.create_time() <= ctime: | |
569 return parent | |
570 # ...else ppid has been reused by another process | |
571 except NoSuchProcess: | |
572 pass | |
573 | |
574 def parents(self): | |
575 """Return the parents of this process as a list of Process | |
576 instances. If no parents are known return an empty list. | |
577 """ | |
578 parents = [] | |
579 proc = self.parent() | |
580 while proc is not None: | |
581 parents.append(proc) | |
582 proc = proc.parent() | |
583 return parents | |
584 | |
585 def is_running(self): | |
586 """Return whether this process is running. | |
587 It also checks if PID has been reused by another process in | |
588 which case it returns False. | |
589 """ | |
590 if self._gone: | |
591 return False | |
592 try: | |
593 # Checking if PID is alive is not enough as the PID might | |
594 # have been reused by another process: we also want to | |
595 # verify process identity. | |
596 # Process identity / uniqueness over time is guaranteed by | |
597 # (PID + creation time) and that is verified in __eq__. | |
598 return self == Process(self.pid) | |
599 except ZombieProcess: | |
600 # We should never get here as it's already handled in | |
601 # Process.__init__; here just for extra safety. | |
602 return True | |
603 except NoSuchProcess: | |
604 self._gone = True | |
605 return False | |
606 | |
607 # --- actual API | |
608 | |
609 @memoize_when_activated | |
610 def ppid(self): | |
611 """The process parent PID. | |
612 On Windows the return value is cached after first call. | |
613 """ | |
614 # On POSIX we don't want to cache the ppid as it may unexpectedly | |
615 # change to 1 (init) in case this process turns into a zombie: | |
616 # https://github.com/giampaolo/psutil/issues/321 | |
617 # http://stackoverflow.com/questions/356722/ | |
618 | |
619 # XXX should we check creation time here rather than in | |
620 # Process.parent()? | |
621 if POSIX: | |
622 return self._proc.ppid() | |
623 else: # pragma: no cover | |
624 self._ppid = self._ppid or self._proc.ppid() | |
625 return self._ppid | |
626 | |
627 def name(self): | |
628 """The process name. The return value is cached after first call.""" | |
629 # Process name is only cached on Windows as on POSIX it may | |
630 # change, see: | |
631 # https://github.com/giampaolo/psutil/issues/692 | |
632 if WINDOWS and self._name is not None: | |
633 return self._name | |
634 name = self._proc.name() | |
635 if POSIX and len(name) >= 15: | |
636 # On UNIX the name gets truncated to the first 15 characters. | |
637 # If it matches the first part of the cmdline we return that | |
638 one instead because it's usually more descriptive. | |
639 # Examples are "gnome-keyring-d" vs. "gnome-keyring-daemon". | |
640 try: | |
641 cmdline = self.cmdline() | |
642 except AccessDenied: | |
643 pass | |
644 else: | |
645 if cmdline: | |
646 extended_name = os.path.basename(cmdline[0]) | |
647 if extended_name.startswith(name): | |
648 name = extended_name | |
649 self._name = name | |
650 self._proc._name = name | |
651 return name | |
652 | |
653 def exe(self): | |
654 """The process executable as an absolute path. | |
655 May also be an empty string. | |
656 The return value is cached after first call. | |
657 """ | |
658 def guess_it(fallback): | |
659 # try to guess exe from cmdline[0] in absence of a native | |
660 # exe representation | |
661 cmdline = self.cmdline() | |
662 if cmdline and hasattr(os, 'access') and hasattr(os, 'X_OK'): | |
663 exe = cmdline[0] # the possible exe | |
664 # Attempt to guess only in case of an absolute path. | |
665 # It is not safe otherwise as the process might have | |
666 # changed cwd. | |
667 if (os.path.isabs(exe) and | |
668 os.path.isfile(exe) and | |
669 os.access(exe, os.X_OK)): | |
670 return exe | |
671 if isinstance(fallback, AccessDenied): | |
672 raise fallback | |
673 return fallback | |
674 | |
675 if self._exe is None: | |
676 try: | |
677 exe = self._proc.exe() | |
678 except AccessDenied as err: | |
679 return guess_it(fallback=err) | |
680 else: | |
681 if not exe: | |
682 # underlying implementation can legitimately return an | |
683 # empty string; if that's the case we don't want to | |
684 # raise AD while guessing from the cmdline | |
685 try: | |
686 exe = guess_it(fallback=exe) | |
687 except AccessDenied: | |
688 pass | |
689 self._exe = exe | |
690 return self._exe | |
691 | |
692 def cmdline(self): | |
693 """The command line this process has been called with.""" | |
694 return self._proc.cmdline() | |
695 | |
696 def status(self): | |
697 """The process current status as a STATUS_* constant.""" | |
698 try: | |
699 return self._proc.status() | |
700 except ZombieProcess: | |
701 return STATUS_ZOMBIE | |
702 | |
703 def username(self): | |
704 """The name of the user that owns the process. | |
705 On UNIX this is calculated by using *real* process uid. | |
706 """ | |
707 if POSIX: | |
708 if pwd is None: | |
709 # might happen if python was installed from sources | |
710 raise ImportError( | |
711 "requires pwd module shipped with standard python") | |
712 real_uid = self.uids().real | |
713 try: | |
714 return pwd.getpwuid(real_uid).pw_name | |
715 except KeyError: | |
716 # the uid can't be resolved by the system | |
717 return str(real_uid) | |
718 else: | |
719 return self._proc.username() | |
720 | |
721 def create_time(self): | |
722 """The process creation time as a floating point number | |
723 expressed in seconds since the epoch, in UTC. | |
724 The return value is cached after first call. | |
725 """ | |
726 if self._create_time is None: | |
727 self._create_time = self._proc.create_time() | |
728 return self._create_time | |
729 | |
730 def cwd(self): | |
731 """Process current working directory as an absolute path.""" | |
732 return self._proc.cwd() | |
733 | |
734 def nice(self, value=None): | |
735 """Get or set process niceness (priority).""" | |
736 if value is None: | |
737 return self._proc.nice_get() | |
738 else: | |
739 if not self.is_running(): | |
740 raise NoSuchProcess(self.pid, self._name) | |
741 self._proc.nice_set(value) | |
742 | |
743 if POSIX: | |
744 | |
745 @memoize_when_activated | |
746 def uids(self): | |
747 """Return process UIDs as a (real, effective, saved) | |
748 namedtuple. | |
749 """ | |
750 return self._proc.uids() | |
751 | |
752 def gids(self): | |
753 """Return process GIDs as a (real, effective, saved) | |
754 namedtuple. | |
755 """ | |
756 return self._proc.gids() | |
757 | |
758 def terminal(self): | |
759 """The terminal associated with this process, if any, | |
760 else None. | |
761 """ | |
762 return self._proc.terminal() | |
763 | |
764 def num_fds(self): | |
765 """Return the number of file descriptors opened by this | |
766 process (POSIX only). | |
767 """ | |
768 return self._proc.num_fds() | |
769 | |
770 # Linux, BSD, AIX and Windows only | |
771 if hasattr(_psplatform.Process, "io_counters"): | |
772 | |
773 def io_counters(self): | |
774 """Return process I/O statistics as a | |
775 (read_count, write_count, read_bytes, write_bytes) | |
776 namedtuple. | |
777 Those are the number of read/write calls performed and the | |
778 amount of bytes read and written by the process. | |
779 """ | |
780 return self._proc.io_counters() | |
781 | |
782 # Linux and Windows | |
783 if hasattr(_psplatform.Process, "ionice_get"): | |
784 | |
785 def ionice(self, ioclass=None, value=None): | |
786 """Get or set process I/O niceness (priority). | |
787 | |
788 On Linux *ioclass* is one of the IOPRIO_CLASS_* constants. | |
789 *value* is a number which goes from 0 to 7. The higher the | |
790 value, the lower the I/O priority of the process. | |
791 | |
792 On Windows only *ioclass* is used and it can be set to 2 | |
793 (normal), 1 (low) or 0 (very low). | |
794 | |
795 Available on Linux and Windows > Vista only. | |
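
            A minimal sketch (Linux; the returned namedtuple is illustrative):

            >>> import psutil
            >>> p = psutil.Process()
            >>> p.ionice(psutil.IOPRIO_CLASS_IDLE)  # set: only serve I/O when idle
            >>> p.ionice()                          # get the current (ioclass, value) pair
            pionice(ioclass=3, value=0)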
796 """ | |
797 if ioclass is None: | |
798 if value is not None: | |
799 raise ValueError("'ioclass' argument must be specified") | |
800 return self._proc.ionice_get() | |
801 else: | |
802 return self._proc.ionice_set(ioclass, value) | |
803 | |
804 # Linux only | |
805 if hasattr(_psplatform.Process, "rlimit"): | |
806 | |
807 def rlimit(self, resource, limits=None): | |
808 """Get or set process resource limits as a (soft, hard) | |
809 tuple. | |
810 | |
811 *resource* is one of the RLIMIT_* constants. | |
812 *limits* is supposed to be a (soft, hard) tuple. | |
813 | |
814 See "man prlimit" for further info. | |
815 Available on Linux only. | |
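
            A minimal sketch (Linux; the limits shown are arbitrary):

            >>> import psutil
            >>> p = psutil.Process()
            >>> p.rlimit(psutil.RLIMIT_NOFILE, (128, 128))  # set (soft, hard) limits
            >>> p.rlimit(psutil.RLIMIT_NOFILE)              # get
            (128, 128)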
816 """ | |
817 if limits is None: | |
818 return self._proc.rlimit(resource) | |
819 else: | |
820 return self._proc.rlimit(resource, limits) | |
821 | |
822 # Windows, Linux and FreeBSD only | |
823 if hasattr(_psplatform.Process, "cpu_affinity_get"): | |
824 | |
825 def cpu_affinity(self, cpus=None): | |
826 """Get or set process CPU affinity. | |
827 If specified, *cpus* must be a list of CPUs for which you | |
828 want to set the affinity (e.g. [0, 1]). | |
829 If an empty list is passed, all eligible CPUs are assumed | |
830 (and set). | |
831 (Windows, Linux and BSD only). | |
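
            A minimal sketch (the CPU numbers are illustrative):

            >>> import psutil
            >>> p = psutil.Process()
            >>> p.cpu_affinity()        # get; e.g. a machine with 4 logical CPUs
            [0, 1, 2, 3]
            >>> p.cpu_affinity([0, 1])  # restrict the process to CPUs 0 and 1
            >>> p.cpu_affinity([])      # reset to all eligible CPUs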
832 """ | |
833 if cpus is None: | |
834 return list(set(self._proc.cpu_affinity_get())) | |
835 else: | |
836 if not cpus: | |
837 if hasattr(self._proc, "_get_eligible_cpus"): | |
838 cpus = self._proc._get_eligible_cpus() | |
839 else: | |
840 cpus = tuple(range(len(cpu_times(percpu=True)))) | |
841 self._proc.cpu_affinity_set(list(set(cpus))) | |
842 | |
843 # Linux, FreeBSD, SunOS | |
844 if hasattr(_psplatform.Process, "cpu_num"): | |
845 | |
846 def cpu_num(self): | |
847 """Return what CPU this process is currently running on. | |
848 The returned number should be <= psutil.cpu_count() | |
849 and <= len(psutil.cpu_percent(percpu=True)). | |
850 It may be used in conjunction with | |
851 psutil.cpu_percent(percpu=True) to observe the system | |
852 workload distributed across CPUs. | |
853 """ | |
854 return self._proc.cpu_num() | |
855 | |
856 # Linux, macOS, Windows, Solaris, AIX | |
857 if hasattr(_psplatform.Process, "environ"): | |
858 | |
859 def environ(self): | |
860 """The environment variables of the process as a dict. Note: this | |
861 might not reflect changes made after the process started. """ | |
862 return self._proc.environ() | |
863 | |
864 if WINDOWS: | |
865 | |
866 def num_handles(self): | |
867 """Return the number of handles opened by this process | |
868 (Windows only). | |
869 """ | |
870 return self._proc.num_handles() | |
871 | |
872 def num_ctx_switches(self): | |
873 """Return the number of voluntary and involuntary context | |
874 switches performed by this process. | |
875 """ | |
876 return self._proc.num_ctx_switches() | |
877 | |
878 def num_threads(self): | |
879 """Return the number of threads used by this process.""" | |
880 return self._proc.num_threads() | |
881 | |
882 if hasattr(_psplatform.Process, "threads"): | |
883 | |
884 def threads(self): | |
885 """Return threads opened by process as a list of | |
886 (id, user_time, system_time) namedtuples representing | |
887 thread id and thread CPU times (user/system). | |
888 On OpenBSD this method requires root access. | |
889 """ | |
890 return self._proc.threads() | |
891 | |
892 @_assert_pid_not_reused | |
893 def children(self, recursive=False): | |
894 """Return the children of this process as a list of Process | |
895 instances, pre-emptively checking whether PID has been reused. | |
896 If *recursive* is True return all descendants (grandchildren included). | |
897 | |
898 Example (A == this process): | |
899 | |
900 A ─┐ | |
901 │ | |
902 ├─ B (child) ─┐ | |
903 │ └─ X (grandchild) ─┐ | |
904 │ └─ Y (great grandchild) | |
905 ├─ C (child) | |
906 └─ D (child) | |
907 | |
908 >>> import psutil | |
909 >>> p = psutil.Process() | |
910 >>> p.children() | |
911 B, C, D | |
912 >>> p.children(recursive=True) | |
913 B, X, Y, C, D | |
914 | |
915 Note that in the example above if process X disappears | |
916 process Y won't be listed as the reference to process A | |
917 is lost. | |
918 """ | |
919 ppid_map = _ppid_map() | |
920 ret = [] | |
921 if not recursive: | |
922 for pid, ppid in ppid_map.items(): | |
923 if ppid == self.pid: | |
924 try: | |
925 child = Process(pid) | |
926 # if child happens to be older than its parent | |
927 # (self) it means child's PID has been reused | |
928 if self.create_time() <= child.create_time(): | |
929 ret.append(child) | |
930 except (NoSuchProcess, ZombieProcess): | |
931 pass | |
932 else: | |
933 # Construct a {pid: [child pids]} dict | |
934 reverse_ppid_map = collections.defaultdict(list) | |
935 for pid, ppid in ppid_map.items(): | |
936 reverse_ppid_map[ppid].append(pid) | |
937 # Recursively traverse that dict, starting from self.pid, | |
938 # such that we only call Process() on actual children | |
939 seen = set() | |
940 stack = [self.pid] | |
941 while stack: | |
942 pid = stack.pop() | |
943 if pid in seen: | |
944 # Since pids can be reused while the ppid_map is | |
945 # constructed, there may be rare instances where | |
946 # there's a cycle in the recorded process "tree". | |
947 continue | |
948 seen.add(pid) | |
949 for child_pid in reverse_ppid_map[pid]: | |
950 try: | |
951 child = Process(child_pid) | |
952 # if child happens to be older than its parent | |
953 # (self) it means child's PID has been reused | |
954 intime = self.create_time() <= child.create_time() | |
955 if intime: | |
956 ret.append(child) | |
957 stack.append(child_pid) | |
958 except (NoSuchProcess, ZombieProcess): | |
959 pass | |
960 return ret | |
961 | |
962 def cpu_percent(self, interval=None): | |
963 """Return a float representing the current process CPU | |
964 utilization as a percentage. | |
965 | |
966 When *interval* is 0.0 or None (default) compares process times | |
967 to system CPU times elapsed since last call, returning | |
968 immediately (non-blocking). That means that the first time | |
969 this is called it will return a meaningless 0.0 value to be ignored. | |
970 | |
971 When *interval* is > 0.0 compares process times to system CPU | |
972 times elapsed before and after the interval (blocking). | |
973 | |
974 In this case it is recommended for accuracy that this function | |
975 be called with at least 0.1 seconds between calls. | |
976 | |
977 A value > 100.0 can be returned in case of processes running | |
978 multiple threads on different CPU cores. | |
979 | |
980 The returned value is explicitly NOT split evenly between | |
981 all available logical CPUs. This means that a busy loop process | |
982 running on a system with 2 logical CPUs will be reported as | |
983 having 100% CPU utilization instead of 50%. | |
984 | |
985 Examples: | |
986 | |
987 >>> import psutil | |
988 >>> p = psutil.Process(os.getpid()) | |
989 >>> # blocking | |
990 >>> p.cpu_percent(interval=1) | |
991 2.0 | |
992 >>> # non-blocking (percentage since last call) | |
993 >>> p.cpu_percent(interval=None) | |
994 2.9 | |
995 >>> | |
996 """ | |
997 blocking = interval is not None and interval > 0.0 | |
998 if interval is not None and interval < 0: | |
999 raise ValueError("interval is not positive (got %r)" % interval) | |
1000 num_cpus = cpu_count() or 1 | |
1001 | |
1002 def timer(): | |
1003 return _timer() * num_cpus | |
1004 | |
1005 if blocking: | |
1006 st1 = timer() | |
1007 pt1 = self._proc.cpu_times() | |
1008 time.sleep(interval) | |
1009 st2 = timer() | |
1010 pt2 = self._proc.cpu_times() | |
1011 else: | |
1012 st1 = self._last_sys_cpu_times | |
1013 pt1 = self._last_proc_cpu_times | |
1014 st2 = timer() | |
1015 pt2 = self._proc.cpu_times() | |
1016 if st1 is None or pt1 is None: | |
1017 self._last_sys_cpu_times = st2 | |
1018 self._last_proc_cpu_times = pt2 | |
1019 return 0.0 | |
1020 | |
1021 delta_proc = (pt2.user - pt1.user) + (pt2.system - pt1.system) | |
1022 delta_time = st2 - st1 | |
1023 # reset values for next call in case of interval == None | |
1024 self._last_sys_cpu_times = st2 | |
1025 self._last_proc_cpu_times = pt2 | |
1026 | |
1027 try: | |
1028 # This is the utilization split evenly between all CPUs. | |
1029 # E.g. a busy loop process on a 2-CPU-cores system at this | |
1030 # point is reported as 50% instead of 100%. | |
1031 overall_cpus_percent = ((delta_proc / delta_time) * 100) | |
1032 except ZeroDivisionError: | |
1033 # interval was too low | |
1034 return 0.0 | |
1035 else: | |
1036 # Note 1: | |
1037 # in order to emulate "top" we multiply the value for the num | |
1038 # of CPU cores. This way the busy process will be reported as | |
1039 # having 100% (or more) usage. | |
1040 # | |
1041 # Note 2: | |
1042 # taskmgr.exe on Windows differs in that it will show 50% | |
1043 # instead. | |
1044 # | |
1045 # Note 3: | |
1046 # a percentage > 100 is legitimate as it can result from a | |
1047 # process with multiple threads running on different CPU | |
1048 # cores (top does the same), see: | |
1049 # http://stackoverflow.com/questions/1032357 | |
1050 # https://github.com/giampaolo/psutil/issues/474 | |
1051 single_cpu_percent = overall_cpus_percent * num_cpus | |
1052 return round(single_cpu_percent, 1) | |
1053 | |
1054 @memoize_when_activated | |
1055 def cpu_times(self): | |
1056 """Return a (user, system, children_user, children_system) | |
1057 namedtuple representing the accumulated process time, in | |
1058 seconds. | |
1059 This is similar to os.times() but per-process. | |
1060 On macOS and Windows children_user and children_system are | |
1061 always set to 0. | |
1062 """ | |
1063 return self._proc.cpu_times() | |
1064 | |
1065 @memoize_when_activated | |
1066 def memory_info(self): | |
1067 """Return a namedtuple with variable fields depending on the | |
1068 platform, representing memory information about the process. | |
1069 | |
1070 The "portable" fields available on all plaforms are `rss` and `vms`. | |
1071 | |
1072 All numbers are expressed in bytes. | |
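
        A minimal sketch (the number is illustrative):

        >>> import psutil
        >>> psutil.Process().memory_info().rss  # resident set size, in bytes
        10915840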
1073 """ | |
1074 return self._proc.memory_info() | |
1075 | |
1076 @_common.deprecated_method(replacement="memory_info") | |
1077 def memory_info_ex(self): | |
1078 return self.memory_info() | |
1079 | |
1080 def memory_full_info(self): | |
1081 """This method returns the same information as memory_info(), | |
1082 plus, on some platforms (Linux, macOS, Windows), also provides | |
1083 additional metrics (USS, PSS and swap). | |
1084 The additional metrics provide a better representation of actual | |
1085 process memory usage. | |
1086 | |
1087 Namely USS is the memory which is unique to a process and which | |
1088 would be freed if the process was terminated right now. | |
1089 | |
1090 It does so by passing through the whole process address space. | |
1091 As such it usually requires higher user privileges than | |
1092 memory_info() and is considerably slower. | |
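
        A minimal sketch (the number is illustrative; reading USS may require
        higher privileges):

        >>> import psutil
        >>> psutil.Process().memory_full_info().uss  # memory unique to this process, in bytes
        8667136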
1093 """ | |
1094 return self._proc.memory_full_info() | |
1095 | |
1096 def memory_percent(self, memtype="rss"): | |
1097 """Compare process memory to total physical system memory and | |
1098 calculate process memory utilization as a percentage. | |
1099 *memtype* argument is a string that dictates what type of | |
1100 process memory you want to compare against (defaults to "rss"). | |
1101 The list of available strings can be obtained like this: | |
1102 | |
1103 >>> psutil.Process().memory_info()._fields | |
1104 ('rss', 'vms', 'shared', 'text', 'lib', 'data', 'dirty', 'uss', 'pss') | |
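
        A minimal sketch (the percentage is illustrative):

        >>> psutil.Process().memory_percent(memtype="rss")
        0.7823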
1105 """ | |
1106 valid_types = list(_psplatform.pfullmem._fields) | |
1107 if memtype not in valid_types: | |
1108 raise ValueError("invalid memtype %r; valid types are %r" % ( | |
1109 memtype, tuple(valid_types))) | |
1110 fun = self.memory_info if memtype in _psplatform.pmem._fields else \ | |
1111 self.memory_full_info | |
1112 metrics = fun() | |
1113 value = getattr(metrics, memtype) | |
1114 | |
1115 # use cached value if available | |
1116 total_phymem = _TOTAL_PHYMEM or virtual_memory().total | |
1117 if not total_phymem > 0: | |
1118 # we should never get here | |
1119 raise ValueError( | |
1120 "can't calculate process memory percent because " | |
1121 "total physical system memory is not positive (%r)" | |
1122 % total_phymem) | |
1123 return (value / float(total_phymem)) * 100 | |
1124 | |
1125 if hasattr(_psplatform.Process, "memory_maps"): | |
1126 def memory_maps(self, grouped=True): | |
1127 """Return process' mapped memory regions as a list of namedtuples | |
1128 whose fields are variable depending on the platform. | |
1129 | |
1130 If *grouped* is True the mapped regions with the same 'path' | |
1131 are grouped together and the different memory fields are summed. | |
1132 | |
1133 If *grouped* is False every mapped region is shown as a single | |
1134 entity and the namedtuple will also include the mapped region's | |
1135 address space ('addr') and permission set ('perms'). | |
1136 """ | |
1137 it = self._proc.memory_maps() | |
1138 if grouped: | |
1139 d = {} | |
1140 for tupl in it: | |
1141 path = tupl[2] | |
1142 nums = tupl[3:] | |
1143 try: | |
1144 d[path] = map(lambda x, y: x + y, d[path], nums) | |
1145 except KeyError: | |
1146 d[path] = nums | |
1147 nt = _psplatform.pmmap_grouped | |
1148 return [nt(path, *d[path]) for path in d] # NOQA | |
1149 else: | |
1150 nt = _psplatform.pmmap_ext | |
1151 return [nt(*x) for x in it] | |
1152 | |
1153 def open_files(self): | |
1154 """Return files opened by process as a list of | |
1155 (path, fd) namedtuples including the absolute file name | |
1156 and file descriptor number. | |
1157 """ | |
1158 return self._proc.open_files() | |
1159 | |
1160 def connections(self, kind='inet'): | |
1161 """Return socket connections opened by process as a list of | |
1162 (fd, family, type, laddr, raddr, status) namedtuples. | |
1163 The *kind* parameter filters for connections that match the | |
1164 following criteria: | |
1165 | |
1166 +------------+----------------------------------------------------+ | |
1167 | Kind Value | Connections using | | |
1168 +------------+----------------------------------------------------+ | |
1169 | inet | IPv4 and IPv6 | | |
1170 | inet4 | IPv4 | | |
1171 | inet6 | IPv6 | | |
1172 | tcp | TCP | | |
1173 | tcp4 | TCP over IPv4 | | |
1174 | tcp6 | TCP over IPv6 | | |
1175 | udp | UDP | | |
1176 | udp4 | UDP over IPv4 | | |
1177 | udp6 | UDP over IPv6 | | |
1178 | unix | UNIX socket (both UDP and TCP protocols) | | |
1179 | all | the sum of all the possible families and protocols | | |
1180 +------------+----------------------------------------------------+ | |
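
        A minimal sketch (addresses, ports and fd numbers are illustrative):

        >>> import psutil
        >>> p = psutil.Process()
        >>> p.connections(kind='tcp')
        [pconn(fd=115, family=<AddressFamily.AF_INET: 2>, type=<SocketKind.SOCK_STREAM: 1>, laddr=addr(ip='10.0.0.1', port=48776), raddr=addr(ip='93.186.135.91', port=80), status='ESTABLISHED')]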
1181 """ | |
1182 return self._proc.connections(kind) | |
1183 | |
1184 # --- signals | |
1185 | |
1186 if POSIX: | |
1187 def _send_signal(self, sig): | |
1188 assert not self.pid < 0, self.pid | |
1189 if self.pid == 0: | |
1190 # see "man 2 kill" | |
1191 raise ValueError( | |
1192 "preventing sending signal to process with PID 0 as it " | |
1193 "would affect every process in the process group of the " | |
1194 "calling process (os.getpid()) instead of PID 0") | |
1195 try: | |
1196 os.kill(self.pid, sig) | |
1197 except ProcessLookupError: | |
1198 if OPENBSD and pid_exists(self.pid): | |
1199 # We do this because os.kill() lies in case of | |
1200 # zombie processes. | |
1201 raise ZombieProcess(self.pid, self._name, self._ppid) | |
1202 else: | |
1203 self._gone = True | |
1204 raise NoSuchProcess(self.pid, self._name) | |
1205 except PermissionError: | |
1206 raise AccessDenied(self.pid, self._name) | |
1207 | |
1208 @_assert_pid_not_reused | |
1209 def send_signal(self, sig): | |
1210 """Send a signal *sig* to process pre-emptively checking | |
1211 whether PID has been reused (see signal module constants) . | |
1212 On Windows only SIGTERM is valid and is treated as an alias | |
1213 for kill(). | |
1214 """ | |
1215 if POSIX: | |
1216 self._send_signal(sig) | |
1217 else: # pragma: no cover | |
1218 self._proc.send_signal(sig) | |
1219 | |
1220 @_assert_pid_not_reused | |
1221 def suspend(self): | |
1222 """Suspend process execution with SIGSTOP pre-emptively checking | |
1223 whether PID has been reused. | |
1224 On Windows this has the effect of suspending all process threads. | |
1225 """ | |
1226 if POSIX: | |
1227 self._send_signal(signal.SIGSTOP) | |
1228 else: # pragma: no cover | |
1229 self._proc.suspend() | |
1230 | |
1231 @_assert_pid_not_reused | |
1232 def resume(self): | |
1233 """Resume process execution with SIGCONT pre-emptively checking | |
1234 whether PID has been reused. | |
1235 On Windows this has the effect of resuming all process threads. | |
1236 """ | |
1237 if POSIX: | |
1238 self._send_signal(signal.SIGCONT) | |
1239 else: # pragma: no cover | |
1240 self._proc.resume() | |
1241 | |
1242 @_assert_pid_not_reused | |
1243 def terminate(self): | |
1244 """Terminate the process with SIGTERM pre-emptively checking | |
1245 whether PID has been reused. | |
1246 On Windows this is an alias for kill(). | |
1247 """ | |
1248 if POSIX: | |
1249 self._send_signal(signal.SIGTERM) | |
1250 else: # pragma: no cover | |
1251 self._proc.kill() | |
1252 | |
1253 @_assert_pid_not_reused | |
1254 def kill(self): | |
1255 """Kill the current process with SIGKILL pre-emptively checking | |
1256 whether PID has been reused. | |
1257 """ | |
1258 if POSIX: | |
1259 self._send_signal(signal.SIGKILL) | |
1260 else: # pragma: no cover | |
1261 self._proc.kill() | |
1262 | |
1263 def wait(self, timeout=None): | |
1264 """Wait for process to terminate and, if process is a children | |
1265 of os.getpid(), also return its exit code, else None. | |
1266 On Windows there's no such limitation (exit code is always | |
1267 returned). | |
1268 | |
1269 If the process is already terminated immediately return None | |
1270 instead of raising NoSuchProcess. | |
1271 | |
1272 If *timeout* (in seconds) is specified and process is still | |
1273 alive raise TimeoutExpired. | |
1274 | |
1275 To wait for multiple Process(es) use psutil.wait_procs(). | |
1276 """ | |
1277 if timeout is not None and not timeout >= 0: | |
1278 raise ValueError("timeout must be a positive integer") | |
1279 if self._exitcode is not _SENTINEL: | |
1280 return self._exitcode | |
1281 self._exitcode = self._proc.wait(timeout) | |
1282 return self._exitcode | |
1283 | |
1284 | |
1285 # The valid attr names which can be processed by Process.as_dict(). | |
1286 _as_dict_attrnames = set( | |
1287 [x for x in dir(Process) if not x.startswith('_') and x not in | |
1288 ['send_signal', 'suspend', 'resume', 'terminate', 'kill', 'wait', | |
1289 'is_running', 'as_dict', 'parent', 'parents', 'children', 'rlimit', | |
1290 'memory_info_ex', 'oneshot']]) | |
1291 | |
1292 | |
1293 # ===================================================================== | |
1294 # --- Popen class | |
1295 # ===================================================================== | |
1296 | |
1297 | |
1298 class Popen(Process): | |
1299 """Same as subprocess.Popen, but in addition it provides all | |
1300 psutil.Process methods in a single class. | |
1301 For the following methods which are common to both classes, psutil | |
1302 implementation takes precedence: | |
1303 | |
1304 * send_signal() | |
1305 * terminate() | |
1306 * kill() | |
1307 | |
1308 This is done in order to avoid killing another process in case its | |
1309 PID has been reused, fixing BPO-6973. | |
1310 | |
1311 >>> import psutil | |
1312 >>> from subprocess import PIPE | |
1313 >>> p = psutil.Popen(["python", "-c", "print 'hi'"], stdout=PIPE) | |
1314 >>> p.name() | |
1315 'python' | |
1316 >>> p.uids() | |
1317 user(real=1000, effective=1000, saved=1000) | |
1318 >>> p.username() | |
1319 'giampaolo' | |
1320 >>> p.communicate() | |
1321 ('hi\n', None) | |
1322 >>> p.terminate() | |
1323 >>> p.wait(timeout=2) | |
1324 0 | |
1325 >>> | |
1326 """ | |
1327 | |
1328 def __init__(self, *args, **kwargs): | |
1329 # Explicitly avoid raising NoSuchProcess in case the process | |
1330 # spawned by subprocess.Popen terminates too quickly, see: | |
1331 # https://github.com/giampaolo/psutil/issues/193 | |
1332 self.__subproc = subprocess.Popen(*args, **kwargs) | |
1333 self._init(self.__subproc.pid, _ignore_nsp=True) | |
1334 | |
1335 def __dir__(self): | |
1336 return sorted(set(dir(Popen) + dir(subprocess.Popen))) | |
1337 | |
1338 def __enter__(self): | |
1339 if hasattr(self.__subproc, '__enter__'): | |
1340 self.__subproc.__enter__() | |
1341 return self | |
1342 | |
1343 def __exit__(self, *args, **kwargs): | |
1344 if hasattr(self.__subproc, '__exit__'): | |
1345 return self.__subproc.__exit__(*args, **kwargs) | |
1346 else: | |
1347 if self.stdout: | |
1348 self.stdout.close() | |
1349 if self.stderr: | |
1350 self.stderr.close() | |
1351 try: | |
1352 # Flushing a BufferedWriter may raise an error. | |
1353 if self.stdin: | |
1354 self.stdin.close() | |
1355 finally: | |
1356 # Wait for the process to terminate, to avoid zombies. | |
1357 self.wait() | |
1358 | |
1359 def __getattribute__(self, name): | |
1360 try: | |
1361 return object.__getattribute__(self, name) | |
1362 except AttributeError: | |
1363 try: | |
1364 return object.__getattribute__(self.__subproc, name) | |
1365 except AttributeError: | |
1366 raise AttributeError("%s instance has no attribute '%s'" | |
1367 % (self.__class__.__name__, name)) | |
1368 | |
1369 def wait(self, timeout=None): | |
1370 if self.__subproc.returncode is not None: | |
1371 return self.__subproc.returncode | |
1372 ret = super(Popen, self).wait(timeout) | |
1373 self.__subproc.returncode = ret | |
1374 return ret | |
1375 | |
1376 | |
1377 # ===================================================================== | |
1378 # --- system processes related functions | |
1379 # ===================================================================== | |
1380 | |
1381 | |
1382 def pids(): | |
1383 """Return a list of current running PIDs.""" | |
1384 global _LOWEST_PID | |
1385 ret = sorted(_psplatform.pids()) | |
1386 _LOWEST_PID = ret[0] | |
1387 return ret | |
1388 | |
1389 | |
1390 def pid_exists(pid): | |
1391 """Return True if given PID exists in the current process list. | |
1392 This is faster than doing "pid in psutil.pids()" and | |
1393 should be preferred. | |
1394 """ | |
1395 if pid < 0: | |
1396 return False | |
1397 elif pid == 0 and POSIX: | |
1398 # On POSIX we use os.kill() to determine PID existence. | |
1399 # According to "man 2 kill" PID 0 has a special meaning | |
1400 # though: it refers to <<every process in the process | |
1401 # group of the calling process>> and that is not what we want | |
1402 # to do here. | |
1403 return pid in pids() | |
1404 else: | |
1405 return _psplatform.pid_exists(pid) | |
1406 | |
1407 | |
1408 _pmap = {} | |
1409 _lock = threading.Lock() | |
1410 | |
1411 | |
1412 def process_iter(attrs=None, ad_value=None): | |
1413 """Return a generator yielding a Process instance for all | |
1414 running processes. | |
1415 | |
1416 Every new Process instance is only created once and then cached | |
1417 into an internal table which is updated every time this is used. | |
1418 | |
1419 Cached Process instances are checked for identity so that you're | |
1420 safe in case a PID has been reused by another process, in which | |
1421 case the cached instance is updated. | |
1422 | |
1423 The sorting order in which processes are yielded is based on | |
1424 their PIDs. | |
1425 | |
1426 *attrs* and *ad_value* have the same meaning as in | |
1427 Process.as_dict(). If *attrs* is specified as_dict() is called | |
1428 and the resulting dict is stored as an 'info' attribute attached | |
1429 to the returned Process instance. | |
1430 If *attrs* is an empty list it will retrieve all process info | |
1431 (slow). | |
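
    A minimal sketch (PIDs, names and dict key order are illustrative):

    >>> import psutil
    >>> for proc in psutil.process_iter(['pid', 'name']):
    ...     print(proc.info)
    ...
    {'pid': 1, 'name': 'systemd'}
    {'pid': 2, 'name': 'kthreadd'}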
1432 """ | |
1433 def add(pid): | |
1434 proc = Process(pid) | |
1435 if attrs is not None: | |
1436 proc.info = proc.as_dict(attrs=attrs, ad_value=ad_value) | |
1437 with _lock: | |
1438 _pmap[proc.pid] = proc | |
1439 return proc | |
1440 | |
1441 def remove(pid): | |
1442 with _lock: | |
1443 _pmap.pop(pid, None) | |
1444 | |
1445 a = set(pids()) | |
1446 b = set(_pmap.keys()) | |
1447 new_pids = a - b | |
1448 gone_pids = b - a | |
1449 for pid in gone_pids: | |
1450 remove(pid) | |
1451 | |
1452 with _lock: | |
1453 ls = sorted(list(_pmap.items()) + | |
1454 list(dict.fromkeys(new_pids).items())) | |
1455 | |
1456 for pid, proc in ls: | |
1457 try: | |
1458 if proc is None: # new process | |
1459 yield add(pid) | |
1460 else: | |
1461 # use is_running() to check whether PID has been reused by | |
1462 # another process in which case yield a new Process instance | |
1463 if proc.is_running(): | |
1464 if attrs is not None: | |
1465 proc.info = proc.as_dict( | |
1466 attrs=attrs, ad_value=ad_value) | |
1467 yield proc | |
1468 else: | |
1469 yield add(pid) | |
1470 except NoSuchProcess: | |
1471 remove(pid) | |
1472 except AccessDenied: | |
1473 # Process creation time can't be determined hence there's | |
1474 # no way to tell whether the pid of the cached process | |
1475 # has been reused. Just return the cached version. | |
1476 if proc is None and pid in _pmap: | |
1477 try: | |
1478 yield _pmap[pid] | |
1479 except KeyError: | |
1480 # If we get here it is likely that 2 threads were | |
1481 # using process_iter(). | |
1482 pass | |
1483 else: | |
1484 raise | |
1485 | |
1486 | |
1487 def wait_procs(procs, timeout=None, callback=None): | |
1488 """Convenience function which waits for a list of processes to | |
1489 terminate. | |
1490 | |
1491 Return a (gone, alive) tuple indicating which processes | |
1492 are gone and which ones are still alive. | |
1493 | |
1494 The gone ones will have a new *returncode* attribute indicating | |
1495 process exit status (may be None). | |
1496 | |
1497 *callback* is a function which gets called every time a process | |
1498 terminates (a Process instance is passed as callback argument). | |
1499 | |
1500 Function will return as soon as all processes terminate or when | |
1501 *timeout* occurs. | |
1502 Unlike Process.wait(), it will not raise TimeoutExpired if | |
1503 *timeout* occurs. | |
1504 | |
1505 Typical use case is: | |
1506 | |
1507 - send SIGTERM to a list of processes | |
1508 - give them some time to terminate | |
1509 - send SIGKILL to those ones which are still alive | |
1510 | |
1511 Example: | |
1512 | |
1513 >>> def on_terminate(proc): | |
1514 ... print("process {} terminated".format(proc)) | |
1515 ... | |
1516 >>> for p in procs: | |
1517 ... p.terminate() | |
1518 ... | |
1519 >>> gone, alive = wait_procs(procs, timeout=3, callback=on_terminate) | |
1520 >>> for p in alive: | |
1521 ... p.kill() | |
1522 """ | |
1523 def check_gone(proc, timeout): | |
1524 try: | |
1525 returncode = proc.wait(timeout=timeout) | |
1526 except TimeoutExpired: | |
1527 pass | |
1528 else: | |
1529 if returncode is not None or not proc.is_running(): | |
1530 # Set new Process instance attribute. | |
1531 proc.returncode = returncode | |
1532 gone.add(proc) | |
1533 if callback is not None: | |
1534 callback(proc) | |
1535 | |
1536 if timeout is not None and not timeout >= 0: | |
1537 msg = "timeout must be a positive integer, got %s" % timeout | |
1538 raise ValueError(msg) | |
1539 gone = set() | |
1540 alive = set(procs) | |
1541 if callback is not None and not callable(callback): | |
1542 raise TypeError("callback %r is not a callable" % callback) | |
1543 if timeout is not None: | |
1544 deadline = _timer() + timeout | |
1545 | |
1546 while alive: | |
1547 if timeout is not None and timeout <= 0: | |
1548 break | |
1549 for proc in alive: | |
1550 # Make sure that every complete iteration (all processes) | |
1551 # will last max 1 sec. | |
1552 # We do this because we don't want to wait too long on a | |
1553 # single process: in case it terminates too late other | |
1554 # processes may disappear in the meantime and their PID | |
1555 # reused. | |
1556 max_timeout = 1.0 / len(alive) | |
1557 if timeout is not None: | |
1558 timeout = min((deadline - _timer()), max_timeout) | |
1559 if timeout <= 0: | |
1560 break | |
1561 check_gone(proc, timeout) | |
1562 else: | |
1563 check_gone(proc, max_timeout) | |
1564 alive = alive - gone | |
1565 | |
1566 if alive: | |
1567 # Last attempt over processes survived so far. | |
1568 # timeout == 0 won't make this function wait any further. | |
1569 for proc in alive: | |
1570 check_gone(proc, 0) | |
1571 alive = alive - gone | |
1572 | |
1573 return (list(gone), list(alive)) | |
1574 | |
1575 | |
1576 # ===================================================================== | |
1577 # --- CPU related functions | |
1578 # ===================================================================== | |
1579 | |
1580 | |
1581 def cpu_count(logical=True): | |
1582 """Return the number of logical CPUs in the system (same as | |
1583 os.cpu_count() in Python 3.4). | |
1584 | |
1585 If *logical* is False return the number of physical cores only | |
1586 (e.g. hyper thread CPUs are excluded). | |
1587 | |
1588 Return None if undetermined. | |
1589 | |
1590 The return value is cached after the first call. |
1591 If desired, the cache can be cleared like this: |
1592 | |
1593 >>> psutil.cpu_count.cache_clear() | |
1594 """ | |
1595 if logical: | |
1596 ret = _psplatform.cpu_count_logical() | |
1597 else: | |
1598 ret = _psplatform.cpu_count_physical() | |
1599 if ret is not None and ret < 1: | |
1600 ret = None | |
1601 return ret | |
1602 | |
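# Illustrative usage of cpu_count() (a minimal sketch; the numbers shown are
# made up, and the physical-core count may be None on some platforms):
#
#     >>> import psutil
#     >>> psutil.cpu_count()                 # logical CPUs
#     8
#     >>> psutil.cpu_count(logical=False)    # physical cores only
#     4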
1603 | |
1604 def cpu_times(percpu=False): | |
1605 """Return system-wide CPU times as a namedtuple. | |
1606 Every CPU time represents the seconds the CPU has spent in the | |
1607 given mode. The availability of the namedtuple's fields varies |
1608 depending on the platform: |
1609 | |
1610 - user | |
1611 - system | |
1612 - idle | |
1613 - nice (UNIX) | |
1614 - iowait (Linux) | |
1615 - irq (Linux, FreeBSD) | |
1616 - softirq (Linux) | |
1617 - steal (Linux >= 2.6.11) | |
1618 - guest (Linux >= 2.6.24) | |
1619 - guest_nice (Linux >= 3.2.0) | |
1620 | |
1621 When *percpu* is True return a list of namedtuples for each CPU. | |
1622 First element of the list refers to first CPU, second element | |
1623 to second CPU and so on. | |
1624 The order of the list is consistent across calls. | |
1625 """ | |
1626 if not percpu: | |
1627 return _psplatform.cpu_times() | |
1628 else: | |
1629 return _psplatform.per_cpu_times() | |
1630 | |
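# Illustrative usage of cpu_times() (a hedged sketch; field names and values
# depend on the platform, the ones below assume Linux and are made up):
#
#     >>> import psutil
#     >>> t = psutil.cpu_times()
#     >>> t.user, t.system, t.idle
#     (10963.3, 2544.1, 158743.8)
#     >>> len(psutil.cpu_times(percpu=True))   # one entry per logical CPU
#     4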
1631 | |
1632 try: | |
1633 _last_cpu_times = cpu_times() | |
1634 except Exception: | |
1635 # Don't want to crash at import time. | |
1636 _last_cpu_times = None | |
1637 | |
1638 try: | |
1639 _last_per_cpu_times = cpu_times(percpu=True) | |
1640 except Exception: | |
1641 # Don't want to crash at import time. | |
1642 _last_per_cpu_times = None | |
1643 | |
1644 | |
1645 def _cpu_tot_time(times): | |
1646 """Given a cpu_times() ntuple, calculate the total CPU time |
1647 (including idle time). | |
1648 """ | |
1649 tot = sum(times) | |
1650 if LINUX: | |
1651 # On Linux guest times are already accounted in "user" or | |
1652 # "nice" times, so we subtract them from total. | |
1653 # Htop does the same. References: | |
1654 # https://github.com/giampaolo/psutil/pull/940 | |
1655 # http://unix.stackexchange.com/questions/178045 | |
1656 # https://github.com/torvalds/linux/blob/ | |
1657 # 447976ef4fd09b1be88b316d1a81553f1aa7cd07/kernel/sched/ | |
1658 # cputime.c#L158 | |
1659 tot -= getattr(times, "guest", 0) # Linux 2.6.24+ | |
1660 tot -= getattr(times, "guest_nice", 0) # Linux 3.2.0+ | |
1661 return tot | |
1662 | |
1663 | |
1664 def _cpu_busy_time(times): | |
1665 """Given a cpu_times() ntuple, calculate the busy CPU time. |
1666 We do so by subtracting all idle CPU times. | |
1667 """ | |
1668 busy = _cpu_tot_time(times) | |
1669 busy -= times.idle | |
1670 # Linux: "iowait" is time during which the CPU does not do anything | |
1671 # (waits for IO to complete). On Linux IO wait is *not* accounted | |
1672 # in "idle" time so we subtract it. Htop does the same. | |
1673 # References: | |
1674 # https://github.com/torvalds/linux/blob/ | |
1675 # 447976ef4fd09b1be88b316d1a81553f1aa7cd07/kernel/sched/cputime.c#L244 | |
1676 busy -= getattr(times, "iowait", 0) | |
1677 return busy | |
1678 | |
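# How cpu_percent() uses the helpers above (a worked sketch, not extra
# runtime logic): it takes the per-field deltas between two cpu_times()
# samples and computes
#
#     busy% = 100 * _cpu_busy_time(deltas) / _cpu_tot_time(deltas)
#
# e.g. a busy delta of 0.5 sec against a total delta of 2.0 sec gives 25.0%.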
1679 | |
1680 def _cpu_times_deltas(t1, t2): | |
1681 assert t1._fields == t2._fields, (t1, t2) | |
1682 field_deltas = [] | |
1683 for field in _psplatform.scputimes._fields: | |
1684 field_delta = getattr(t2, field) - getattr(t1, field) | |
1685 # CPU times are always supposed to increase over time | |
1686 # or at least remain the same and that's because time | |
1687 # cannot go backwards. | |
1688 # Surprisingly sometimes this might not be the case (at | |
1689 # least on Windows and Linux), see: | |
1690 # https://github.com/giampaolo/psutil/issues/392 | |
1691 # https://github.com/giampaolo/psutil/issues/645 | |
1692 # https://github.com/giampaolo/psutil/issues/1210 | |
1693 # Trim negative deltas to zero to ignore decreasing fields. | |
1694 # top does the same. Reference: | |
1695 # https://gitlab.com/procps-ng/procps/blob/v3.3.12/top/top.c#L5063 | |
1696 field_delta = max(0, field_delta) | |
1697 field_deltas.append(field_delta) | |
1698 return _psplatform.scputimes(*field_deltas) | |
1699 | |
1700 | |
1701 def cpu_percent(interval=None, percpu=False): | |
1702 """Return a float representing the current system-wide CPU | |
1703 utilization as a percentage. | |
1704 | |
1705 When *interval* is > 0.0 compares system CPU times elapsed before | |
1706 and after the interval (blocking). | |
1707 | |
1708 When *interval* is 0.0 or None compares system CPU times elapsed | |
1709 since last call or module import, returning immediately (non | |
1710 blocking). That means the first time this is called it will | |
1711 return a meaningless 0.0 value which you should ignore. | |
1712 In this case it is recommended, for accuracy, that this function be |
1713 called with at least 0.1 seconds between calls. | |
1714 | |
1715 When *percpu* is True returns a list of floats representing the | |
1716 utilization as a percentage for each CPU. | |
1717 First element of the list refers to first CPU, second element | |
1718 to second CPU and so on. | |
1719 The order of the list is consistent across calls. | |
1720 | |
1721 Examples: | |
1722 | |
1723 >>> # blocking, system-wide | |
1724 >>> psutil.cpu_percent(interval=1) | |
1725 2.0 | |
1726 >>> | |
1727 >>> # blocking, per-cpu | |
1728 >>> psutil.cpu_percent(interval=1, percpu=True) | |
1729 [2.0, 1.0] | |
1730 >>> | |
1731 >>> # non-blocking (percentage since last call) | |
1732 >>> psutil.cpu_percent(interval=None) | |
1733 2.9 | |
1734 >>> | |
1735 """ | |
1736 global _last_cpu_times | |
1737 global _last_per_cpu_times | |
1738 blocking = interval is not None and interval > 0.0 | |
1739 if interval is not None and interval < 0: | |
1740 raise ValueError("interval is not positive (got %r)" % interval) | |
1741 | |
1742 def calculate(t1, t2): | |
1743 times_delta = _cpu_times_deltas(t1, t2) | |
1744 | |
1745 all_delta = _cpu_tot_time(times_delta) | |
1746 busy_delta = _cpu_busy_time(times_delta) | |
1747 | |
1748 try: | |
1749 busy_perc = (busy_delta / all_delta) * 100 | |
1750 except ZeroDivisionError: | |
1751 return 0.0 | |
1752 else: | |
1753 return round(busy_perc, 1) | |
1754 | |
1755 # system-wide usage | |
1756 if not percpu: | |
1757 if blocking: | |
1758 t1 = cpu_times() | |
1759 time.sleep(interval) | |
1760 else: | |
1761 t1 = _last_cpu_times | |
1762 if t1 is None: | |
1763 # Something bad happened at import time. We'll | |
1764 # get a meaningful result on the next call. See: | |
1765 # https://github.com/giampaolo/psutil/pull/715 | |
1766 t1 = cpu_times() | |
1767 _last_cpu_times = cpu_times() | |
1768 return calculate(t1, _last_cpu_times) | |
1769 # per-cpu usage | |
1770 else: | |
1771 ret = [] | |
1772 if blocking: | |
1773 tot1 = cpu_times(percpu=True) | |
1774 time.sleep(interval) | |
1775 else: | |
1776 tot1 = _last_per_cpu_times | |
1777 if tot1 is None: | |
1778 # Something bad happened at import time. We'll | |
1779 # get a meaningful result on the next call. See: | |
1780 # https://github.com/giampaolo/psutil/pull/715 | |
1781 tot1 = cpu_times(percpu=True) | |
1782 _last_per_cpu_times = cpu_times(percpu=True) | |
1783 for t1, t2 in zip(tot1, _last_per_cpu_times): | |
1784 ret.append(calculate(t1, t2)) | |
1785 return ret | |
1786 | |
1787 | |
1788 # Use separate global vars for cpu_times_percent() so that it's | |
1789 # independent from cpu_percent() and they can both be used within | |
1790 # the same program. | |
1791 _last_cpu_times_2 = _last_cpu_times | |
1792 _last_per_cpu_times_2 = _last_per_cpu_times | |
1793 | |
1794 | |
1795 def cpu_times_percent(interval=None, percpu=False): | |
1796 """Same as cpu_percent() but provides utilization percentages | |
1797 for each specific CPU time as is returned by cpu_times(). | |
1798 For instance, on Linux we'll get: | |
1799 | |
1800 >>> cpu_times_percent() | |
1801 cpupercent(user=4.8, nice=0.0, system=4.8, idle=90.5, iowait=0.0, | |
1802 irq=0.0, softirq=0.0, steal=0.0, guest=0.0, guest_nice=0.0) | |
1803 >>> | |
1804 | |
1805 *interval* and *percpu* arguments have the same meaning as in | |
1806 cpu_percent(). | |
1807 """ | |
1808 global _last_cpu_times_2 | |
1809 global _last_per_cpu_times_2 | |
1810 blocking = interval is not None and interval > 0.0 | |
1811 if interval is not None and interval < 0: | |
1812 raise ValueError("interval is not positive (got %r)" % interval) | |
1813 | |
1814 def calculate(t1, t2): | |
1815 nums = [] | |
1816 times_delta = _cpu_times_deltas(t1, t2) | |
1817 all_delta = _cpu_tot_time(times_delta) | |
1818 # "scale" is the value to multiply each delta with to get percentages. | |
1819 # We use "max" to avoid division by zero (if all_delta is 0, then all | |
1820 # fields are 0 so percentages will be 0 too. all_delta cannot be a | |
1821 # fraction because cpu times are integers) | |
1822 scale = 100.0 / max(1, all_delta) | |
1823 for field_delta in times_delta: | |
1824 field_perc = field_delta * scale | |
1825 field_perc = round(field_perc, 1) | |
1826 # make sure we don't return negative values or values over 100% | |
1827 field_perc = min(max(0.0, field_perc), 100.0) | |
1828 nums.append(field_perc) | |
1829 return _psplatform.scputimes(*nums) | |
1830 | |
1831 # system-wide usage | |
1832 if not percpu: | |
1833 if blocking: | |
1834 t1 = cpu_times() | |
1835 time.sleep(interval) | |
1836 else: | |
1837 t1 = _last_cpu_times_2 | |
1838 if t1 is None: | |
1839 # Something bad happened at import time. We'll | |
1840 # get a meaningful result on the next call. See: | |
1841 # https://github.com/giampaolo/psutil/pull/715 | |
1842 t1 = cpu_times() | |
1843 _last_cpu_times_2 = cpu_times() | |
1844 return calculate(t1, _last_cpu_times_2) | |
1845 # per-cpu usage | |
1846 else: | |
1847 ret = [] | |
1848 if blocking: | |
1849 tot1 = cpu_times(percpu=True) | |
1850 time.sleep(interval) | |
1851 else: | |
1852 tot1 = _last_per_cpu_times_2 | |
1853 if tot1 is None: | |
1854 # Something bad happened at import time. We'll | |
1855 # get a meaningful result on the next call. See: | |
1856 # https://github.com/giampaolo/psutil/pull/715 | |
1857 tot1 = cpu_times(percpu=True) | |
1858 _last_per_cpu_times_2 = cpu_times(percpu=True) | |
1859 for t1, t2 in zip(tot1, _last_per_cpu_times_2): | |
1860 ret.append(calculate(t1, t2)) | |
1861 return ret | |
1862 | |
1863 | |
1864 def cpu_stats(): | |
1865 """Return CPU statistics.""" | |
1866 return _psplatform.cpu_stats() | |
1867 | |
1868 | |
1869 if hasattr(_psplatform, "cpu_freq"): | |
1870 | |
1871 def cpu_freq(percpu=False): | |
1872 """Return CPU frequency as a namedtuple including current, |
1873 min and max frequencies expressed in MHz. |
1874 | |
1875 If *percpu* is True and the system supports per-cpu frequency | |
1876 retrieval (Linux only) a list of frequencies is returned for | |
1877 each CPU. If not a list with one element is returned. | |
1878 """ | |
1879 ret = _psplatform.cpu_freq() | |
1880 if percpu: | |
1881 return ret | |
1882 else: | |
1883 num_cpus = float(len(ret)) | |
1884 if num_cpus == 0: | |
1885 return None | |
1886 elif num_cpus == 1: | |
1887 return ret[0] | |
1888 else: | |
1889 currs, mins, maxs = 0.0, 0.0, 0.0 | |
1890 set_none = False | |
1891 for cpu in ret: | |
1892 currs += cpu.current | |
1893 # On Linux if /proc/cpuinfo is used min/max are set | |
1894 # to None. | |
1895 if LINUX and cpu.min is None: | |
1896 set_none = True | |
1897 continue | |
1898 mins += cpu.min | |
1899 maxs += cpu.max | |
1900 | |
1901 current = currs / num_cpus | |
1902 | |
1903 if set_none: | |
1904 min_ = max_ = None | |
1905 else: | |
1906 min_ = mins / num_cpus | |
1907 max_ = maxs / num_cpus | |
1908 | |
1909 return _common.scpufreq(current, min_, max_) | |
1910 | |
1911 __all__.append("cpu_freq") | |
1912 | |
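# Illustrative usage of cpu_freq(), where the platform supports it (values
# below are made up; min/max may be None or 0.0 when they can't be determined):
#
#     >>> import psutil
#     >>> psutil.cpu_freq()
#     scpufreq(current=1823.4, min=800.0, max=3400.0)
#     >>> len(psutil.cpu_freq(percpu=True))    # per-CPU list (Linux only)
#     4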
1913 | |
1914 if hasattr(os, "getloadavg") or hasattr(_psplatform, "getloadavg"): | |
1915 # Perform this hasattr check once at import time to either use the |
1916 # platform-based code or proxy straight from the os module. |
1917 if hasattr(os, "getloadavg"): | |
1918 getloadavg = os.getloadavg | |
1919 else: | |
1920 getloadavg = _psplatform.getloadavg | |
1921 | |
1922 __all__.append("getloadavg") | |
1923 | |
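# Illustrative usage of getloadavg(), where available: it returns the 1, 5
# and 15 minute load averages (the figures below are made up):
#
#     >>> import psutil
#     >>> psutil.getloadavg()
#     (0.42, 0.30, 0.25)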
1924 | |
1925 # ===================================================================== | |
1926 # --- system memory related functions | |
1927 # ===================================================================== | |
1928 | |
1929 | |
1930 def virtual_memory(): | |
1931 """Return statistics about system memory usage as a namedtuple | |
1932 including the following fields, expressed in bytes: | |
1933 | |
1934 - total: | |
1935 total physical memory available. | |
1936 | |
1937 - available: | |
1938 the memory that can be given instantly to processes without the | |
1939 system going into swap. | |
1940 This is calculated by summing different memory values depending | |
1941 on the platform and it is supposed to be used to monitor actual | |
1942 memory usage in a cross platform fashion. | |
1943 | |
1944 - percent: | |
1945 the percentage usage calculated as (total - available) / total * 100 | |
1946 | |
1947 - used: | |
1948 memory used, calculated differently depending on the platform and | |
1949 designed for informational purposes only: | |
1950 macOS: active + wired | |
1951 BSD: active + wired + cached | |
1952 Linux: total - free | |
1953 | |
1954 - free: | |
1955 memory not being used at all (zeroed) that is readily available; | |
1956 note that this doesn't reflect the actual memory available | |
1957 (use 'available' instead) | |
1958 | |
1959 Platform-specific fields: | |
1960 | |
1961 - active (UNIX): | |
1962 memory currently in use or very recently used, and so it is in RAM. | |
1963 | |
1964 - inactive (UNIX): | |
1965 memory that is marked as not used. | |
1966 | |
1967 - buffers (BSD, Linux): | |
1968 cache for things like file system metadata. | |
1969 | |
1970 - cached (BSD, macOS): | |
1971 cache for various things. | |
1972 | |
1973 - wired (macOS, BSD): | |
1974 memory that is marked to always stay in RAM. It is never moved to disk. | |
1975 | |
1976 - shared (BSD): | |
1977 memory that may be simultaneously accessed by multiple processes. | |
1978 | |
1979 The sum of 'used' and 'available' does not necessarily equal total. | |
1980 On Windows 'available' and 'free' are the same. | |
1981 """ | |
1982 global _TOTAL_PHYMEM | |
1983 ret = _psplatform.virtual_memory() | |
1984 # cached for later use in Process.memory_percent() | |
1985 _TOTAL_PHYMEM = ret.total | |
1986 return ret | |
1987 | |
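# Illustrative usage of virtual_memory() (a hedged sketch; the figures are
# made up and the extra fields depend on the platform):
#
#     >>> import psutil
#     >>> mem = psutil.virtual_memory()
#     >>> mem.total, mem.available, mem.percent
#     (8374149120, 2081050624, 75.1)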
1988 | |
1989 def swap_memory(): | |
1990 """Return system swap memory statistics as a namedtuple including | |
1991 the following fields: | |
1992 | |
1993 - total: total swap memory in bytes | |
1994 - used: used swap memory in bytes | |
1995 - free: free swap memory in bytes | |
1996 - percent: the percentage usage | |
1997 - sin: no. of bytes the system has swapped in from disk (cumulative) | |
1998 - sout: no. of bytes the system has swapped out from disk (cumulative) | |
1999 | |
2000 'sin' and 'sout' on Windows are meaningless and always set to 0. | |
2001 """ | |
2002 return _psplatform.swap_memory() | |
2003 | |
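# Illustrative usage of swap_memory() (figures are made up; 'sin' and 'sout'
# are always 0 on Windows):
#
#     >>> import psutil
#     >>> sw = psutil.swap_memory()
#     >>> sw.total, sw.used, sw.percent
#     (2147479552, 296128512, 13.8)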
2004 | |
2005 # ===================================================================== | |
2006 # --- disks/partitions related functions |
2007 # ===================================================================== | |
2008 | |
2009 | |
2010 def disk_usage(path): | |
2011 """Return disk usage statistics about the given *path* as a | |
2012 namedtuple including total, used and free space expressed in bytes | |
2013 plus the percentage usage. | |
2014 """ | |
2015 return _psplatform.disk_usage(path) | |
2016 | |
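# Illustrative usage of disk_usage() (figures are made up; on Windows a path
# such as "C:\\" would be passed instead of "/"):
#
#     >>> import psutil
#     >>> psutil.disk_usage('/')
#     sdiskusage(total=21378641920, used=4809781248, free=15482871808, percent=22.5)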
2017 | |
2018 def disk_partitions(all=False): | |
2019 """Return mounted partitions as a list of | |
2020 (device, mountpoint, fstype, opts) namedtuple. | |
2021 'opts' field is a raw, comma-separated string indicating mount |
2022 options, which may vary depending on the platform. |
2023 | |
2024 If *all* parameter is False return physical devices only and ignore | |
2025 all others. | |
2026 """ | |
2027 return _psplatform.disk_partitions(all) | |
2028 | |
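# Illustrative usage of disk_partitions() combined with disk_usage() (a
# hedged sketch; device and mountpoint names depend on the system):
#
#     >>> import psutil
#     >>> for part in psutil.disk_partitions():
#     ...     print(part.mountpoint, psutil.disk_usage(part.mountpoint).percent)
#     ...
#     / 22.5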
2029 | |
2030 def disk_io_counters(perdisk=False, nowrap=True): | |
2031 """Return system disk I/O statistics as a namedtuple including | |
2032 the following fields: | |
2033 | |
2034 - read_count: number of reads | |
2035 - write_count: number of writes | |
2036 - read_bytes: number of bytes read | |
2037 - write_bytes: number of bytes written | |
2038 - read_time: time spent reading from disk (in ms) | |
2039 - write_time: time spent writing to disk (in ms) | |
2040 | |
2041 Platform specific: | |
2042 | |
2043 - busy_time: (Linux, FreeBSD) time spent doing actual I/Os (in ms) | |
2044 - read_merged_count (Linux): number of merged reads | |
2045 - write_merged_count (Linux): number of merged writes | |
2046 | |
2047 If *perdisk* is True return the same information for every | |
2048 physical disk installed on the system as a dictionary | |
2049 with partition names as the keys and the namedtuple | |
2050 described above as the values. | |
2051 | |
2052 If *nowrap* is True it detects and adjusts the numbers which overflow |
2053 and wrap (restart from 0) and adds "old value" to "new value" so that |
2054 the returned numbers will always be increasing or remain the same, | |
2055 but never decrease. | |
2056 "disk_io_counters.cache_clear()" can be used to invalidate the | |
2057 cache. | |
2058 | |
2059 On recent Windows versions the 'diskperf -y' command may need to be |
2060 executed first, otherwise this function won't find any disks. |
2061 """ | |
2062 kwargs = dict(perdisk=perdisk) if LINUX else {} | |
2063 rawdict = _psplatform.disk_io_counters(**kwargs) | |
2064 if not rawdict: | |
2065 return {} if perdisk else None | |
2066 if nowrap: | |
2067 rawdict = _wrap_numbers(rawdict, 'psutil.disk_io_counters') | |
2068 nt = getattr(_psplatform, "sdiskio", _common.sdiskio) | |
2069 if perdisk: | |
2070 for disk, fields in rawdict.items(): | |
2071 rawdict[disk] = nt(*fields) | |
2072 return rawdict | |
2073 else: | |
2074 return nt(*[sum(x) for x in zip(*rawdict.values())]) | |
2075 | |
2076 | |
2077 disk_io_counters.cache_clear = functools.partial( | |
2078 _wrap_numbers.cache_clear, 'psutil.disk_io_counters') | |
2079 disk_io_counters.cache_clear.__doc__ = "Clears nowrap argument cache" | |
2080 | |
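# Illustrative usage of disk_io_counters(): sampling twice and diffing gives
# throughput over the interval (a hedged sketch; the figures are made up):
#
#     >>> import time, psutil
#     >>> a = psutil.disk_io_counters()
#     >>> time.sleep(1)
#     >>> b = psutil.disk_io_counters()
#     >>> b.read_bytes - a.read_bytes, b.write_bytes - a.write_bytes
#     (122880, 4096)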
2081 | |
2082 # ===================================================================== | |
2083 # --- network related functions | |
2084 # ===================================================================== | |
2085 | |
2086 | |
2087 def net_io_counters(pernic=False, nowrap=True): | |
2088 """Return network I/O statistics as a namedtuple including | |
2089 the following fields: | |
2090 | |
2091 - bytes_sent: number of bytes sent | |
2092 - bytes_recv: number of bytes received | |
2093 - packets_sent: number of packets sent | |
2094 - packets_recv: number of packets received | |
2095 - errin: total number of errors while receiving | |
2096 - errout: total number of errors while sending | |
2097 - dropin: total number of incoming packets which were dropped | |
2098 - dropout: total number of outgoing packets which were dropped | |
2099 (always 0 on macOS and BSD) | |
2100 | |
2101 If *pernic* is True return the same information for every | |
2102 network interface installed on the system as a dictionary | |
2103 with network interface names as the keys and the namedtuple | |
2104 described above as the values. | |
2105 | |
2106 If *nowrap* is True it detects and adjusts the numbers which overflow |
2107 and wrap (restart from 0) and adds "old value" to "new value" so that |
2108 the returned numbers will always be increasing or remain the same, |
2109 but never decrease. |
2110 "net_io_counters.cache_clear()" can be used to invalidate the |
2111 cache. |
2112 """ | |
2113 rawdict = _psplatform.net_io_counters() | |
2114 if not rawdict: | |
2115 return {} if pernic else None | |
2116 if nowrap: | |
2117 rawdict = _wrap_numbers(rawdict, 'psutil.net_io_counters') | |
2118 if pernic: | |
2119 for nic, fields in rawdict.items(): | |
2120 rawdict[nic] = _common.snetio(*fields) | |
2121 return rawdict | |
2122 else: | |
2123 return _common.snetio(*[sum(x) for x in zip(*rawdict.values())]) | |
2124 | |
2125 | |
2126 net_io_counters.cache_clear = functools.partial( | |
2127 _wrap_numbers.cache_clear, 'psutil.net_io_counters') | |
2128 net_io_counters.cache_clear.__doc__ = "Clears nowrap argument cache" | |
2129 | |
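# Illustrative usage of net_io_counters() (a hedged sketch; the figures are
# made up and interface names depend on the system):
#
#     >>> import psutil
#     >>> tot = psutil.net_io_counters()
#     >>> tot.bytes_sent, tot.bytes_recv
#     (14508483, 62749361)
#     >>> psutil.net_io_counters(pernic=True)['lo'].packets_recv
#     120481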
2130 | |
2131 def net_connections(kind='inet'): | |
2132 """Return system-wide socket connections as a list of | |
2133 (fd, family, type, laddr, raddr, status, pid) namedtuples. | |
2134 In case of limited privileges 'fd' and 'pid' may be set to -1 | |
2135 and None respectively. | |
2136 The *kind* parameter filters for connections that fit the | |
2137 following criteria: | |
2138 | |
2139 +------------+----------------------------------------------------+ | |
2140 | Kind Value | Connections using | | |
2141 +------------+----------------------------------------------------+ | |
2142 | inet | IPv4 and IPv6 | | |
2143 | inet4 | IPv4 | | |
2144 | inet6 | IPv6 | | |
2145 | tcp | TCP | | |
2146 | tcp4 | TCP over IPv4 | | |
2147 | tcp6 | TCP over IPv6 | | |
2148 | udp | UDP | | |
2149 | udp4 | UDP over IPv4 | | |
2150 | udp6 | UDP over IPv6 | | |
2151 | unix | UNIX socket (both UDP and TCP protocols) | | |
2152 | all | the sum of all the possible families and protocols | | |
2153 +------------+----------------------------------------------------+ | |
2154 | |
2155 On macOS this function requires root privileges. | |
2156 """ | |
2157 return _psplatform.net_connections(kind) | |
2158 | |
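# Illustrative usage of net_connections(), e.g. listing listening TCP ports
# (a hedged sketch; may require elevated privileges on some platforms and
# the port numbers below are made up):
#
#     >>> import psutil
#     >>> [c.laddr.port for c in psutil.net_connections(kind='tcp')
#     ...  if c.status == psutil.CONN_LISTEN]
#     [22, 631]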
2159 | |
2160 def net_if_addrs(): | |
2161 """Return the addresses associated to each NIC (network interface | |
2162 card) installed on the system as a dictionary whose keys are the |
2163 NIC names and whose values are lists of namedtuples, one for each |
2164 address assigned to the NIC. Each namedtuple includes 5 fields: |
2165 | |
2166 - family: can be either socket.AF_INET, socket.AF_INET6 or | |
2167 psutil.AF_LINK, which refers to a MAC address. | |
2168 - address: the primary address; it is always set. |
2169 - netmask: the netmask address; 'netmask', 'broadcast' and 'ptp' may be None. |
2170 - ptp: stands for "point to point" and references the |
2171 destination address on a point to point interface |
2172 (typically a VPN). |
2173 - broadcast: the broadcast address; 'broadcast' and 'ptp' are mutually exclusive. |
2174 | |
2175 Note: you can have more than one address of the same family | |
2176 associated with each interface. | |
2177 """ | |
2178 has_enums = sys.version_info >= (3, 4) | |
2179 if has_enums: | |
2180 import socket | |
2181 rawlist = _psplatform.net_if_addrs() | |
2182 rawlist.sort(key=lambda x: x[1]) # sort by family | |
2183 ret = collections.defaultdict(list) | |
2184 for name, fam, addr, mask, broadcast, ptp in rawlist: | |
2185 if has_enums: | |
2186 try: | |
2187 fam = socket.AddressFamily(fam) | |
2188 except ValueError: | |
2189 if WINDOWS and fam == -1: | |
2190 fam = _psplatform.AF_LINK | |
2191 elif (hasattr(_psplatform, "AF_LINK") and | |
2192 _psplatform.AF_LINK == fam): | |
2193 # Linux defines AF_LINK as an alias for AF_PACKET. | |
2194 # We re-set the family here so that repr(family) | |
2195 # will show AF_LINK rather than AF_PACKET | |
2196 fam = _psplatform.AF_LINK | |
2197 if fam == _psplatform.AF_LINK: | |
2198 # The underlying C function may return an incomplete MAC | |
2199 # address in which case we fill it with null bytes, see: | |
2200 # https://github.com/giampaolo/psutil/issues/786 | |
2201 separator = ":" if POSIX else "-" | |
2202 while addr.count(separator) < 5: | |
2203 addr += "%s00" % separator | |
2204 ret[name].append(_common.snicaddr(fam, addr, mask, broadcast, ptp)) | |
2205 return dict(ret) | |
2206 | |
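# Illustrative usage of net_if_addrs(), e.g. extracting IPv4 addresses (a
# hedged sketch; interface names and addresses depend on the system):
#
#     >>> import socket, psutil
#     >>> for nic, addrs in psutil.net_if_addrs().items():
#     ...     for a in addrs:
#     ...         if a.family == socket.AF_INET:
#     ...             print(nic, a.address)
#     ...
#     lo 127.0.0.1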
2207 | |
2208 def net_if_stats(): | |
2209 """Return information about each NIC (network interface card) | |
2210 installed on the system as a dictionary whose keys are the | |
2211 NIC names and whose values are namedtuples with the following fields: |
2212 | |
2213 - isup: whether the interface is up (bool) | |
2214 - duplex: can be either NIC_DUPLEX_FULL, NIC_DUPLEX_HALF or | |
2215 NIC_DUPLEX_UNKNOWN | |
2216 - speed: the NIC speed expressed in megabits per second (Mbps); if it |
2217 can't be determined (e.g. 'localhost') it will be set to 0. |
2218 - mtu: the maximum transmission unit expressed in bytes. | |
2219 """ | |
2220 return _psplatform.net_if_stats() | |
2221 | |
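# Illustrative usage of net_if_stats() (a hedged sketch; interface names and
# values depend on the system):
#
#     >>> import psutil
#     >>> psutil.net_if_stats()['lo'].isup
#     True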
2222 | |
2223 # ===================================================================== | |
2224 # --- sensors | |
2225 # ===================================================================== | |
2226 | |
2227 | |
2228 # Linux, macOS | |
2229 if hasattr(_psplatform, "sensors_temperatures"): | |
2230 | |
2231 def sensors_temperatures(fahrenheit=False): | |
2232 """Return hardware temperatures. Each entry is a namedtuple | |
2233 representing a certain hardware sensor (it may be a CPU, a |
2234 hard disk or something else, depending on the OS and its | |
2235 configuration). | |
2236 All temperatures are expressed in celsius unless *fahrenheit* | |
2237 is set to True. | |
2238 """ | |
2239 def convert(n): | |
2240 if n is not None: | |
2241 return (float(n) * 9 / 5) + 32 if fahrenheit else n | |
2242 | |
2243 ret = collections.defaultdict(list) | |
2244 rawdict = _psplatform.sensors_temperatures() | |
2245 | |
2246 for name, values in rawdict.items(): | |
2247 while values: | |
2248 label, current, high, critical = values.pop(0) | |
2249 current = convert(current) | |
2250 high = convert(high) | |
2251 critical = convert(critical) | |
2252 | |
2253 if high and not critical: | |
2254 critical = high | |
2255 elif critical and not high: | |
2256 high = critical | |
2257 | |
2258 ret[name].append( | |
2259 _common.shwtemp(label, current, high, critical)) | |
2260 | |
2261 return dict(ret) | |
2262 | |
2263 __all__.append("sensors_temperatures") | |
2264 | |
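# Illustrative usage of sensors_temperatures(), where supported (a hedged
# sketch; sensor and label names depend on the hardware, values are made up):
#
#     >>> import psutil
#     >>> for name, entries in psutil.sensors_temperatures().items():
#     ...     for entry in entries:
#     ...         print(name, entry.label, entry.current)
#     ...
#     coretemp Core 0 45.0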
2265 | |
2266 # Linux | |
2267 if hasattr(_psplatform, "sensors_fans"): | |
2268 | |
2269 def sensors_fans(): | |
2270 """Return fans speed. Each entry is a namedtuple | |
2271 representing a certain hardware sensor. | |
2272 All speeds are expressed in RPM (revolutions per minute). |
2273 """ | |
2274 return _psplatform.sensors_fans() | |
2275 | |
2276 __all__.append("sensors_fans") | |
2277 | |
2278 | |
2279 # Linux, Windows, FreeBSD, macOS | |
2280 if hasattr(_psplatform, "sensors_battery"): | |
2281 | |
2282 def sensors_battery(): | |
2283 """Return battery information. If no battery is installed | |
2284 returns None. | |
2285 | |
2286 - percent: battery power left as a percentage. | |
2287 - secsleft: a rough approximation of how many seconds are left | |
2288 before the battery runs out of power. May be | |
2289 POWER_TIME_UNKNOWN or POWER_TIME_UNLIMITED. |
2290 - power_plugged: True if the AC power cable is connected. | |
2291 """ | |
2292 return _psplatform.sensors_battery() | |
2293 | |
2294 __all__.append("sensors_battery") | |
2295 | |
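# Illustrative usage of sensors_battery(), where supported (a hedged sketch;
# it returns None when no battery is present and the figures are made up):
#
#     >>> import psutil
#     >>> batt = psutil.sensors_battery()
#     >>> batt.percent, batt.power_plugged
#     (87.4, True)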
2296 | |
2297 # ===================================================================== | |
2298 # --- other system related functions | |
2299 # ===================================================================== | |
2300 | |
2301 | |
2302 def boot_time(): | |
2303 """Return the system boot time expressed in seconds since the epoch.""" | |
2304 # Note: we are not caching this because it is subject to | |
2305 # system clock updates. | |
2306 return _psplatform.boot_time() | |
2307 | |
2308 | |
2309 def users(): | |
2310 """Return users currently connected on the system as a list of | |
2311 namedtuples including the following fields. | |
2312 | |
2313 - user: the name of the user | |
2314 - terminal: the tty or pseudo-tty associated with the user, if any. | |
2315 - host: the host name associated with the entry, if any. | |
2316 - started: the creation time as a floating point number expressed in | |
2317 seconds since the epoch. | |
2318 """ | |
2319 return _psplatform.users() | |
2320 | |
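# Illustrative usage of boot_time() and users() (a hedged sketch; the epoch
# timestamps can be converted with the datetime module imported above, and
# the outputs below are made up):
#
#     >>> import datetime, psutil
#     >>> datetime.datetime.fromtimestamp(psutil.boot_time()).isoformat()
#     '2020-07-30T08:12:04'
#     >>> [u.name for u in psutil.users()]
#     ['giampaolo']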
2321 | |
2322 # ===================================================================== | |
2323 # --- Windows services | |
2324 # ===================================================================== | |
2325 | |
2326 | |
2327 if WINDOWS: | |
2328 | |
2329 def win_service_iter(): | |
2330 """Return a generator yielding a WindowsService instance for all | |
2331 Windows services installed. | |
2332 """ | |
2333 return _psplatform.win_service_iter() | |
2334 | |
2335 def win_service_get(name): | |
2336 """Get a Windows service by *name*. | |
2337 Raise NoSuchProcess if no service with such name exists. | |
2338 """ | |
2339 return _psplatform.win_service_get(name) | |
2340 | |
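    # Illustrative usage of the two helpers above (a hedged sketch; service
    # names and states differ per system):
    #
    #     >>> import psutil
    #     >>> [s.name() for s in psutil.win_service_iter()][:2]
    #     ['AJRouter', 'ALG']
    #     >>> psutil.win_service_get('alg').status()
    #     'stopped'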
2341 | |
2342 # ===================================================================== | |
2343 | |
2344 | |
2345 def test(): # pragma: no cover | |
2346 from ._common import bytes2human | |
2347 from ._compat import get_terminal_size | |
2348 | |
2349 today_day = datetime.date.today() | |
2350 templ = "%-10s %5s %5s %7s %7s %5s %6s %6s %6s %s" | |
2351 attrs = ['pid', 'memory_percent', 'name', 'cmdline', 'cpu_times', | |
2352 'create_time', 'memory_info', 'status', 'nice', 'username'] | |
2353 print(templ % ("USER", "PID", "%MEM", "VSZ", "RSS", "NICE", # NOQA | |
2354 "STATUS", "START", "TIME", "CMDLINE")) | |
2355 for p in process_iter(attrs, ad_value=None): | |
2356 if p.info['create_time']: | |
2357 ctime = datetime.datetime.fromtimestamp(p.info['create_time']) | |
2358 if ctime.date() == today_day: | |
2359 ctime = ctime.strftime("%H:%M") | |
2360 else: | |
2361 ctime = ctime.strftime("%b%d") | |
2362 else: | |
2363 ctime = '' | |
2364 if p.info['cpu_times']: | |
2365 cputime = time.strftime("%M:%S", | |
2366 time.localtime(sum(p.info['cpu_times']))) | |
2367 else: | |
2368 cputime = '' | |
2369 | |
2370 user = p.info['username'] or '' | |
2371 if not user and POSIX: | |
2372 try: | |
2373 user = p.uids()[0] | |
2374 except Error: | |
2375 pass | |
2376 if user and WINDOWS and '\\' in user: | |
2377 user = user.split('\\')[1] | |
2378 user = user[:9] | |
2379 vms = bytes2human(p.info['memory_info'].vms) if \ | |
2380 p.info['memory_info'] is not None else '' | |
2381 rss = bytes2human(p.info['memory_info'].rss) if \ | |
2382 p.info['memory_info'] is not None else '' | |
2383 memp = round(p.info['memory_percent'], 1) if \ | |
2384 p.info['memory_percent'] is not None else '' | |
2385 nice = int(p.info['nice']) if p.info['nice'] else '' | |
2386 if p.info['cmdline']: | |
2387 cmdline = ' '.join(p.info['cmdline']) | |
2388 else: | |
2389 cmdline = p.info['name'] | |
2390 status = p.info['status'][:5] if p.info['status'] else '' | |
2391 | |
2392 line = templ % ( | |
2393 user[:10], | |
2394 p.info['pid'], | |
2395 memp, | |
2396 vms, | |
2397 rss, | |
2398 nice, | |
2399 status, | |
2400 ctime, | |
2401 cputime, | |
2402 cmdline) | |
2403 print(line[:get_terminal_size()[0]]) # NOQA | |
2404 | |
2405 | |
2406 del memoize_when_activated, division | |
2407 if sys.version_info[0] < 3: | |
2408 del num, x | |
2409 | |
2410 if __name__ == "__main__": | |
2411 test() |