comparison env/lib/python3.7/site-packages/psutil/_pslinux.py @ 0:26e78fe6e8c4 draft
"planemo upload commit c699937486c35866861690329de38ec1a5d9f783"
author | shellac |
---|---|
date | Sat, 02 May 2020 07:14:21 -0400 |
parents | |
children | |
compared revisions: -1:000000000000 -> 0:26e78fe6e8c4
1 # Copyright (c) 2009, Giampaolo Rodola'. All rights reserved. | |
2 # Use of this source code is governed by a BSD-style license that can be | |
3 # found in the LICENSE file. | |
4 | |
5 """Linux platform implementation.""" | |
6 | |
7 from __future__ import division | |
8 | |
9 import base64 | |
10 import collections | |
11 import errno | |
12 import functools | |
13 import glob | |
14 import os | |
15 import re | |
16 import socket | |
17 import struct | |
18 import sys | |
19 import traceback | |
20 import warnings | |
21 from collections import defaultdict | |
22 from collections import namedtuple | |
23 | |
24 from . import _common | |
25 from . import _psposix | |
26 from . import _psutil_linux as cext | |
27 from . import _psutil_posix as cext_posix | |
28 from ._common import AccessDenied | |
29 from ._common import debug | |
30 from ._common import decode | |
31 from ._common import get_procfs_path | |
32 from ._common import isfile_strict | |
33 from ._common import memoize | |
34 from ._common import memoize_when_activated | |
35 from ._common import NIC_DUPLEX_FULL | |
36 from ._common import NIC_DUPLEX_HALF | |
37 from ._common import NIC_DUPLEX_UNKNOWN | |
38 from ._common import NoSuchProcess | |
39 from ._common import open_binary | |
40 from ._common import open_text | |
41 from ._common import parse_environ_block | |
42 from ._common import path_exists_strict | |
43 from ._common import supports_ipv6 | |
44 from ._common import usage_percent | |
45 from ._common import ZombieProcess | |
46 from ._compat import b | |
47 from ._compat import basestring | |
48 from ._compat import FileNotFoundError | |
49 from ._compat import PermissionError | |
50 from ._compat import ProcessLookupError | |
51 from ._compat import PY3 | |
52 | |
53 if sys.version_info >= (3, 4): | |
54 import enum | |
55 else: | |
56 enum = None | |
57 | |
58 | |
59 __extra__all__ = [ | |
60 # | |
61 'PROCFS_PATH', | |
62 # io prio constants | |
63 "IOPRIO_CLASS_NONE", "IOPRIO_CLASS_RT", "IOPRIO_CLASS_BE", | |
64 "IOPRIO_CLASS_IDLE", | |
65 # connection status constants | |
66 "CONN_ESTABLISHED", "CONN_SYN_SENT", "CONN_SYN_RECV", "CONN_FIN_WAIT1", | |
67 "CONN_FIN_WAIT2", "CONN_TIME_WAIT", "CONN_CLOSE", "CONN_CLOSE_WAIT", | |
68 "CONN_LAST_ACK", "CONN_LISTEN", "CONN_CLOSING", ] | |
69 | |
70 | |
71 # ===================================================================== | |
72 # --- globals | |
73 # ===================================================================== | |
74 | |
75 | |
76 POWER_SUPPLY_PATH = "/sys/class/power_supply" | |
77 HAS_SMAPS = os.path.exists('/proc/%s/smaps' % os.getpid()) | |
78 HAS_PRLIMIT = hasattr(cext, "linux_prlimit") | |
79 HAS_PROC_IO_PRIORITY = hasattr(cext, "proc_ioprio_get") | |
80 HAS_CPU_AFFINITY = hasattr(cext, "proc_cpu_affinity_get") | |
81 _DEFAULT = object() | |
82 | |
83 # RLIMIT_* constants, not guaranteed to be present on all kernels | |
84 if HAS_PRLIMIT: | |
85 for name in dir(cext): | |
86 if name.startswith('RLIM'): | |
87 __extra__all__.append(name) | |
88 | |
89 # Number of clock ticks per second | |
90 CLOCK_TICKS = os.sysconf("SC_CLK_TCK") | |
91 PAGESIZE = os.sysconf("SC_PAGE_SIZE") | |
92 BOOT_TIME = None # set later | |
93 # Used when reading "big" files, namely /proc/{pid}/smaps and /proc/net/*. | |
94 # On Python 2, using a buffer with open() for such files may result in a | |
95 # speedup, see: https://github.com/giampaolo/psutil/issues/708 | |
96 BIGFILE_BUFFERING = -1 if PY3 else 8192 | |
97 LITTLE_ENDIAN = sys.byteorder == 'little' | |
98 | |
99 # "man iostat" states that sectors are equivalent with blocks and have | |
100 # a size of 512 bytes. Despite this value can be queried at runtime | |
101 # via /sys/block/{DISK}/queue/hw_sector_size and results may vary | |
102 # between 1k, 2k, or 4k... 512 appears to be a magic constant used | |
103 # throughout Linux source code: | |
104 # * https://stackoverflow.com/a/38136179/376587 | |
105 # * https://lists.gt.net/linux/kernel/2241060 | |
106 # * https://github.com/giampaolo/psutil/issues/1305 | |
107 # * https://github.com/torvalds/linux/blob/ | |
108 # 4f671fe2f9523a1ea206f63fe60a7c7b3a56d5c7/include/linux/bio.h#L99 | |
109 # * https://lkml.org/lkml/2015/8/17/234 | |
110 DISK_SECTOR_SIZE = 512 | |
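# Illustrative sketch (not part of the upstream psutil source): the per-disk
# hardware sector size mentioned above could be read from sysfs, assuming the
# device exists, e.g.:
#
#     >>> with open("/sys/block/sda/queue/hw_sector_size") as f:  # "sda" is a hypothetical disk
#     ...     int(f.read())
#     512
#
# The fixed 512-byte constant is still what matters here, because the counters
# in /proc/diskstats are expressed in 512-byte sectors regardless of hardware.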
111 | |
112 if enum is None: | |
113 AF_LINK = socket.AF_PACKET | |
114 else: | |
115 AddressFamily = enum.IntEnum('AddressFamily', | |
116 {'AF_LINK': int(socket.AF_PACKET)}) | |
117 AF_LINK = AddressFamily.AF_LINK | |
118 | |
119 # ioprio_* constants http://linux.die.net/man/2/ioprio_get | |
120 if enum is None: | |
121 IOPRIO_CLASS_NONE = 0 | |
122 IOPRIO_CLASS_RT = 1 | |
123 IOPRIO_CLASS_BE = 2 | |
124 IOPRIO_CLASS_IDLE = 3 | |
125 else: | |
126 class IOPriority(enum.IntEnum): | |
127 IOPRIO_CLASS_NONE = 0 | |
128 IOPRIO_CLASS_RT = 1 | |
129 IOPRIO_CLASS_BE = 2 | |
130 IOPRIO_CLASS_IDLE = 3 | |
131 | |
132 globals().update(IOPriority.__members__) | |
133 | |
134 # See: | |
135 # https://github.com/torvalds/linux/blame/master/fs/proc/array.c | |
136 # ...and (TASK_* constants): | |
137 # https://github.com/torvalds/linux/blob/master/include/linux/sched.h | |
138 PROC_STATUSES = { | |
139 "R": _common.STATUS_RUNNING, | |
140 "S": _common.STATUS_SLEEPING, | |
141 "D": _common.STATUS_DISK_SLEEP, | |
142 "T": _common.STATUS_STOPPED, | |
143 "t": _common.STATUS_TRACING_STOP, | |
144 "Z": _common.STATUS_ZOMBIE, | |
145 "X": _common.STATUS_DEAD, | |
146 "x": _common.STATUS_DEAD, | |
147 "K": _common.STATUS_WAKE_KILL, | |
148 "W": _common.STATUS_WAKING, | |
149 "I": _common.STATUS_IDLE, | |
150 "P": _common.STATUS_PARKED, | |
151 } | |
152 | |
153 # https://github.com/torvalds/linux/blob/master/include/net/tcp_states.h | |
154 TCP_STATUSES = { | |
155 "01": _common.CONN_ESTABLISHED, | |
156 "02": _common.CONN_SYN_SENT, | |
157 "03": _common.CONN_SYN_RECV, | |
158 "04": _common.CONN_FIN_WAIT1, | |
159 "05": _common.CONN_FIN_WAIT2, | |
160 "06": _common.CONN_TIME_WAIT, | |
161 "07": _common.CONN_CLOSE, | |
162 "08": _common.CONN_CLOSE_WAIT, | |
163 "09": _common.CONN_LAST_ACK, | |
164 "0A": _common.CONN_LISTEN, | |
165 "0B": _common.CONN_CLOSING | |
166 } | |
167 | |
168 | |
169 # ===================================================================== | |
170 # --- named tuples | |
171 # ===================================================================== | |
172 | |
173 | |
174 # psutil.virtual_memory() | |
175 svmem = namedtuple( | |
176 'svmem', ['total', 'available', 'percent', 'used', 'free', | |
177 'active', 'inactive', 'buffers', 'cached', 'shared', 'slab']) | |
178 # psutil.disk_io_counters() | |
179 sdiskio = namedtuple( | |
180 'sdiskio', ['read_count', 'write_count', | |
181 'read_bytes', 'write_bytes', | |
182 'read_time', 'write_time', | |
183 'read_merged_count', 'write_merged_count', | |
184 'busy_time']) | |
185 # psutil.Process().open_files() | |
186 popenfile = namedtuple( | |
187 'popenfile', ['path', 'fd', 'position', 'mode', 'flags']) | |
188 # psutil.Process().memory_info() | |
189 pmem = namedtuple('pmem', 'rss vms shared text lib data dirty') | |
190 # psutil.Process().memory_full_info() | |
191 pfullmem = namedtuple('pfullmem', pmem._fields + ('uss', 'pss', 'swap')) | |
192 # psutil.Process().memory_maps(grouped=True) | |
193 pmmap_grouped = namedtuple( | |
194 'pmmap_grouped', | |
195 ['path', 'rss', 'size', 'pss', 'shared_clean', 'shared_dirty', | |
196 'private_clean', 'private_dirty', 'referenced', 'anonymous', 'swap']) | |
197 # psutil.Process().memory_maps(grouped=False) | |
198 pmmap_ext = namedtuple( | |
199 'pmmap_ext', 'addr perms ' + ' '.join(pmmap_grouped._fields)) | |
200 # psutil.Process.io_counters() | |
201 pio = namedtuple('pio', ['read_count', 'write_count', | |
202 'read_bytes', 'write_bytes', | |
203 'read_chars', 'write_chars']) | |
204 # psutil.Process.cpu_times() | |
205 pcputimes = namedtuple('pcputimes', | |
206 ['user', 'system', 'children_user', 'children_system', | |
207 'iowait']) | |
208 | |
209 | |
210 # ===================================================================== | |
211 # --- utils | |
212 # ===================================================================== | |
213 | |
214 | |
215 def readlink(path): | |
216 """Wrapper around os.readlink().""" | |
217 assert isinstance(path, basestring), path | |
218 path = os.readlink(path) | |
219 # readlink() might return paths containing null bytes ('\x00') | |
220 # resulting in "TypeError: must be encoded string without NULL | |
221 # bytes, not str" errors when the string is passed to other | |
222 # fs-related functions (os.*, open(), ...). | |
223 # Apparently everything after '\x00' is garbage (we can have | |
224 # ' (deleted)', 'new' and possibly others), see: | |
225 # https://github.com/giampaolo/psutil/issues/717 | |
226 path = path.split('\x00')[0] | |
227 # Certain paths have ' (deleted)' appended. Usually this is | |
228 # bogus as the file actually exists. Even if it doesn't we | |
229 # don't care. | |
230 if path.endswith(' (deleted)') and not path_exists_strict(path): | |
231 path = path[:-10] | |
232 return path | |
233 | |
234 | |
235 def file_flags_to_mode(flags): | |
236 """Convert file's open() flags into a readable string. | |
237 Used by Process.open_files(). | |
238 """ | |
239 modes_map = {os.O_RDONLY: 'r', os.O_WRONLY: 'w', os.O_RDWR: 'w+'} | |
240 mode = modes_map[flags & (os.O_RDONLY | os.O_WRONLY | os.O_RDWR)] | |
241 if flags & os.O_APPEND: | |
242 mode = mode.replace('w', 'a', 1) | |
243 mode = mode.replace('w+', 'r+') | |
244 # possible values: r, w, a, r+, a+ | |
245 return mode | |
246 | |
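# Illustrative doctest-style examples (not part of the upstream source) of how
# file_flags_to_mode() maps os.open() flag combinations:
#
#     >>> file_flags_to_mode(os.O_RDONLY)
#     'r'
#     >>> file_flags_to_mode(os.O_RDWR)
#     'r+'
#     >>> file_flags_to_mode(os.O_WRONLY | os.O_APPEND)
#     'a'
#     >>> file_flags_to_mode(os.O_RDWR | os.O_APPEND)
#     'a+'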
247 | |
248 def is_storage_device(name): | |
249 """Return True if the given name refers to a root device (e.g. | |
250 "sda", "nvme0n1") as opposed to a logical partition (e.g. "sda1", | |
251 "nvme0n1p1"). If name is a virtual device (e.g. "loop1", "ram") | |
252 return True. | |
253 """ | |
254 # Readapted from iostat source code, see: | |
255 # https://github.com/sysstat/sysstat/blob/ | |
256 # 97912938cd476645b267280069e83b1c8dc0e1c7/common.c#L208 | |
257 # Some devices may have a slash in their name (e.g. cciss/c0d0...). | |
258 name = name.replace('/', '!') | |
259 including_virtual = True | |
260 if including_virtual: | |
261 path = "/sys/block/%s" % name | |
262 else: | |
263 path = "/sys/block/%s/device" % name | |
264 return os.access(path, os.F_OK) | |
265 | |
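# Illustrative example (not part of the upstream source), assuming a machine
# where "sda" and its partition "sda1" exist: whole disks get an entry directly
# under /sys/block, partitions do not, hence:
#
#     >>> is_storage_device("sda")
#     True
#     >>> is_storage_device("sda1")
#     False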
266 | |
267 @memoize | |
268 def set_scputimes_ntuple(procfs_path): | |
269 """Set a namedtuple of variable fields depending on the CPU times | |
270 available on this Linux kernel version which may be: | |
271 (user, nice, system, idle, iowait, irq, softirq, [steal, [guest, | |
272 [guest_nice]]]) | |
273 Used by cpu_times() function. | |
274 """ | |
275 global scputimes | |
276 with open_binary('%s/stat' % procfs_path) as f: | |
277 values = f.readline().split()[1:] | |
278 fields = ['user', 'nice', 'system', 'idle', 'iowait', 'irq', 'softirq'] | |
279 vlen = len(values) | |
280 if vlen >= 8: | |
281 # Linux >= 2.6.11 | |
282 fields.append('steal') | |
283 if vlen >= 9: | |
284 # Linux >= 2.6.24 | |
285 fields.append('guest') | |
286 if vlen >= 10: | |
287 # Linux >= 3.2.0 | |
288 fields.append('guest_nice') | |
289 scputimes = namedtuple('scputimes', fields) | |
290 | |
291 | |
292 def cat(fname, fallback=_DEFAULT, binary=True): | |
293 """Return file content. | |
294 fallback: the value returned in case the file does not exist or | |
295 cannot be read | |
296 binary: whether to open the file in binary or text mode. | |
297 """ | |
298 try: | |
299 with open_binary(fname) if binary else open_text(fname) as f: | |
300 return f.read().strip() | |
301 except (IOError, OSError): | |
302 if fallback is not _DEFAULT: | |
303 return fallback | |
304 else: | |
305 raise | |
306 | |
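# Illustrative usage of cat() (not part of the upstream source); the first
# value is machine-dependent, and note that on Python 3 the default binary
# mode returns bytes:
#
#     >>> cat('/proc/version')[:5]
#     b'Linux'
#     >>> cat('/proc/does-not-exist', fallback=None) is None
#     True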
307 | |
308 try: | |
309 set_scputimes_ntuple("/proc") | |
310 except Exception: | |
311 # Don't want to crash at import time. | |
312 traceback.print_exc() | |
313 scputimes = namedtuple('scputimes', 'user system idle')(0.0, 0.0, 0.0) | |
314 | |
315 | |
316 # ===================================================================== | |
317 # --- system memory | |
318 # ===================================================================== | |
319 | |
320 | |
321 def calculate_avail_vmem(mems): | |
322 """Fallback for kernels < 3.14 where /proc/meminfo does not provide | |
323 "MemAvailable:" column, see: | |
324 https://blog.famzah.net/2014/09/24/ | |
325 This code reimplements the algorithm outlined here: | |
326 https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/ | |
327 commit/?id=34e431b0ae398fc54ea69ff85ec700722c9da773 | |
328 | |
329 XXX: on recent kernels this calculation differs by ~1.5% from | |
330 "MemAvailable:" as it's calculated slightly differently, see: | |
331 https://gitlab.com/procps-ng/procps/issues/42 | |
332 https://github.com/famzah/linux-memavailable-procfs/issues/2 | |
333 It is still way more realistic than doing (free + cached) though. | |
334 """ | |
335 # Fallback for very old distros. According to | |
336 # https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/ | |
337 # commit/?id=34e431b0ae398fc54ea69ff85ec700722c9da773 | |
338 # ...long ago "avail" was calculated as (free + cached). | |
339 # We might fall back in such cases: | |
340 # "Active(file)" not available: 2.6.28 / Dec 2008 | |
341 # "Inactive(file)" not available: 2.6.28 / Dec 2008 | |
342 # "SReclaimable:" not available: 2.6.19 / Nov 2006 | |
343 # /proc/zoneinfo not available: 2.6.13 / Aug 2005 | |
344 free = mems[b'MemFree:'] | |
345 fallback = free + mems.get(b"Cached:", 0) | |
346 try: | |
347 lru_active_file = mems[b'Active(file):'] | |
348 lru_inactive_file = mems[b'Inactive(file):'] | |
349 slab_reclaimable = mems[b'SReclaimable:'] | |
350 except KeyError: | |
351 return fallback | |
352 try: | |
353 f = open_binary('%s/zoneinfo' % get_procfs_path()) | |
354 except IOError: | |
355 return fallback # kernel 2.6.13 | |
356 | |
357 watermark_low = 0 | |
358 with f: | |
359 for line in f: | |
360 line = line.strip() | |
361 if line.startswith(b'low'): | |
362 watermark_low += int(line.split()[1]) | |
363 watermark_low *= PAGESIZE | |
364 watermark_low = watermark_low | |
365 | |
366 avail = free - watermark_low | |
367 pagecache = lru_active_file + lru_inactive_file | |
368 pagecache -= min(pagecache / 2, watermark_low) | |
369 avail += pagecache | |
370 avail += slab_reclaimable - min(slab_reclaimable / 2.0, watermark_low) | |
371 return int(avail) | |
372 | |
373 | |
374 def virtual_memory(): | |
375 """Report virtual memory stats. | |
376 This implementation matches "free" and "vmstat -s" cmdline | |
377 utility values and procps-ng-3.3.12 source was used as a reference | |
378 (2016-09-18): | |
379 https://gitlab.com/procps-ng/procps/blob/ | |
380 24fd2605c51fccc375ab0287cec33aa767f06718/proc/sysinfo.c | |
381 For reference, procps-ng-3.3.10 is the version available on Ubuntu | |
382 16.04. | |
383 | |
384 Note about "available" memory: up until psutil 4.3 it was | |
385 calculated as "avail = (free + buffers + cached)". Now | |
386 "MemAvailable:" column (kernel 3.14) from /proc/meminfo is used as | |
387 it's more accurate. | |
388 That matches "available" column in newer versions of "free". | |
389 """ | |
390 missing_fields = [] | |
391 mems = {} | |
392 with open_binary('%s/meminfo' % get_procfs_path()) as f: | |
393 for line in f: | |
394 fields = line.split() | |
395 mems[fields[0]] = int(fields[1]) * 1024 | |
396 | |
397 # /proc doc states that the available fields in /proc/meminfo vary | |
398 # by architecture and compile options, but these 3 values are also | |
399 # returned by sysinfo(2); as such we assume they are always there. | |
400 total = mems[b'MemTotal:'] | |
401 free = mems[b'MemFree:'] | |
402 try: | |
403 buffers = mems[b'Buffers:'] | |
404 except KeyError: | |
405 # https://github.com/giampaolo/psutil/issues/1010 | |
406 buffers = 0 | |
407 missing_fields.append('buffers') | |
408 try: | |
409 cached = mems[b"Cached:"] | |
410 except KeyError: | |
411 cached = 0 | |
412 missing_fields.append('cached') | |
413 else: | |
414 # "free" cmdline utility sums reclaimable to cached. | |
415 # Older versions of procps used to add slab memory instead. | |
416 # This got changed in: | |
417 # https://gitlab.com/procps-ng/procps/commit/ | |
418 # 05d751c4f076a2f0118b914c5e51cfbb4762ad8e | |
419 cached += mems.get(b"SReclaimable:", 0) # since kernel 2.6.19 | |
420 | |
421 try: | |
422 shared = mems[b'Shmem:'] # since kernel 2.6.32 | |
423 except KeyError: | |
424 try: | |
425 shared = mems[b'MemShared:'] # kernels 2.4 | |
426 except KeyError: | |
427 shared = 0 | |
428 missing_fields.append('shared') | |
429 | |
430 try: | |
431 active = mems[b"Active:"] | |
432 except KeyError: | |
433 active = 0 | |
434 missing_fields.append('active') | |
435 | |
436 try: | |
437 inactive = mems[b"Inactive:"] | |
438 except KeyError: | |
439 try: | |
440 inactive = \ | |
441 mems[b"Inact_dirty:"] + \ | |
442 mems[b"Inact_clean:"] + \ | |
443 mems[b"Inact_laundry:"] | |
444 except KeyError: | |
445 inactive = 0 | |
446 missing_fields.append('inactive') | |
447 | |
448 try: | |
449 slab = mems[b"Slab:"] | |
450 except KeyError: | |
451 slab = 0 | |
452 | |
453 used = total - free - cached - buffers | |
454 if used < 0: | |
455 # May be symptomatic of running within an LXC container where such | |
456 # values will be dramatically distorted over those of the host. | |
457 used = total - free | |
458 | |
459 # - starting from 4.4.0 we match free's "available" column. | |
460 # Before 4.4.0 we calculated it as (free + buffers + cached) | |
461 # which matched htop. | |
462 # - free and htop available memory differs as per: | |
463 # http://askubuntu.com/a/369589 | |
464 # http://unix.stackexchange.com/a/65852/168884 | |
465 # - MemAvailable has been introduced in kernel 3.14 | |
466 try: | |
467 avail = mems[b'MemAvailable:'] | |
468 except KeyError: | |
469 avail = calculate_avail_vmem(mems) | |
470 | |
471 if avail < 0: | |
472 avail = 0 | |
473 missing_fields.append('available') | |
474 | |
475 # If avail is greater than total or our calculation overflows, | |
476 # that's symptomatic of running within an LXC container where such | |
477 # values will be dramatically distorted over those of the host. | |
478 # https://gitlab.com/procps-ng/procps/blob/ | |
479 # 24fd2605c51fccc375ab0287cec33aa767f06718/proc/sysinfo.c#L764 | |
480 if avail > total: | |
481 avail = free | |
482 | |
483 percent = usage_percent((total - avail), total, round_=1) | |
484 | |
485 # Warn about missing metrics which are set to 0. | |
486 if missing_fields: | |
487 msg = "%s memory stats couldn't be determined and %s set to 0" % ( | |
488 ", ".join(missing_fields), | |
489 "was" if len(missing_fields) == 1 else "were") | |
490 warnings.warn(msg, RuntimeWarning) | |
491 | |
492 return svmem(total, avail, percent, used, free, | |
493 active, inactive, buffers, cached, shared, slab) | |
494 | |
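# Illustrative usage (not part of the upstream source): this function backs the
# public psutil.virtual_memory() API; all values are bytes except "percent":
#
#     >>> import psutil
#     >>> vm = psutil.virtual_memory()
#     >>> vm.available <= vm.total
#     True
#     >>> vm.percent      # machine-dependent; (total - available) / total * 100, rounded to 1 digit
#     42.1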
495 | |
496 def swap_memory(): | |
497 """Return swap memory metrics.""" | |
498 mems = {} | |
499 with open_binary('%s/meminfo' % get_procfs_path()) as f: | |
500 for line in f: | |
501 fields = line.split() | |
502 mems[fields[0]] = int(fields[1]) * 1024 | |
503 # We prefer /proc/meminfo over sysinfo() syscall so that | |
504 # psutil.PROCFS_PATH can be used in order to allow retrieval | |
505 # for linux containers, see: | |
506 # https://github.com/giampaolo/psutil/issues/1015 | |
507 try: | |
508 total = mems[b'SwapTotal:'] | |
509 free = mems[b'SwapFree:'] | |
510 except KeyError: | |
511 _, _, _, _, total, free, unit_multiplier = cext.linux_sysinfo() | |
512 total *= unit_multiplier | |
513 free *= unit_multiplier | |
514 | |
515 used = total - free | |
516 percent = usage_percent(used, total, round_=1) | |
517 # get pgin/pgouts | |
518 try: | |
519 f = open_binary("%s/vmstat" % get_procfs_path()) | |
520 except IOError as err: | |
521 # see https://github.com/giampaolo/psutil/issues/722 | |
522 msg = "'sin' and 'sout' swap memory stats couldn't " \ | |
523 "be determined and were set to 0 (%s)" % str(err) | |
524 warnings.warn(msg, RuntimeWarning) | |
525 sin = sout = 0 | |
526 else: | |
527 with f: | |
528 sin = sout = None | |
529 for line in f: | |
530 # values are expressed in units of 4 KiB; we want | |
531 # bytes instead | |
532 if line.startswith(b'pswpin'): | |
533 sin = int(line.split(b' ')[1]) * 4 * 1024 | |
534 elif line.startswith(b'pswpout'): | |
535 sout = int(line.split(b' ')[1]) * 4 * 1024 | |
536 if sin is not None and sout is not None: | |
537 break | |
538 else: | |
539 # we might get here when dealing with exotic Linux | |
540 # flavors, see: | |
541 # https://github.com/giampaolo/psutil/issues/313 | |
542 msg = "'sin' and 'sout' swap memory stats couldn't " \ | |
543 "be determined and were set to 0" | |
544 warnings.warn(msg, RuntimeWarning) | |
545 sin = sout = 0 | |
546 return _common.sswap(total, used, free, percent, sin, sout) | |
547 | |
548 | |
549 # ===================================================================== | |
550 # --- CPU | |
551 # ===================================================================== | |
552 | |
553 | |
554 def cpu_times(): | |
555 """Return a named tuple representing the following system-wide | |
556 CPU times: | |
557 (user, nice, system, idle, iowait, irq, softirq [steal, [guest, | |
558 [guest_nice]]]) | |
559 Last 3 fields may not be available on all Linux kernel versions. | |
560 """ | |
561 procfs_path = get_procfs_path() | |
562 set_scputimes_ntuple(procfs_path) | |
563 with open_binary('%s/stat' % procfs_path) as f: | |
564 values = f.readline().split() | |
565 fields = values[1:len(scputimes._fields) + 1] | |
566 fields = [float(x) / CLOCK_TICKS for x in fields] | |
567 return scputimes(*fields) | |
568 | |
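# Worked example (not part of the upstream source): the first line of
# /proc/stat looks roughly like
#
#     cpu  10132153 290696 3084719 46828483 16683 0 25195 0 175628 0
#
# and every field is a counter expressed in clock ticks, so with
# CLOCK_TICKS == 100 the first value becomes 10132153 / 100 == 101321.53
# seconds of user time in the scputimes tuple returned above.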
569 | |
570 def per_cpu_times(): | |
571 """Return a list of namedtuple representing the CPU times | |
572 for every CPU available on the system. | |
573 """ | |
574 procfs_path = get_procfs_path() | |
575 set_scputimes_ntuple(procfs_path) | |
576 cpus = [] | |
577 with open_binary('%s/stat' % procfs_path) as f: | |
578 # get rid of the first line which refers to system wide CPU stats | |
579 f.readline() | |
580 for line in f: | |
581 if line.startswith(b'cpu'): | |
582 values = line.split() | |
583 fields = values[1:len(scputimes._fields) + 1] | |
584 fields = [float(x) / CLOCK_TICKS for x in fields] | |
585 entry = scputimes(*fields) | |
586 cpus.append(entry) | |
587 return cpus | |
588 | |
589 | |
590 def cpu_count_logical(): | |
591 """Return the number of logical CPUs in the system.""" | |
592 try: | |
593 return os.sysconf("SC_NPROCESSORS_ONLN") | |
594 except ValueError: | |
595 # as a second fallback we try to parse /proc/cpuinfo | |
596 num = 0 | |
597 with open_binary('%s/cpuinfo' % get_procfs_path()) as f: | |
598 for line in f: | |
599 if line.lower().startswith(b'processor'): | |
600 num += 1 | |
601 | |
602 # unknown format (e.g. armel/sparc architectures), see: | |
603 # https://github.com/giampaolo/psutil/issues/200 | |
604 # try to parse /proc/stat as a last resort | |
605 if num == 0: | |
606 search = re.compile(r'cpu\d') | |
607 with open_text('%s/stat' % get_procfs_path()) as f: | |
608 for line in f: | |
609 line = line.split(' ')[0] | |
610 if search.match(line): | |
611 num += 1 | |
612 | |
613 if num == 0: | |
614 # mimic os.cpu_count() | |
615 return None | |
616 return num | |
617 | |
618 | |
619 def cpu_count_physical(): | |
620 """Return the number of physical cores in the system.""" | |
621 # Method #1 | |
622 core_ids = set() | |
623 for path in glob.glob( | |
624 "/sys/devices/system/cpu/cpu[0-9]*/topology/core_id"): | |
625 with open_binary(path) as f: | |
626 core_ids.add(int(f.read())) | |
627 result = len(core_ids) | |
628 if result != 0: | |
629 return result | |
630 | |
631 # Method #2 | |
632 mapping = {} | |
633 current_info = {} | |
634 with open_binary('%s/cpuinfo' % get_procfs_path()) as f: | |
635 for line in f: | |
636 line = line.strip().lower() | |
637 if not line: | |
638 # new section | |
639 if (b'physical id' in current_info and | |
640 b'cpu cores' in current_info): | |
641 mapping[current_info[b'physical id']] = \ | |
642 current_info[b'cpu cores'] | |
643 current_info = {} | |
644 else: | |
645 # ongoing section | |
646 if (line.startswith(b'physical id') or | |
647 line.startswith(b'cpu cores')): | |
648 key, value = line.split(b'\t:', 1) | |
649 current_info[key] = int(value) | |
650 | |
651 result = sum(mapping.values()) | |
652 return result or None # mimic os.cpu_count() | |
653 | |
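# Worked example (not part of the upstream source) for "Method #2" above, with
# a made-up /proc/cpuinfo: two CPU sockets reporting "physical id" 0 and 1,
# each with "cpu cores : 4", produce mapping == {0: 4, 1: 4}, and therefore
# sum(mapping.values()) == 8 physical cores.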
654 | |
655 def cpu_stats(): | |
656 """Return various CPU stats as a named tuple.""" | |
657 with open_binary('%s/stat' % get_procfs_path()) as f: | |
658 ctx_switches = None | |
659 interrupts = None | |
660 soft_interrupts = None | |
661 for line in f: | |
662 if line.startswith(b'ctxt'): | |
663 ctx_switches = int(line.split()[1]) | |
664 elif line.startswith(b'intr'): | |
665 interrupts = int(line.split()[1]) | |
666 elif line.startswith(b'softirq'): | |
667 soft_interrupts = int(line.split()[1]) | |
668 if ctx_switches is not None and soft_interrupts is not None \ | |
669 and interrupts is not None: | |
670 break | |
671 syscalls = 0 | |
672 return _common.scpustats( | |
673 ctx_switches, interrupts, soft_interrupts, syscalls) | |
674 | |
675 | |
676 if os.path.exists("/sys/devices/system/cpu/cpufreq/policy0") or \ | |
677 os.path.exists("/sys/devices/system/cpu/cpu0/cpufreq"): | |
678 def cpu_freq(): | |
679 """Return frequency metrics for all CPUs. | |
680 Contrary to other OSes, Linux updates these values in | |
681 real-time. | |
682 """ | |
683 def get_path(num): | |
684 for p in ("/sys/devices/system/cpu/cpufreq/policy%s" % num, | |
685 "/sys/devices/system/cpu/cpu%s/cpufreq" % num): | |
686 if os.path.exists(p): | |
687 return p | |
688 | |
689 ret = [] | |
690 for n in range(cpu_count_logical()): | |
691 path = get_path(n) | |
692 if not path: | |
693 continue | |
694 | |
695 pjoin = os.path.join | |
696 curr = cat(pjoin(path, "scaling_cur_freq"), fallback=None) | |
697 if curr is None: | |
698 # Likely an old RedHat, see: | |
699 # https://github.com/giampaolo/psutil/issues/1071 | |
700 curr = cat(pjoin(path, "cpuinfo_cur_freq"), fallback=None) | |
701 if curr is None: | |
702 raise NotImplementedError( | |
703 "can't find current frequency file") | |
704 curr = int(curr) / 1000 | |
705 max_ = int(cat(pjoin(path, "scaling_max_freq"))) / 1000 | |
706 min_ = int(cat(pjoin(path, "scaling_min_freq"))) / 1000 | |
707 ret.append(_common.scpufreq(curr, min_, max_)) | |
708 return ret | |
709 | |
710 elif os.path.exists("/proc/cpuinfo"): | |
711 def cpu_freq(): | |
712 """Alternate implementation using /proc/cpuinfo. | |
713 min and max frequencies are not available and are set to None. | |
714 """ | |
715 ret = [] | |
716 with open_binary('%s/cpuinfo' % get_procfs_path()) as f: | |
717 for line in f: | |
718 if line.lower().startswith(b'cpu mhz'): | |
719 key, value = line.split(b'\t:', 1) | |
720 ret.append(_common.scpufreq(float(value), 0., 0.)) | |
721 return ret | |
722 | |
723 else: | |
724 def cpu_freq(): | |
725 """Dummy implementation when none of the above files are present. | |
726 """ | |
727 return [] | |
728 | |
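# Worked example (not part of the upstream source) for the sysfs-based
# cpu_freq() implementation above: scaling_cur_freq / scaling_min_freq /
# scaling_max_freq are expressed in kHz, so a reading of 2400000 becomes
# 2400000 / 1000 == 2400.0 MHz in the scpufreq tuple.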
729 | |
730 # ===================================================================== | |
731 # --- network | |
732 # ===================================================================== | |
733 | |
734 | |
735 net_if_addrs = cext_posix.net_if_addrs | |
736 | |
737 | |
738 class _Ipv6UnsupportedError(Exception): | |
739 pass | |
740 | |
741 | |
742 class Connections: | |
743 """A wrapper on top of /proc/net/* files, retrieving per-process | |
744 and system-wide open connections (TCP, UDP, UNIX) similarly to | |
745 "netstat -an". | |
746 | |
747 Note: in case of UNIX sockets we're only able to determine the | |
748 local endpoint/path, not the one it's connected to. | |
749 According to [1] it would be possible but not easily. | |
750 | |
751 [1] http://serverfault.com/a/417946 | |
752 """ | |
753 | |
754 def __init__(self): | |
755 # The string represents the basename of the corresponding | |
756 # /proc/net/{proto_name} file. | |
757 tcp4 = ("tcp", socket.AF_INET, socket.SOCK_STREAM) | |
758 tcp6 = ("tcp6", socket.AF_INET6, socket.SOCK_STREAM) | |
759 udp4 = ("udp", socket.AF_INET, socket.SOCK_DGRAM) | |
760 udp6 = ("udp6", socket.AF_INET6, socket.SOCK_DGRAM) | |
761 unix = ("unix", socket.AF_UNIX, None) | |
762 self.tmap = { | |
763 "all": (tcp4, tcp6, udp4, udp6, unix), | |
764 "tcp": (tcp4, tcp6), | |
765 "tcp4": (tcp4,), | |
766 "tcp6": (tcp6,), | |
767 "udp": (udp4, udp6), | |
768 "udp4": (udp4,), | |
769 "udp6": (udp6,), | |
770 "unix": (unix,), | |
771 "inet": (tcp4, tcp6, udp4, udp6), | |
772 "inet4": (tcp4, udp4), | |
773 "inet6": (tcp6, udp6), | |
774 } | |
775 self._procfs_path = None | |
776 | |
777 def get_proc_inodes(self, pid): | |
778 inodes = defaultdict(list) | |
779 for fd in os.listdir("%s/%s/fd" % (self._procfs_path, pid)): | |
780 try: | |
781 inode = readlink("%s/%s/fd/%s" % (self._procfs_path, pid, fd)) | |
782 except (FileNotFoundError, ProcessLookupError): | |
783 # ENOENT == file which is gone in the meantime; | |
784 # os.stat('/proc/%s' % self.pid) will be done later | |
785 # to force NSP (if it's the case) | |
786 continue | |
787 except OSError as err: | |
788 if err.errno == errno.EINVAL: | |
789 # not a link | |
790 continue | |
791 raise | |
792 else: | |
793 if inode.startswith('socket:['): | |
794 # the process is using a socket | |
795 inode = inode[8:][:-1] | |
796 inodes[inode].append((pid, int(fd))) | |
797 return inodes | |
798 | |
799 def get_all_inodes(self): | |
800 inodes = {} | |
801 for pid in pids(): | |
802 try: | |
803 inodes.update(self.get_proc_inodes(pid)) | |
804 except (FileNotFoundError, ProcessLookupError, PermissionError): | |
805 # os.listdir() is gonna raise a lot of access denied | |
806 # exceptions in case of unprivileged user; that's fine | |
807 # as we'll just end up returning a connection with PID | |
808 # and fd set to None anyway. | |
809 # Both netstat -an and lsof do the same, so it's | |
810 # unlikely we can do any better. | |
811 # ENOENT just means a PID disappeared on us. | |
812 continue | |
813 return inodes | |
814 | |
815 @staticmethod | |
816 def decode_address(addr, family): | |
817 """Accept an "ip:port" address as displayed in /proc/net/* | |
818 and convert it into a human readable form, like: | |
819 | |
820 "0500000A:0016" -> ("10.0.0.5", 22) | |
821 "0000000000000000FFFF00000100007F:9E49" -> ("::ffff:127.0.0.1", 40521) | |
822 | |
823 The IP address portion is a little or big endian four-byte | |
824 hexadecimal number; that is, the least significant byte is listed | |
825 first, so we need to reverse the order of the bytes to convert it | |
826 to an IP address. | |
827 The port is represented as a two-byte hexadecimal number. | |
828 | |
829 Reference: | |
830 http://linuxdevcenter.com/pub/a/linux/2000/11/16/LinuxAdmin.html | |
831 """ | |
832 ip, port = addr.split(':') | |
833 port = int(port, 16) | |
834 # this usually refers to a local socket in listen mode with | |
835 # no end-points connected | |
836 if not port: | |
837 return () | |
838 if PY3: | |
839 ip = ip.encode('ascii') | |
840 if family == socket.AF_INET: | |
841 # see: https://github.com/giampaolo/psutil/issues/201 | |
842 if LITTLE_ENDIAN: | |
843 ip = socket.inet_ntop(family, base64.b16decode(ip)[::-1]) | |
844 else: | |
845 ip = socket.inet_ntop(family, base64.b16decode(ip)) | |
846 else: # IPv6 | |
847 # old version - let's keep it, just in case... | |
848 # ip = ip.decode('hex') | |
849 # return socket.inet_ntop(socket.AF_INET6, | |
850 # ''.join(ip[i:i+4][::-1] for i in xrange(0, 16, 4))) | |
851 ip = base64.b16decode(ip) | |
852 try: | |
853 # see: https://github.com/giampaolo/psutil/issues/201 | |
854 if LITTLE_ENDIAN: | |
855 ip = socket.inet_ntop( | |
856 socket.AF_INET6, | |
857 struct.pack('>4I', *struct.unpack('<4I', ip))) | |
858 else: | |
859 ip = socket.inet_ntop( | |
860 socket.AF_INET6, | |
861 struct.pack('<4I', *struct.unpack('<4I', ip))) | |
862 except ValueError: | |
863 # see: https://github.com/giampaolo/psutil/issues/623 | |
864 if not supports_ipv6(): | |
865 raise _Ipv6UnsupportedError | |
866 else: | |
867 raise | |
868 return _common.addr(ip, port) | |
869 | |
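# Worked example (not part of the upstream source) of the little-endian IPv4
# case handled above, decoding "0500000A:0016" by hand:
#
#     >>> import base64, socket
#     >>> raw = base64.b16decode(b'0500000A')           # b'\x05\x00\x00\n'
#     >>> socket.inet_ntop(socket.AF_INET, raw[::-1])   # reverse the byte order
#     '10.0.0.5'
#     >>> int('0016', 16)                               # the port half
#     22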
870 @staticmethod | |
871 def process_inet(file, family, type_, inodes, filter_pid=None): | |
872 """Parse /proc/net/tcp* and /proc/net/udp* files.""" | |
873 if file.endswith('6') and not os.path.exists(file): | |
874 # IPv6 not supported | |
875 return | |
876 with open_text(file, buffering=BIGFILE_BUFFERING) as f: | |
877 f.readline() # skip the first line | |
878 for lineno, line in enumerate(f, 1): | |
879 try: | |
880 _, laddr, raddr, status, _, _, _, _, _, inode = \ | |
881 line.split()[:10] | |
882 except ValueError: | |
883 raise RuntimeError( | |
884 "error while parsing %s; malformed line %s %r" % ( | |
885 file, lineno, line)) | |
886 if inode in inodes: | |
887 # # We assume inet sockets are unique, so we error | |
888 # # out if there are multiple references to the | |
889 # # same inode. We won't do this for UNIX sockets. | |
890 # if len(inodes[inode]) > 1 and family != socket.AF_UNIX: | |
891 # raise ValueError("ambiguous inode with multiple " | |
892 # "PID references") | |
893 pid, fd = inodes[inode][0] | |
894 else: | |
895 pid, fd = None, -1 | |
896 if filter_pid is not None and filter_pid != pid: | |
897 continue | |
898 else: | |
899 if type_ == socket.SOCK_STREAM: | |
900 status = TCP_STATUSES[status] | |
901 else: | |
902 status = _common.CONN_NONE | |
903 try: | |
904 laddr = Connections.decode_address(laddr, family) | |
905 raddr = Connections.decode_address(raddr, family) | |
906 except _Ipv6UnsupportedError: | |
907 continue | |
908 yield (fd, family, type_, laddr, raddr, status, pid) | |
909 | |
910 @staticmethod | |
911 def process_unix(file, family, inodes, filter_pid=None): | |
912 """Parse /proc/net/unix files.""" | |
913 with open_text(file, buffering=BIGFILE_BUFFERING) as f: | |
914 f.readline() # skip the first line | |
915 for line in f: | |
916 tokens = line.split() | |
917 try: | |
918 _, _, _, _, type_, _, inode = tokens[0:7] | |
919 except ValueError: | |
920 if ' ' not in line: | |
921 # see: https://github.com/giampaolo/psutil/issues/766 | |
922 continue | |
923 raise RuntimeError( | |
924 "error while parsing %s; malformed line %r" % ( | |
925 file, line)) | |
926 if inode in inodes: | |
927 # With UNIX sockets we can have a single inode | |
928 # referencing many file descriptors. | |
929 pairs = inodes[inode] | |
930 else: | |
931 pairs = [(None, -1)] | |
932 for pid, fd in pairs: | |
933 if filter_pid is not None and filter_pid != pid: | |
934 continue | |
935 else: | |
936 if len(tokens) == 8: | |
937 path = tokens[-1] | |
938 else: | |
939 path = "" | |
940 type_ = _common.socktype_to_enum(int(type_)) | |
941 # XXX: determining the remote endpoint of a | |
942 # UNIX socket on Linux is not possible, see: | |
943 # https://serverfault.com/questions/252723/ | |
944 raddr = "" | |
945 status = _common.CONN_NONE | |
946 yield (fd, family, type_, path, raddr, status, pid) | |
947 | |
948 def retrieve(self, kind, pid=None): | |
949 if kind not in self.tmap: | |
950 raise ValueError("invalid %r kind argument; choose between %s" | |
951 % (kind, ', '.join([repr(x) for x in self.tmap]))) | |
952 self._procfs_path = get_procfs_path() | |
953 if pid is not None: | |
954 inodes = self.get_proc_inodes(pid) | |
955 if not inodes: | |
956 # no connections for this process | |
957 return [] | |
958 else: | |
959 inodes = self.get_all_inodes() | |
960 ret = set() | |
961 for proto_name, family, type_ in self.tmap[kind]: | |
962 path = "%s/net/%s" % (self._procfs_path, proto_name) | |
963 if family in (socket.AF_INET, socket.AF_INET6): | |
964 ls = self.process_inet( | |
965 path, family, type_, inodes, filter_pid=pid) | |
966 else: | |
967 ls = self.process_unix( | |
968 path, family, inodes, filter_pid=pid) | |
969 for fd, family, type_, laddr, raddr, status, bound_pid in ls: | |
970 if pid: | |
971 conn = _common.pconn(fd, family, type_, laddr, raddr, | |
972 status) | |
973 else: | |
974 conn = _common.sconn(fd, family, type_, laddr, raddr, | |
975 status, bound_pid) | |
976 ret.add(conn) | |
977 return list(ret) | |
978 | |
979 | |
980 _connections = Connections() | |
981 | |
982 | |
983 def net_connections(kind='inet'): | |
984 """Return system-wide open connections.""" | |
985 return _connections.retrieve(kind) | |
986 | |
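# Illustrative usage (not part of the upstream source): this function backs the
# public psutil.net_connections() API, e.g. listing local TCP listening sockets
# (output machine-dependent):
#
#     >>> import psutil
#     >>> [c.laddr for c in psutil.net_connections(kind='tcp') if c.status == 'LISTEN']
#     [addr(ip='0.0.0.0', port=22), addr(ip='127.0.0.1', port=631)]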
987 | |
988 def net_io_counters(): | |
989 """Return network I/O statistics for every network interface | |
990 installed on the system as a dict of raw tuples. | |
991 """ | |
992 with open_text("%s/net/dev" % get_procfs_path()) as f: | |
993 lines = f.readlines() | |
994 retdict = {} | |
995 for line in lines[2:]: | |
996 colon = line.rfind(':') | |
997 assert colon > 0, repr(line) | |
998 name = line[:colon].strip() | |
999 fields = line[colon + 1:].strip().split() | |
1000 | |
1001 # in | |
1002 (bytes_recv, | |
1003 packets_recv, | |
1004 errin, | |
1005 dropin, | |
1006 fifoin, # unused | |
1007 framein, # unused | |
1008 compressedin, # unused | |
1009 multicastin, # unused | |
1010 # out | |
1011 bytes_sent, | |
1012 packets_sent, | |
1013 errout, | |
1014 dropout, | |
1015 fifoout, # unused | |
1016 collisionsout, # unused | |
1017 carrierout, # unused | |
1018 compressedout) = map(int, fields) | |
1019 | |
1020 retdict[name] = (bytes_sent, bytes_recv, packets_sent, packets_recv, | |
1021 errin, errout, dropin, dropout) | |
1022 return retdict | |
1023 | |
1024 | |
1025 def net_if_stats(): | |
1026 """Get NIC stats (isup, duplex, speed, mtu).""" | |
1027 duplex_map = {cext.DUPLEX_FULL: NIC_DUPLEX_FULL, | |
1028 cext.DUPLEX_HALF: NIC_DUPLEX_HALF, | |
1029 cext.DUPLEX_UNKNOWN: NIC_DUPLEX_UNKNOWN} | |
1030 names = net_io_counters().keys() | |
1031 ret = {} | |
1032 for name in names: | |
1033 try: | |
1034 mtu = cext_posix.net_if_mtu(name) | |
1035 isup = cext_posix.net_if_flags(name) | |
1036 duplex, speed = cext.net_if_duplex_speed(name) | |
1037 except OSError as err: | |
1038 # https://github.com/giampaolo/psutil/issues/1279 | |
1039 if err.errno != errno.ENODEV: | |
1040 raise | |
1041 else: | |
1042 ret[name] = _common.snicstats(isup, duplex_map[duplex], speed, mtu) | |
1043 return ret | |
1044 | |
1045 | |
1046 # ===================================================================== | |
1047 # --- disks | |
1048 # ===================================================================== | |
1049 | |
1050 | |
1051 disk_usage = _psposix.disk_usage | |
1052 | |
1053 | |
1054 def disk_io_counters(perdisk=False): | |
1055 """Return disk I/O statistics for every disk installed on the | |
1056 system as a dict of raw tuples. | |
1057 """ | |
1058 def read_procfs(): | |
1059 # OK, this is a bit confusing. The format of /proc/diskstats can | |
1060 # have 3 variations. | |
1061 # On Linux 2.4 each line always has 15 fields, e.g.: | |
1062 # "3 0 8 hda 8 8 8 8 8 8 8 8 8 8 8" | |
1063 # On Linux 2.6+ each line *usually* has 14 fields, and the disk | |
1064 # name is in another position, like this: | |
1065 # "3 0 hda 8 8 8 8 8 8 8 8 8 8 8" | |
1066 # ...unless (Linux 2.6) the line refers to a partition instead | |
1067 # of a disk, in which case the line has fewer fields (7): | |
1068 # "3 1 hda1 8 8 8 8" | |
1069 # 4.18+ has 4 fields added: | |
1070 # "3 0 hda 8 8 8 8 8 8 8 8 8 8 8 0 0 0 0" | |
1071 # 5.5 has 2 more fields. | |
1072 # See: | |
1073 # https://www.kernel.org/doc/Documentation/iostats.txt | |
1074 # https://www.kernel.org/doc/Documentation/ABI/testing/procfs-diskstats | |
1075 with open_text("%s/diskstats" % get_procfs_path()) as f: | |
1076 lines = f.readlines() | |
1077 for line in lines: | |
1078 fields = line.split() | |
1079 flen = len(fields) | |
1080 if flen == 15: | |
1081 # Linux 2.4 | |
1082 name = fields[3] | |
1083 reads = int(fields[2]) | |
1084 (reads_merged, rbytes, rtime, writes, writes_merged, | |
1085 wbytes, wtime, _, busy_time, _) = map(int, fields[4:14]) | |
1086 elif flen == 14 or flen >= 18: | |
1087 # Linux 2.6+, line referring to a disk | |
1088 name = fields[2] | |
1089 (reads, reads_merged, rbytes, rtime, writes, writes_merged, | |
1090 wbytes, wtime, _, busy_time, _) = map(int, fields[3:14]) | |
1091 elif flen == 7: | |
1092 # Linux 2.6+, line referring to a partition | |
1093 name = fields[2] | |
1094 reads, rbytes, writes, wbytes = map(int, fields[3:]) | |
1095 rtime = wtime = reads_merged = writes_merged = busy_time = 0 | |
1096 else: | |
1097 raise ValueError("not sure how to interpret line %r" % line) | |
1098 yield (name, reads, writes, rbytes, wbytes, rtime, wtime, | |
1099 reads_merged, writes_merged, busy_time) | |
1100 | |
1101 def read_sysfs(): | |
1102 for block in os.listdir('/sys/block'): | |
1103 for root, _, files in os.walk(os.path.join('/sys/block', block)): | |
1104 if 'stat' not in files: | |
1105 continue | |
1106 with open_text(os.path.join(root, 'stat')) as f: | |
1107 fields = f.read().strip().split() | |
1108 name = os.path.basename(root) | |
1109 (reads, reads_merged, rbytes, rtime, writes, writes_merged, | |
1110 wbytes, wtime, _, busy_time) = map(int, fields[:10]) | |
1111 yield (name, reads, writes, rbytes, wbytes, rtime, | |
1112 wtime, reads_merged, writes_merged, busy_time) | |
1113 | |
1114 if os.path.exists('%s/diskstats' % get_procfs_path()): | |
1115 gen = read_procfs() | |
1116 elif os.path.exists('/sys/block'): | |
1117 gen = read_sysfs() | |
1118 else: | |
1119 raise NotImplementedError( | |
1120 "%s/diskstats nor /sys/block filesystem are available on this " | |
1121 "system" % get_procfs_path()) | |
1122 | |
1123 retdict = {} | |
1124 for entry in gen: | |
1125 (name, reads, writes, rbytes, wbytes, rtime, wtime, reads_merged, | |
1126 writes_merged, busy_time) = entry | |
1127 if not perdisk and not is_storage_device(name): | |
1128 # perdisk=False means we want to calculate totals so we skip | |
1129 # partitions (e.g. 'sda1', 'nvme0n1p1') and only include | |
1130 # base disk devices (e.g. 'sda', 'nvme0n1'). Base disks | |
1131 # include a total of all their partitions + some extra size | |
1132 # of their own: | |
1133 # $ cat /proc/diskstats | |
1134 # 259 0 sda 10485760 ... | |
1135 # 259 1 sda1 5186039 ... | |
1136 # 259 1 sda2 5082039 ... | |
1137 # See: | |
1138 # https://github.com/giampaolo/psutil/pull/1313 | |
1139 continue | |
1140 | |
1141 rbytes *= DISK_SECTOR_SIZE | |
1142 wbytes *= DISK_SECTOR_SIZE | |
1143 retdict[name] = (reads, writes, rbytes, wbytes, rtime, wtime, | |
1144 reads_merged, writes_merged, busy_time) | |
1145 | |
1146 return retdict | |
1147 | |
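# Illustrative usage (not part of the upstream source): the public wrapper is
# psutil.disk_io_counters(); with perdisk=True the dict built above is keyed by
# device name, so partitions appear alongside their base disks
# (output machine-dependent):
#
#     >>> import psutil
#     >>> sorted(psutil.disk_io_counters(perdisk=True))
#     ['sda', 'sda1', 'sda2']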
1148 | |
1149 def disk_partitions(all=False): | |
1150 """Return mounted disk partitions as a list of namedtuples.""" | |
1151 fstypes = set() | |
1152 procfs_path = get_procfs_path() | |
1153 with open_text("%s/filesystems" % procfs_path) as f: | |
1154 for line in f: | |
1155 line = line.strip() | |
1156 if not line.startswith("nodev"): | |
1157 fstypes.add(line.strip()) | |
1158 else: | |
1159 # ignore all lines starting with "nodev" except "nodev zfs" | |
1160 fstype = line.split("\t")[1] | |
1161 if fstype == "zfs": | |
1162 fstypes.add("zfs") | |
1163 | |
1164 # See: https://github.com/giampaolo/psutil/issues/1307 | |
1165 if procfs_path == "/proc" and os.path.isfile('/etc/mtab'): | |
1166 mounts_path = os.path.realpath("/etc/mtab") | |
1167 else: | |
1168 mounts_path = os.path.realpath("%s/self/mounts" % procfs_path) | |
1169 | |
1170 retlist = [] | |
1171 partitions = cext.disk_partitions(mounts_path) | |
1172 for partition in partitions: | |
1173 device, mountpoint, fstype, opts = partition | |
1174 if device == 'none': | |
1175 device = '' | |
1176 if not all: | |
1177 if device == '' or fstype not in fstypes: | |
1178 continue | |
1179 ntuple = _common.sdiskpart(device, mountpoint, fstype, opts) | |
1180 retlist.append(ntuple) | |
1181 | |
1182 return retlist | |
1183 | |
1184 | |
1185 # ===================================================================== | |
1186 # --- sensors | |
1187 # ===================================================================== | |
1188 | |
1189 | |
1190 def sensors_temperatures(): | |
1191 """Return hardware (CPU and others) temperatures as a dict | |
1192 including hardware name, label, current, max and critical | |
1193 temperatures. | |
1194 | |
1195 Implementation notes: | |
1196 - /sys/class/hwmon looks like the most recent interface to | |
1197 retrieve this info, and this implementation relies on it | |
1198 only (old distros will probably use something else) | |
1199 - lm-sensors on Ubuntu 16.04 relies on /sys/class/hwmon | |
1200 - /sys/class/thermal/thermal_zone* is another one but it's more | |
1201 difficult to parse | |
1202 """ | |
1203 ret = collections.defaultdict(list) | |
1204 basenames = glob.glob('/sys/class/hwmon/hwmon*/temp*_*') | |
1205 # CentOS has an intermediate /device directory: | |
1206 # https://github.com/giampaolo/psutil/issues/971 | |
1207 # https://github.com/nicolargo/glances/issues/1060 | |
1208 basenames.extend(glob.glob('/sys/class/hwmon/hwmon*/device/temp*_*')) | |
1209 basenames.extend(glob.glob( | |
1210 '/sys/devices/platform/coretemp.*/hwmon/hwmon*/temp*_*')) | |
1211 basenames = sorted(set([x.split('_')[0] for x in basenames])) | |
1212 | |
1213 for base in basenames: | |
1214 try: | |
1215 path = base + '_input' | |
1216 current = float(cat(path)) / 1000.0 | |
1217 path = os.path.join(os.path.dirname(base), 'name') | |
1218 unit_name = cat(path, binary=False) | |
1219 except (IOError, OSError, ValueError): | |
1220 # A lot of things can go wrong here, so let's just skip the | |
1221 # whole entry. Sure thing is Linux's /sys/class/hwmon really | |
1222 # is a stinky broken mess. | |
1223 # https://github.com/giampaolo/psutil/issues/1009 | |
1224 # https://github.com/giampaolo/psutil/issues/1101 | |
1225 # https://github.com/giampaolo/psutil/issues/1129 | |
1226 # https://github.com/giampaolo/psutil/issues/1245 | |
1227 # https://github.com/giampaolo/psutil/issues/1323 | |
1228 continue | |
1229 | |
1230 high = cat(base + '_max', fallback=None) | |
1231 critical = cat(base + '_crit', fallback=None) | |
1232 label = cat(base + '_label', fallback='', binary=False) | |
1233 | |
1234 if high is not None: | |
1235 try: | |
1236 high = float(high) / 1000.0 | |
1237 except ValueError: | |
1238 high = None | |
1239 if critical is not None: | |
1240 try: | |
1241 critical = float(critical) / 1000.0 | |
1242 except ValueError: | |
1243 critical = None | |
1244 | |
1245 ret[unit_name].append((label, current, high, critical)) | |
1246 | |
1247 # Indication that no sensors were detected in /sys/class/hwmon/ | |
1248 if not basenames: | |
1249 basenames = glob.glob('/sys/class/thermal/thermal_zone*') | |
1250 basenames = sorted(set(basenames)) | |
1251 | |
1252 for base in basenames: | |
1253 try: | |
1254 path = os.path.join(base, 'temp') | |
1255 current = float(cat(path)) / 1000.0 | |
1256 path = os.path.join(base, 'type') | |
1257 unit_name = cat(path, binary=False) | |
1258 except (IOError, OSError, ValueError) as err: | |
1259 debug("ignoring %r for file %r" % (err, path)) | |
1260 continue | |
1261 | |
1262 trip_paths = glob.glob(base + '/trip_point*') | |
1263 trip_points = set(['_'.join( | |
1264 os.path.basename(p).split('_')[0:3]) for p in trip_paths]) | |
1265 critical = None | |
1266 high = None | |
1267 for trip_point in trip_points: | |
1268 path = os.path.join(base, trip_point + "_type") | |
1269 trip_type = cat(path, fallback='', binary=False) | |
1270 if trip_type == 'critical': | |
1271 critical = cat(os.path.join(base, trip_point + "_temp"), | |
1272 fallback=None) | |
1273 elif trip_type == 'high': | |
1274 high = cat(os.path.join(base, trip_point + "_temp"), | |
1275 fallback=None) | |
1276 | |
1277 if high is not None: | |
1278 try: | |
1279 high = float(high) / 1000.0 | |
1280 except ValueError: | |
1281 high = None | |
1282 if critical is not None: | |
1283 try: | |
1284 critical = float(critical) / 1000.0 | |
1285 except ValueError: | |
1286 critical = None | |
1287 | |
1288 ret[unit_name].append(('', current, high, critical)) | |
1289 | |
1290 return dict(ret) | |
1291 | |
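# Worked example (not part of the upstream source): hwmon and thermal_zone
# files report millidegrees Celsius, so a temp1_input reading of 45000 becomes
# 45000 / 1000.0 == 45.0 degrees in the tuples collected above.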
1292 | |
1293 def sensors_fans(): | |
1294 """Return hardware fans info (for CPU and other peripherals) as a | |
1295 dict including hardware label and current speed. | |
1296 | |
1297 Implementation notes: | |
1298 - /sys/class/hwmon looks like the most recent interface to | |
1299 retrieve this info, and this implementation relies on it | |
1300 only (old distros will probably use something else) | |
1301 - lm-sensors on Ubuntu 16.04 relies on /sys/class/hwmon | |
1302 """ | |
1303 ret = collections.defaultdict(list) | |
1304 basenames = glob.glob('/sys/class/hwmon/hwmon*/fan*_*') | |
1305 if not basenames: | |
1306 # CentOS has an intermediate /device directory: | |
1307 # https://github.com/giampaolo/psutil/issues/971 | |
1308 basenames = glob.glob('/sys/class/hwmon/hwmon*/device/fan*_*') | |
1309 | |
1310 basenames = sorted(set([x.split('_')[0] for x in basenames])) | |
1311 for base in basenames: | |
1312 try: | |
1313 current = int(cat(base + '_input')) | |
1314 except (IOError, OSError) as err: | |
1315 warnings.warn("ignoring %r" % err, RuntimeWarning) | |
1316 continue | |
1317 unit_name = cat(os.path.join(os.path.dirname(base), 'name'), | |
1318 binary=False) | |
1319 label = cat(base + '_label', fallback='', binary=False) | |
1320 ret[unit_name].append(_common.sfan(label, current)) | |
1321 | |
1322 return dict(ret) | |
1323 | |
1324 | |
1325 def sensors_battery(): | |
1326 """Return battery information. | |
1327 Implementation note: it appears /sys/class/power_supply/BAT0/ | |
1328 directory structure may vary and provide files with the same | |
1329 meaning but under different names, see: | |
1330 https://github.com/giampaolo/psutil/issues/966 | |
1331 """ | |
1332 null = object() | |
1333 | |
1334 def multi_cat(*paths): | |
1335 """Attempt to read the content of multiple files which may | |
1336 not exist. If none of them exist return None. | |
1337 """ | |
1338 for path in paths: | |
1339 ret = cat(path, fallback=null) | |
1340 if ret != null: | |
1341 return int(ret) if ret.isdigit() else ret | |
1342 return None | |
1343 | |
1344 bats = [x for x in os.listdir(POWER_SUPPLY_PATH) if x.startswith('BAT')] | |
1345 if not bats: | |
1346 return None | |
1347 # Get the first available battery. Usually this is "BAT0", except | |
1348 # some rare exceptions: | |
1349 # https://github.com/giampaolo/psutil/issues/1238 | |
1350 root = os.path.join(POWER_SUPPLY_PATH, sorted(bats)[0]) | |
1351 | |
1352 # Base metrics. | |
1353 energy_now = multi_cat( | |
1354 root + "/energy_now", | |
1355 root + "/charge_now") | |
1356 power_now = multi_cat( | |
1357 root + "/power_now", | |
1358 root + "/current_now") | |
1359 energy_full = multi_cat( | |
1360 root + "/energy_full", | |
1361 root + "/charge_full") | |
1362 if energy_now is None or power_now is None: | |
1363 return None | |
1364 | |
1365 # Percent. If we have energy_full the percentage will be more | |
1366 # accurate compared to reading /capacity file (float vs. int). | |
1367 if energy_full is not None: | |
1368 try: | |
1369 percent = 100.0 * energy_now / energy_full | |
1370 except ZeroDivisionError: | |
1371 percent = 0.0 | |
1372 else: | |
1373 percent = int(cat(root + "/capacity", fallback=-1)) | |
1374 if percent == -1: | |
1375 return None | |
1376 | |
1377 # Is AC power cable plugged in? | |
1378 # Note: AC0 is not always available and sometimes (e.g. CentOS7) | |
1379 # it's called "AC". | |
1380 power_plugged = None | |
1381 online = multi_cat( | |
1382 os.path.join(POWER_SUPPLY_PATH, "AC0/online"), | |
1383 os.path.join(POWER_SUPPLY_PATH, "AC/online")) | |
1384 if online is not None: | |
1385 power_plugged = online == 1 | |
1386 else: | |
1387 status = cat(root + "/status", fallback="", binary=False).lower() | |
1388 if status == "discharging": | |
1389 power_plugged = False | |
1390 elif status in ("charging", "full"): | |
1391 power_plugged = True | |
1392 | |
1393 # Seconds left. | |
1394 # Note to self: we may also calculate the charging ETA as per: | |
1395 # https://github.com/thialfihar/dotfiles/blob/ | |
1396 # 013937745fd9050c30146290e8f963d65c0179e6/bin/battery.py#L55 | |
1397 if power_plugged: | |
1398 secsleft = _common.POWER_TIME_UNLIMITED | |
1399 else: | |
1400 try: | |
1401 secsleft = int(energy_now / power_now * 3600) | |
1402 except ZeroDivisionError: | |
1403 secsleft = _common.POWER_TIME_UNKNOWN | |
1404 | |
1405 return _common.sbattery(percent, secsleft, power_plugged) | |
1406 | |
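# Worked example (not part of the upstream source) of the "seconds left"
# arithmetic above, with made-up sysfs readings: energy_now == 25000000 (uWh)
# and power_now == 10000000 (uW) means 25 Wh left draining at 10 W, so
# secsleft == int(25000000 / 10000000 * 3600) == 9000 seconds, i.e. 2.5 hours.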
1407 | |
1408 # ===================================================================== | |
1409 # --- other system functions | |
1410 # ===================================================================== | |
1411 | |
1412 | |
1413 def users(): | |
1414 """Return currently connected users as a list of namedtuples.""" | |
1415 retlist = [] | |
1416 rawlist = cext.users() | |
1417 for item in rawlist: | |
1418 user, tty, hostname, tstamp, user_process, pid = item | |
1419 # note: the underlying C function includes entries about | |
1420 # system boot, run level and others. We might want | |
1421 # to use them in the future. | |
1422 if not user_process: | |
1423 continue | |
1424 if hostname in (':0.0', ':0'): | |
1425 hostname = 'localhost' | |
1426 nt = _common.suser(user, tty or None, hostname, tstamp, pid) | |
1427 retlist.append(nt) | |
1428 return retlist | |
1429 | |
1430 | |
1431 def boot_time(): | |
1432 """Return the system boot time expressed in seconds since the epoch.""" | |
1433 global BOOT_TIME | |
1434 path = '%s/stat' % get_procfs_path() | |
1435 with open_binary(path) as f: | |
1436 for line in f: | |
1437 if line.startswith(b'btime'): | |
1438 ret = float(line.strip().split()[1]) | |
1439 BOOT_TIME = ret | |
1440 return ret | |
1441 raise RuntimeError( | |
1442 "line 'btime' not found in %s" % path) | |
1443 | |
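# Worked example (not part of the upstream source): /proc/stat carries a line
# such as "btime 1588404861" (a made-up but plausible value), i.e. the boot
# instant in seconds since the epoch, so boot_time() would return 1588404861.0.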
1444 | |
1445 # ===================================================================== | |
1446 # --- processes | |
1447 # ===================================================================== | |
1448 | |
1449 | |
1450 def pids(): | |
1451 """Returns a list of PIDs currently running on the system.""" | |
1452 return [int(x) for x in os.listdir(b(get_procfs_path())) if x.isdigit()] | |
1453 | |
1454 | |
1455 def pid_exists(pid): | |
1456 """Check for the existence of a unix PID. Linux TIDs are not | |
1457 supported (always return False). | |
1458 """ | |
1459 if not _psposix.pid_exists(pid): | |
1460 return False | |
1461 else: | |
1462 # Linux apparently does not distinguish between PIDs and TIDs | |
1463 # (thread IDs). | |
1464 # listdir("/proc") won't show any TID (only PIDs) but | |
1465 # os.stat("/proc/{tid}") will succeed if {tid} exists. | |
1466 # os.kill() can also be passed a TID. This is quite confusing. | |
1467 # In here we want to enforce this distinction and support PIDs | |
1468 # only, see: | |
1469 # https://github.com/giampaolo/psutil/issues/687 | |
1470 try: | |
1471 # Note: already checked that this is faster than using a | |
1472 # regular expr. Also (a lot) faster than doing | |
1473 # 'return pid in pids()' | |
1474 path = "%s/%s/status" % (get_procfs_path(), pid) | |
1475 with open_binary(path) as f: | |
1476 for line in f: | |
1477 if line.startswith(b"Tgid:"): | |
1478 tgid = int(line.split()[1]) | |
1479 # If tgid and pid are the same then we're | |
1480 # dealing with a process PID. | |
1481 return tgid == pid | |
1482 raise ValueError("'Tgid' line not found in %s" % path) | |
1483 except (EnvironmentError, ValueError): | |
1484 return pid in pids() | |
1485 | |
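# Worked example (not part of the upstream source) of the PID vs. TID check
# above, using hypothetical IDs: a thread 4001 belonging to process 4000 shows
# "Tgid: 4000" in /proc/4001/status, so tgid != pid and pid_exists(4001)
# returns False, even though os.stat("/proc/4001") would succeed.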
1486 | |
1487 def ppid_map(): | |
1488 """Obtain a {pid: ppid, ...} dict for all running processes in | |
1489 one shot. Used to speed up Process.children(). | |
1490 """ | |
1491 ret = {} | |
1492 procfs_path = get_procfs_path() | |
1493 for pid in pids(): | |
1494 try: | |
1495 with open_binary("%s/%s/stat" % (procfs_path, pid)) as f: | |
1496 data = f.read() | |
1497 except (FileNotFoundError, ProcessLookupError): | |
1498 # Note: we should be able to access /stat for all processes | |
1499 # aka it's unlikely we'll bump into EPERM, which is good. | |
1500 pass | |
1501 else: | |
1502 rpar = data.rfind(b')') | |
1503 dset = data[rpar + 2:].split() | |
1504 ppid = int(dset[1]) | |
1505 ret[pid] = ppid | |
1506 return ret | |
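# Sketch of the parsing above, with a hypothetical stat line: | |
#   b"1234 (my (weird) name) S 1 ..." | |
# rfind(b')') skips any parentheses inside the process name, so dset | |
# becomes [b'S', b'1', ...] and ret[1234] == int(dset[1]) == 1. | |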
1507 | |
1508 | |
1509 def wrap_exceptions(fun): | |
1510 """Decorator which translates bare OSError and IOError exceptions | |
1511 into NoSuchProcess and AccessDenied. | |
1512 """ | |
1513 @functools.wraps(fun) | |
1514 def wrapper(self, *args, **kwargs): | |
1515 try: | |
1516 return fun(self, *args, **kwargs) | |
1517 except PermissionError: | |
1518 raise AccessDenied(self.pid, self._name) | |
1519 except ProcessLookupError: | |
1520 raise NoSuchProcess(self.pid, self._name) | |
1521 except FileNotFoundError: | |
1522 if not os.path.exists("%s/%s" % (self._procfs_path, self.pid)): | |
1523 raise NoSuchProcess(self.pid, self._name) | |
1524 # Note: zombies keep an entry under /proc until they are | |
1525 # reaped, so there's no way to distinguish them in here. | |
1526 raise | |
1527 return wrapper | |
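# In short: PermissionError -> AccessDenied, ProcessLookupError -> | |
# NoSuchProcess, and FileNotFoundError -> NoSuchProcess only when the | |
# whole /proc/<pid> directory has disappeared. | |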
1528 | |
1529 | |
1530 class Process(object): | |
1531 """Linux process implementation.""" | |
1532 | |
1533 __slots__ = ["pid", "_name", "_ppid", "_procfs_path", "_cache"] | |
1534 | |
1535 def __init__(self, pid): | |
1536 self.pid = pid | |
1537 self._name = None | |
1538 self._ppid = None | |
1539 self._procfs_path = get_procfs_path() | |
1540 | |
1541 def _assert_alive(self): | |
1542 """Raise NSP if the process disappeared on us.""" | |
1543 # For those C functions which do not raise NSP and may instead | |
1544 # return an incorrect or incomplete result. | |
1545 os.stat('%s/%s' % (self._procfs_path, self.pid)) | |
1546 | |
1547 @wrap_exceptions | |
1548 @memoize_when_activated | |
1549 def _parse_stat_file(self): | |
1550 """Parse /proc/{pid}/stat file and return a dict with various | |
1551 process info. | |
1552 Using "man proc" as a reference: where "man proc" refers to | |
1553 position N, always subtract 3 (e.g. ppid is at position 4 in | |
1554 'man proc' but at position 1 in here). | |
1555 The return value is cached in case oneshot() ctx manager is | |
1556 in use. | |
1557 """ | |
1558 with open_binary("%s/%s/stat" % (self._procfs_path, self.pid)) as f: | |
1559 data = f.read() | |
1560 # Process name is between parentheses. It can contain spaces and | |
1561 # other parentheses. This is taken into account by looking for | |
1562 # the first occurrence of "(" and the last occurrence of ")". | |
1563 rpar = data.rfind(b')') | |
1564 name = data[data.find(b'(') + 1:rpar] | |
1565 fields = data[rpar + 2:].split() | |
1566 | |
1567 ret = {} | |
1568 ret['name'] = name | |
1569 ret['status'] = fields[0] | |
1570 ret['ppid'] = fields[1] | |
1571 ret['ttynr'] = fields[4] | |
1572 ret['utime'] = fields[11] | |
1573 ret['stime'] = fields[12] | |
1574 ret['children_utime'] = fields[13] | |
1575 ret['children_stime'] = fields[14] | |
1576 ret['create_time'] = fields[19] | |
1577 ret['cpu_num'] = fields[36] | |
1578 ret['blkio_ticks'] = fields[39] # aka 'delayacct_blkio_ticks' | |
1579 | |
1580 return ret | |
1581 | |
1582 @wrap_exceptions | |
1583 @memoize_when_activated | |
1584 def _read_status_file(self): | |
1585 """Read /proc/{pid}/stat file and return its content. | |
1586 The return value is cached in case oneshot() ctx manager is | |
1587 in use. | |
1588 """ | |
1589 with open_binary("%s/%s/status" % (self._procfs_path, self.pid)) as f: | |
1590 return f.read() | |
1591 | |
1592 @wrap_exceptions | |
1593 @memoize_when_activated | |
1594 def _read_smaps_file(self): | |
1595 with open_binary("%s/%s/smaps" % (self._procfs_path, self.pid), | |
1596 buffering=BIGFILE_BUFFERING) as f: | |
1597 return f.read().strip() | |
1598 | |
1599 def oneshot_enter(self): | |
1600 self._parse_stat_file.cache_activate(self) | |
1601 self._read_status_file.cache_activate(self) | |
1602 self._read_smaps_file.cache_activate(self) | |
1603 | |
1604 def oneshot_exit(self): | |
1605 self._parse_stat_file.cache_deactivate(self) | |
1606 self._read_status_file.cache_deactivate(self) | |
1607 self._read_smaps_file.cache_deactivate(self) | |
1608 | |
1609 @wrap_exceptions | |
1610 def name(self): | |
1611 name = self._parse_stat_file()['name'] | |
1612 if PY3: | |
1613 name = decode(name) | |
1614 # XXX - gets changed later and probably needs refactoring | |
1615 return name | |
1616 | |
1617 def exe(self): | |
1618 try: | |
1619 return readlink("%s/%s/exe" % (self._procfs_path, self.pid)) | |
1620 except (FileNotFoundError, ProcessLookupError): | |
1621 # no such file error; it might also be raised if the | |
1622 # path actually exists for system processes with | |
1623 # low pids (about 0-20) | |
1624 if os.path.lexists("%s/%s" % (self._procfs_path, self.pid)): | |
1625 return "" | |
1626 else: | |
1627 if not pid_exists(self.pid): | |
1628 raise NoSuchProcess(self.pid, self._name) | |
1629 else: | |
1630 raise ZombieProcess(self.pid, self._name, self._ppid) | |
1631 except PermissionError: | |
1632 raise AccessDenied(self.pid, self._name) | |
1633 | |
1634 @wrap_exceptions | |
1635 def cmdline(self): | |
1636 with open_text("%s/%s/cmdline" % (self._procfs_path, self.pid)) as f: | |
1637 data = f.read() | |
1638 if not data: | |
1639 # may happen in case of zombie process | |
1640 return [] | |
1641 # 'man proc' states that args are separated by null bytes '\0' | |
1642 # and last char is supposed to be a null byte. Nevertheless | |
1643 # some processes may change their cmdline after being started | |
1644 # (via setproctitle() or similar), they are usually not | |
1645 # compliant with this rule and use spaces instead. Google | |
1646 # Chrome process is an example. See: | |
1647 # https://github.com/giampaolo/psutil/issues/1179 | |
1648 sep = '\x00' if data.endswith('\x00') else ' ' | |
1649 if data.endswith(sep): | |
1650 data = data[:-1] | |
1651 cmdline = data.split(sep) | |
1652 # Sometimes last char is a null byte '\0' but the args are | |
1653 # separated by spaces, see: https://github.com/giampaolo/psutil/ | |
1654 # issues/1179#issuecomment-552984549 | |
1655 if sep == '\x00' and len(cmdline) == 1 and ' ' in data: | |
1656 cmdline = data.split(' ') | |
1657 return cmdline | |
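# Hedged examples of the two separator cases handled above: | |
#   'python3\x00script.py\x00' -> ['python3', 'script.py'] | |
#   'cmd arg1 arg2\x00' (args separated by spaces, single trailing NUL) | |
#   -> ['cmd', 'arg1', 'arg2'] via the fallback split on spaces. | |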
1658 | |
1659 @wrap_exceptions | |
1660 def environ(self): | |
1661 with open_text("%s/%s/environ" % (self._procfs_path, self.pid)) as f: | |
1662 data = f.read() | |
1663 return parse_environ_block(data) | |
1664 | |
1665 @wrap_exceptions | |
1666 def terminal(self): | |
1667 tty_nr = int(self._parse_stat_file()['ttynr']) | |
1668 tmap = _psposix.get_terminal_map() | |
1669 try: | |
1670 return tmap[tty_nr] | |
1671 except KeyError: | |
1672 return None | |
1673 | |
1674 # May not be available on old kernels. | |
1675 if os.path.exists('/proc/%s/io' % os.getpid()): | |
1676 @wrap_exceptions | |
1677 def io_counters(self): | |
1678 fname = "%s/%s/io" % (self._procfs_path, self.pid) | |
1679 fields = {} | |
1680 with open_binary(fname) as f: | |
1681 for line in f: | |
1682 # https://github.com/giampaolo/psutil/issues/1004 | |
1683 line = line.strip() | |
1684 if line: | |
1685 try: | |
1686 name, value = line.split(b': ') | |
1687 except ValueError: | |
1688 # https://github.com/giampaolo/psutil/issues/1004 | |
1689 continue | |
1690 else: | |
1691 fields[name] = int(value) | |
1692 if not fields: | |
1693 raise RuntimeError("%s file was empty" % fname) | |
1694 try: | |
1695 return pio( | |
1696 fields[b'syscr'], # read syscalls | |
1697 fields[b'syscw'], # write syscalls | |
1698 fields[b'read_bytes'], # read bytes | |
1699 fields[b'write_bytes'], # write bytes | |
1700 fields[b'rchar'], # read chars | |
1701 fields[b'wchar'], # write chars | |
1702 ) | |
1703 except KeyError as err: | |
1704 raise ValueError("%r field was not found in %s; found fields " | |
1705 "are %r" % (err[0], fname, fields)) | |
1706 | |
1707 @wrap_exceptions | |
1708 def cpu_times(self): | |
1709 values = self._parse_stat_file() | |
1710 utime = float(values['utime']) / CLOCK_TICKS | |
1711 stime = float(values['stime']) / CLOCK_TICKS | |
1712 children_utime = float(values['children_utime']) / CLOCK_TICKS | |
1713 children_stime = float(values['children_stime']) / CLOCK_TICKS | |
1714 iowait = float(values['blkio_ticks']) / CLOCK_TICKS | |
1715 return pcputimes(utime, stime, children_utime, children_stime, iowait) | |
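# Worked example of the conversion above (hypothetical numbers): with | |
# CLOCK_TICKS == 100, the usual value, a raw utime of 250 ticks is | |
# reported as 250 / 100 == 2.5 seconds. | |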
1716 | |
1717 @wrap_exceptions | |
1718 def cpu_num(self): | |
1719 """What CPU the process is on.""" | |
1720 return int(self._parse_stat_file()['cpu_num']) | |
1721 | |
1722 @wrap_exceptions | |
1723 def wait(self, timeout=None): | |
1724 return _psposix.wait_pid(self.pid, timeout, self._name) | |
1725 | |
1726 @wrap_exceptions | |
1727 def create_time(self): | |
1728 ctime = float(self._parse_stat_file()['create_time']) | |
1729 # According to documentation, starttime is in field 21 and the | |
1730 # unit is jiffies (clock ticks). | |
1731 # We first divide it by clock ticks and then add the boot time, | |
1732 # returning seconds since the epoch, in UTC. | |
1733 # Also use the cached boot time value if available. | |
1734 bt = BOOT_TIME or boot_time() | |
1735 return (ctime / CLOCK_TICKS) + bt | |
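# Worked example (hypothetical numbers): starttime == 123400 ticks, | |
# CLOCK_TICKS == 100 and a boot time of 1588400000.0 give | |
# 123400 / 100 + 1588400000.0 == 1588401234.0 epoch seconds. | |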
1736 | |
1737 @wrap_exceptions | |
1738 def memory_info(self): | |
1739 # ============================================================ | |
1740 # | FIELD | DESCRIPTION | AKA | TOP | | |
1741 # ============================================================ | |
1742 # | rss | resident set size | | RES | | |
1743 # | vms | total program size | size | VIRT | | |
1744 # | shared | shared pages (from shared mappings) | | SHR | | |
1745 # | text | text ('code') | trs | CODE | | |
1746 # | lib | library (unused in Linux 2.6) | lrs | | | |
1747 # | data | data + stack | drs | DATA | | |
1748 # | dirty | dirty pages (unused in Linux 2.6) | dt | | | |
1749 # ============================================================ | |
1750 with open_binary("%s/%s/statm" % (self._procfs_path, self.pid)) as f: | |
1751 vms, rss, shared, text, lib, data, dirty = \ | |
1752 [int(x) * PAGESIZE for x in f.readline().split()[:7]] | |
1753 return pmem(rss, vms, shared, text, lib, data, dirty) | |
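# Sketch of the statm parsing above (hypothetical line, PAGESIZE == 4096): | |
#   b"2247 274 197 20 0 218 0" | |
# yields vms == 2247 * 4096, rss == 274 * 4096, shared == 197 * 4096, | |
# and so on for text, lib, data and dirty. | |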
1754 | |
1755 # /proc/pid/smaps does not exist on kernels < 2.6.14 or if | |
1756 # CONFIG_MMU kernel configuration option is not enabled. | |
1757 if HAS_SMAPS: | |
1758 | |
1759 @wrap_exceptions | |
1760 def memory_full_info( | |
1761 self, | |
1762 # Gets Private_Clean, Private_Dirty, Private_Hugetlb. | |
1763 _private_re=re.compile(br"\nPrivate.*:\s+(\d+)"), | |
1764 _pss_re=re.compile(br"\nPss\:\s+(\d+)"), | |
1765 _swap_re=re.compile(br"\nSwap\:\s+(\d+)")): | |
1766 basic_mem = self.memory_info() | |
1767 # Note: using 3 regexes is faster than reading the file | |
1768 # line by line. | |
1769 # XXX: on Python 3 the regexes are 30% slower than on | |
1770 # Python 2 though. Figure out why. | |
1771 # | |
1772 # You might be tempted to calculate USS by subtracting | |
1773 # the "shared" value from the "resident" value in | |
1774 # /proc/<pid>/statm. But at least on Linux, statm's "shared" | |
1775 # value actually counts pages backed by files, which has | |
1776 # little to do with whether the pages are actually shared. | |
1777 # /proc/self/smaps on the other hand appears to give us the | |
1778 # correct information. | |
1779 smaps_data = self._read_smaps_file() | |
1780 # Note: smaps file can be empty for certain processes. | |
1781 # The code below will not crash though; it will simply produce 0. | |
1782 uss = sum(map(int, _private_re.findall(smaps_data))) * 1024 | |
1783 pss = sum(map(int, _pss_re.findall(smaps_data))) * 1024 | |
1784 swap = sum(map(int, _swap_re.findall(smaps_data))) * 1024 | |
1785 return pfullmem(*basic_mem + (uss, pss, swap)) | |
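# Worked example of the sums above (hypothetical smaps fragment): two | |
# mappings reporting "Private_Dirty: 4 kB" and "Private_Clean: 8 kB" | |
# give uss == (4 + 8) * 1024 bytes; pss and swap are summed the same way. | |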
1786 | |
1787 else: | |
1788 memory_full_info = memory_info | |
1789 | |
1790 if HAS_SMAPS: | |
1791 | |
1792 @wrap_exceptions | |
1793 def memory_maps(self): | |
1794 """Return process's mapped memory regions as a list of named | |
1795 tuples. Fields are explained in 'man proc'; here is an updated | |
1796 (Apr 2012) version: http://goo.gl/fmebo | |
1797 | |
1798 /proc/{PID}/smaps does not exist on kernels < 2.6.14 or if | |
1799 CONFIG_MMU kernel configuration option is not enabled. | |
1800 """ | |
1801 def get_blocks(lines, current_block): | |
1802 data = {} | |
1803 for line in lines: | |
1804 fields = line.split(None, 5) | |
1805 if not fields[0].endswith(b':'): | |
1806 # new block section | |
1807 yield (current_block.pop(), data) | |
1808 current_block.append(line) | |
1809 else: | |
1810 try: | |
1811 data[fields[0]] = int(fields[1]) * 1024 | |
1812 except ValueError: | |
1813 if fields[0].startswith(b'VmFlags:'): | |
1814 # see issue #369 | |
1815 continue | |
1816 else: | |
1817 raise ValueError("don't know how to inte" | |
1818 "rpret line %r" % line) | |
1819 yield (current_block.pop(), data) | |
1820 | |
1821 data = self._read_smaps_file() | |
1822 # Note: smaps file can be empty for certain processes. | |
1823 if not data: | |
1824 return [] | |
1825 lines = data.split(b'\n') | |
1826 ls = [] | |
1827 first_line = lines.pop(0) | |
1828 current_block = [first_line] | |
1829 for header, data in get_blocks(lines, current_block): | |
1830 hfields = header.split(None, 5) | |
1831 try: | |
1832 addr, perms, offset, dev, inode, path = hfields | |
1833 except ValueError: | |
1834 addr, perms, offset, dev, inode, path = \ | |
1835 hfields + [''] | |
1836 if not path: | |
1837 path = '[anon]' | |
1838 else: | |
1839 if PY3: | |
1840 path = decode(path) | |
1841 path = path.strip() | |
1842 if (path.endswith(' (deleted)') and not | |
1843 path_exists_strict(path)): | |
1844 path = path[:-10] | |
1845 ls.append(( | |
1846 decode(addr), decode(perms), path, | |
1847 data.get(b'Rss:', 0), | |
1848 data.get(b'Size:', 0), | |
1849 data.get(b'Pss:', 0), | |
1850 data.get(b'Shared_Clean:', 0), | |
1851 data.get(b'Shared_Dirty:', 0), | |
1852 data.get(b'Private_Clean:', 0), | |
1853 data.get(b'Private_Dirty:', 0), | |
1854 data.get(b'Referenced:', 0), | |
1855 data.get(b'Anonymous:', 0), | |
1856 data.get(b'Swap:', 0) | |
1857 )) | |
1858 return ls | |
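# Hedged example of one smaps block consumed by get_blocks() above | |
# (shortened, hypothetical values): | |
#   00400000-0040b000 r-xp 00000000 08:01 1234 /bin/cat | |
#   Size: 44 kB | |
#   Rss: 20 kB | |
# The header line is split into (addr, perms, offset, dev, inode, path) | |
# and each "Name: N kB" line is stored as data[b'Name:'] == N * 1024. | |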
1859 | |
1860 @wrap_exceptions | |
1861 def cwd(self): | |
1862 try: | |
1863 return readlink("%s/%s/cwd" % (self._procfs_path, self.pid)) | |
1864 except (FileNotFoundError, ProcessLookupError): | |
1865 # https://github.com/giampaolo/psutil/issues/986 | |
1866 if not pid_exists(self.pid): | |
1867 raise NoSuchProcess(self.pid, self._name) | |
1868 else: | |
1869 raise ZombieProcess(self.pid, self._name, self._ppid) | |
1870 | |
1871 @wrap_exceptions | |
1872 def num_ctx_switches(self, | |
1873 _ctxsw_re=re.compile(br'ctxt_switches:\t(\d+)')): | |
1874 data = self._read_status_file() | |
1875 ctxsw = _ctxsw_re.findall(data) | |
1876 if not ctxsw: | |
1877 raise NotImplementedError( | |
1878 "'voluntary_ctxt_switches' and 'nonvoluntary_ctxt_switches'" | |
1879 "lines were not found in %s/%s/status; the kernel is " | |
1880 "probably older than 2.6.23" % ( | |
1881 self._procfs_path, self.pid)) | |
1882 else: | |
1883 return _common.pctxsw(int(ctxsw[0]), int(ctxsw[1])) | |
1884 | |
1885 @wrap_exceptions | |
1886 def num_threads(self, _num_threads_re=re.compile(br'Threads:\t(\d+)')): | |
1887 # Note: on Python 3 using a regex is faster than iterating over the | |
1888 # file line by line. On Python 2 it is the exact opposite, and | |
1889 # iterating over a file on Python 3 is slower than on Python 2. | |
1890 data = self._read_status_file() | |
1891 return int(_num_threads_re.findall(data)[0]) | |
1892 | |
1893 @wrap_exceptions | |
1894 def threads(self): | |
1895 thread_ids = os.listdir("%s/%s/task" % (self._procfs_path, self.pid)) | |
1896 thread_ids.sort() | |
1897 retlist = [] | |
1898 hit_enoent = False | |
1899 for thread_id in thread_ids: | |
1900 fname = "%s/%s/task/%s/stat" % ( | |
1901 self._procfs_path, self.pid, thread_id) | |
1902 try: | |
1903 with open_binary(fname) as f: | |
1904 st = f.read().strip() | |
1905 except FileNotFoundError: | |
1906 # no such file or directory; it means thread | |
1907 # disappeared on us | |
1908 hit_enoent = True | |
1909 continue | |
1910 # ignore the first two values ("pid (exe)") | |
1911 st = st[st.find(b')') + 2:] | |
1912 values = st.split(b' ') | |
1913 utime = float(values[11]) / CLOCK_TICKS | |
1914 stime = float(values[12]) / CLOCK_TICKS | |
1915 ntuple = _common.pthread(int(thread_id), utime, stime) | |
1916 retlist.append(ntuple) | |
1917 if hit_enoent: | |
1918 self._assert_alive() | |
1919 return retlist | |
1920 | |
1921 @wrap_exceptions | |
1922 def nice_get(self): | |
1923 # with open_text('%s/%s/stat' % (self._procfs_path, self.pid)) as f: | |
1924 # data = f.read() | |
1925 # return int(data.split()[18]) | |
1926 | |
1927 # Use C implementation | |
1928 return cext_posix.getpriority(self.pid) | |
1929 | |
1930 @wrap_exceptions | |
1931 def nice_set(self, value): | |
1932 return cext_posix.setpriority(self.pid, value) | |
1933 | |
1934 # starting from CentOS 6. | |
1935 if HAS_CPU_AFFINITY: | |
1936 | |
1937 @wrap_exceptions | |
1938 def cpu_affinity_get(self): | |
1939 return cext.proc_cpu_affinity_get(self.pid) | |
1940 | |
1941 def _get_eligible_cpus( | |
1942 self, _re=re.compile(br"Cpus_allowed_list:\t(\d+)-(\d+)")): | |
1943 # See: https://github.com/giampaolo/psutil/issues/956 | |
1944 data = self._read_status_file() | |
1945 match = _re.findall(data) | |
1946 if match: | |
1947 return list(range(int(match[0][0]), int(match[0][1]) + 1)) | |
1948 else: | |
1949 return list(range(len(per_cpu_times()))) | |
1950 | |
1951 @wrap_exceptions | |
1952 def cpu_affinity_set(self, cpus): | |
1953 try: | |
1954 cext.proc_cpu_affinity_set(self.pid, cpus) | |
1955 except (OSError, ValueError) as err: | |
1956 if isinstance(err, ValueError) or err.errno == errno.EINVAL: | |
1957 eligible_cpus = self._get_eligible_cpus() | |
1958 all_cpus = tuple(range(len(per_cpu_times()))) | |
1959 for cpu in cpus: | |
1960 if cpu not in all_cpus: | |
1961 raise ValueError( | |
1962 "invalid CPU number %r; choose between %s" % ( | |
1963 cpu, eligible_cpus)) | |
1964 if cpu not in eligible_cpus: | |
1965 raise ValueError( | |
1966 "CPU number %r is not eligible; choose " | |
1967 "between %s" % (cpu, eligible_cpus)) | |
1968 raise | |
1969 | |
1970 # only starting from kernel 2.6.13 | |
1971 if HAS_PROC_IO_PRIORITY: | |
1972 | |
1973 @wrap_exceptions | |
1974 def ionice_get(self): | |
1975 ioclass, value = cext.proc_ioprio_get(self.pid) | |
1976 if enum is not None: | |
1977 ioclass = IOPriority(ioclass) | |
1978 return _common.pionice(ioclass, value) | |
1979 | |
1980 @wrap_exceptions | |
1981 def ionice_set(self, ioclass, value): | |
1982 if value is None: | |
1983 value = 0 | |
1984 if value and ioclass in (IOPRIO_CLASS_IDLE, IOPRIO_CLASS_NONE): | |
1985 raise ValueError("%r ioclass accepts no value" % ioclass) | |
1986 if value < 0 or value > 7: | |
1987 raise ValueError("value not in 0-7 range") | |
1988 return cext.proc_ioprio_set(self.pid, ioclass, value) | |
1989 | |
1990 if HAS_PRLIMIT: | |
1991 | |
1992 @wrap_exceptions | |
1993 def rlimit(self, resource, limits=None): | |
1994 # If pid is 0 prlimit() applies to the calling process and | |
1995 # we don't want that. We should never get here though as | |
1996 # PID 0 is not supported on Linux. | |
1997 if self.pid == 0: | |
1998 raise ValueError("can't use prlimit() against PID 0 process") | |
1999 try: | |
2000 if limits is None: | |
2001 # get | |
2002 return cext.linux_prlimit(self.pid, resource) | |
2003 else: | |
2004 # set | |
2005 if len(limits) != 2: | |
2006 raise ValueError( | |
2007 "second argument must be a (soft, hard) tuple, " | |
2008 "got %s" % repr(limits)) | |
2009 soft, hard = limits | |
2010 cext.linux_prlimit(self.pid, resource, soft, hard) | |
2011 except OSError as err: | |
2012 if err.errno == errno.ENOSYS and pid_exists(self.pid): | |
2013 # I saw this happening on Travis: | |
2014 # https://travis-ci.org/giampaolo/psutil/jobs/51368273 | |
2015 raise ZombieProcess(self.pid, self._name, self._ppid) | |
2016 else: | |
2017 raise | |
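# Hedged usage sketch, assuming RLIMIT_NOFILE is among the RLIM* constants | |
# re-exported from the C extension at the top of this module and that p is | |
# an instance of this Process class: | |
# >>> p.rlimit(cext.RLIMIT_NOFILE)                 # get | |
# (1024, 4096) | |
# >>> p.rlimit(cext.RLIMIT_NOFILE, (2048, 4096))   # set | |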
2018 | |
2019 @wrap_exceptions | |
2020 def status(self): | |
2021 letter = self._parse_stat_file()['status'] | |
2022 if PY3: | |
2023 letter = letter.decode() | |
2024 # XXX is '?' legit? (we're not supposed to return it anyway) | |
2025 return PROC_STATUSES.get(letter, '?') | |
2026 | |
2027 @wrap_exceptions | |
2028 def open_files(self): | |
2029 retlist = [] | |
2030 files = os.listdir("%s/%s/fd" % (self._procfs_path, self.pid)) | |
2031 hit_enoent = False | |
2032 for fd in files: | |
2033 file = "%s/%s/fd/%s" % (self._procfs_path, self.pid, fd) | |
2034 try: | |
2035 path = readlink(file) | |
2036 except (FileNotFoundError, ProcessLookupError): | |
2037 # ENOENT == file which is gone in the meantime | |
2038 hit_enoent = True | |
2039 continue | |
2040 except OSError as err: | |
2041 if err.errno == errno.EINVAL: | |
2042 # not a link | |
2043 continue | |
2044 raise | |
2045 else: | |
2046 # If the path is not absolute there's no way to tell | |
2047 # whether it's a regular file or not, so we skip it. | |
2048 # A regular file is always supposed to have an | |
2049 # absolute path though. | |
2050 if path.startswith('/') and isfile_strict(path): | |
2051 # Get file position and flags. | |
2052 file = "%s/%s/fdinfo/%s" % ( | |
2053 self._procfs_path, self.pid, fd) | |
2054 try: | |
2055 with open_binary(file) as f: | |
2056 pos = int(f.readline().split()[1]) | |
2057 flags = int(f.readline().split()[1], 8) | |
2058 except FileNotFoundError: | |
2059 # fd gone in the meantime; process may | |
2060 # still be alive | |
2061 hit_enoent = True | |
2062 else: | |
2063 mode = file_flags_to_mode(flags) | |
2064 ntuple = popenfile( | |
2065 path, int(fd), int(pos), mode, flags) | |
2066 retlist.append(ntuple) | |
2067 if hit_enoent: | |
2068 self._assert_alive() | |
2069 return retlist | |
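# Sketch of the fdinfo parsing above, with hypothetical | |
# /proc/<pid>/fdinfo/<fd> content: | |
#   pos:    4096 | |
#   flags:  0100002 | |
# pos becomes 4096 and flags is parsed as octal (base 8), which | |
# file_flags_to_mode() then turns into an 'r'/'w'-style mode string. | |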
2070 | |
2071 @wrap_exceptions | |
2072 def connections(self, kind='inet'): | |
2073 ret = _connections.retrieve(kind, self.pid) | |
2074 self._assert_alive() | |
2075 return ret | |
2076 | |
2077 @wrap_exceptions | |
2078 def num_fds(self): | |
2079 return len(os.listdir("%s/%s/fd" % (self._procfs_path, self.pid))) | |
2080 | |
2081 @wrap_exceptions | |
2082 def ppid(self): | |
2083 return int(self._parse_stat_file()['ppid']) | |
2084 | |
2085 @wrap_exceptions | |
2086 def uids(self, _uids_re=re.compile(br'Uid:\t(\d+)\t(\d+)\t(\d+)')): | |
2087 data = self._read_status_file() | |
2088 real, effective, saved = _uids_re.findall(data)[0] | |
2089 return _common.puids(int(real), int(effective), int(saved)) | |
2090 | |
2091 @wrap_exceptions | |
2092 def gids(self, _gids_re=re.compile(br'Gid:\t(\d+)\t(\d+)\t(\d+)')): | |
2093 data = self._read_status_file() | |
2094 real, effective, saved = _gids_re.findall(data)[0] | |
2095 return _common.pgids(int(real), int(effective), int(saved)) |