Vendor dependencies

Let's see how I like this workflow.
This commit is contained in:
John Doty 2022-12-19 08:27:18 -08:00
parent 34d1830413
commit 9c435dc440
7500 changed files with 1665121 additions and 99 deletions

1
vendor/procfs/.cargo-checksum.json vendored Normal file
View file

@@ -0,0 +1 @@
{"files":{"COPYRIGHT.txt":"8e7164612c30045b3aece9dcc22e6af6754545e2a60720aa0bb7eaed493116ef","Cargo.lock":"418c2055cafdf971d393bf7926d564938d3c783b0dc751c2f7d42a19a8d556c6","Cargo.toml":"17b989a73bf548ec18cb9aa274c7b26955c8a59b6f0c480f2d9b08a3b073ee97","LICENSE-APACHE":"c6596eb7be8581c18be736c846fb9173b69eccf6ef94c5135893ec56bd92ba08","LICENSE-MIT":"c5bbf39118b0639bf8bd391ae0d7d81f25c1cb4066e0fdae6a405b20fb7ca170","README.md":"af6bb9dc0e77dfa88f32dfa9a174d1292f8d3ccd9b067d5e1f609ef4b385f165","benches/cpuinfo.rs":"d1624b6326271000c689bb8648a31b2b57c0184b7c72599adb67722b8625cf3c","build.rs":"fc83fa80b8bd00251564fa1349448916e0b52a4e7c181b706663f535803e276f","clippy.toml":"747a6ac7cd2e79380df310bdc32ad330e2f723c3b7417a6f6c89a4b0c29b4c48","examples/README.md":"67828c48ad5f58fbaa3efcc00704a11a024a5f16a2ff3be234e7af3a4e6c6ea4","examples/diskstat.rs":"396e448cb1e7debf8e797bf776c564036ace318695122da2860f76d7ced9e574","examples/dump.rs":"8fa8faa531b5d8622017bde6208151be611998666233378346c52bce26e69797","examples/interface_stats.rs":"63c100e40039d73778fb997b1cfa23fde13df1b2d1308b2caa0ca52bc0565839","examples/lslocks.rs":"a9e82a62eed2b9e57a7e552ed7c9470a5601119903db8e2f7a0edca1b7fb0520","examples/lsmod.rs":"951c031172e78aa92f4c66e0330cdfdc21fc6de0493b34f8101e145fd05545af","examples/mountinfo.rs":"ef96e8dfdbe1014580cfb3729c7dc89492642dd7175f039bf56f0ffbaede9c0d","examples/netstat.rs":"d8f144495978e16f869d5f90be7bf4754c12a62098f5c670aac131fac8f9bf13","examples/pressure.rs":"b3b3bcbab68646c9f9b9b83410e698be7755dda4f87ccca109dfaa4e27f51b38","examples/process_hierarchy.rs":"62976e32c9c3e35dbfc962d1608c7b6343e6965617b2f3378ffca06d697b3253","examples/ps.rs":"a7f3ab5289b2c9b193f0a659d80a57c34924663bf811facac9ef3f987f99a8fb","examples/self_memory.rs":"c5754d76ef23f685fc86d1973d274f750f238ef3302940563ea7bb88ae5e7699","examples/shm.rs":"e0c42ea7801751b422a3e5c1b80cbe0fa15ce6a10eb6718f639cc104f5a03174","rustfmt.toml":"e090969e99df9360705680cc0097cfaddae10c22dc2e01470592cf3b9787fd36","src/c
groups.rs":"df221b4114a5c8246d22c89678b98cbac0d8e8c3f0e05b0b0c11273b97203649","src/cpuinfo.rs":"b7cddc32d2d634671dfacea7a534d0a069792f0ee03266347e34b46ac5aca02c","src/diskstats.rs":"c5319bc7f4c81cf9e3db6bfc5da306fabda39ef2fc2b7f23fb025403303ca1d3","src/keyring.rs":"e56db631fcba7153383169794279a54d4d24f1420c6a9c690184150bbda5a935","src/lib.rs":"e828263f5c7664a3ca92f2b1cba8a3b943aae6c53f013c44a87856de010c3534","src/locks.rs":"d3065d66d895f8325e9967721e9790e6d7f140ae3a3f3377ce6d9b214eea13a7","src/meminfo.rs":"84fa301a336ab599d3067b84b20ab7193524831206bf941b53aca9c54cd3bf47","src/net.rs":"cda018393a8b2b84d78ae3ac4cd168daa0e4ac415353f6b36fbe00ff94f01c2e","src/pressure.rs":"7927cfaf4a80f1006e47c5716a6c3326f5e151f67ae10731760afc1480efb2e6","src/process/limit.rs":"dcb4a7f7109f878db92dd21bf7eae224de263d1edf1b830a8b38d528d90b8a4f","src/process/mod.rs":"98266b5f084bfee1a01a035f43367fe5135dcb4d27ad6e87f8515b55b80e5bf0","src/process/mount.rs":"cf42bdee65fb09391cdd237dd98840ac80e55d3bcec99a43d05a451141b2133e","src/process/namespaces.rs":"c91255e69b6f1474aff3a27fa8241f1ddcef8e5c6c730e3bc3a117796b4f51fa","src/process/pagemap.rs":"3743c6c5c1225205f6f81d68cf47f38082285ac247a0b826a6c7b53f3ea78783","src/process/schedstat.rs":"22186736460216578b679e54f9febaaf5346f426ac823244eb8aeb173c55addb","src/process/smaps_rollup.rs":"1bf539cef1451b1da60deb5883839e7e7801555a8b2ce526abcf5806310cc834","src/process/stat.rs":"8e8adb19a02828a9853706f17c7ed33004f458735c8a9c4fd078686b248e3737","src/process/status.rs":"956408b0317503fa61787a7f32f40729afa48bd3d11618e59803ef800a4f2937","src/process/task.rs":"60690d9c4b4ecd6ce4aedafdca05f67d21b8f2f494023d0608ccff48bf65030b","src/process/tests.rs":"7a00e216fc3de9fcede5e8b2110d1072f8d95597cfd23abf1250a2acf0eb1b4a","src/sys/fs/binfmt_misc.rs":"87c28497f3c44f2063b932b53d57f679307f72ec2578c0df8c297be154423aef","src/sys/fs/epoll.rs":"845eebd9cf13a034ed0768a2ce079957739c856f8824c326273d945432977fed","src/sys/fs/mod.rs":"38e709d9b1f7c6d87c701684badf7d24d57ad8ef5e8cc5b
0f1cb7f6ec33b8285","src/sys/kernel/keys.rs":"c6492123eb6990cc6283df457c6a770aff71a2748a2aa92be224db1797007c93","src/sys/kernel/mod.rs":"9793686c008bec2509e3a0327b326d80cfd4075486107f46f36b5e0cd085d49d","src/sys/kernel/random.rs":"3e2e9855a5c37bfb07789650a58576acc8ec5d919e3bbd62d1823404fd28e722","src/sys/mod.rs":"1853ac1ccdc3615c3d6a47a5c9eca7c1d63819dc1ea43dd07801d3bf84c45829","src/sys/vm.rs":"ddad15dd74af386ecbb5d7743be7c9eacafff10fb5847cc53a8b0bb69952a6fd","src/sysvipc_shm.rs":"51ed5496b167eb3292fd1e1aadc44b3aa805a0f47a6b78ac9faddd44efd7631c","src/uptime.rs":"e450a94ddb0364b025254fba35943481c1f9641bdca0d8d14d6d748939fb3f3f","support.md":"7306064c907080ff941b479d6e5337595b390090c9b382401dd796339efe4cc7"},"package":"b1de8dacb0873f77e6aefc6d71e044761fcc68060290f5b1089fcdf84626bb69"}

483
vendor/procfs/COPYRIGHT.txt vendored Normal file
View file

@@ -0,0 +1,483 @@
The source code for the procfs library is copyright by Andrew Chin, 2019, and other contributors.
It is licensed under either of
* Apache License, Version 2.0, http://www.apache.org/licenses/LICENSE-2.0
* MIT license, http://opensource.org/licenses/MIT
at your option.
The documentation of this library is derived from documentation written by others:
* The proc(5) man page:
Copyright (C) 1994, 1995 by Daniel Quinlan (quinlan@yggdrasil.com)
and Copyright (C) 2002-2008,2017 Michael Kerrisk <mtk.manpages@gmail.com>
with networking additions from Alan Cox (A.Cox@swansea.ac.uk)
and scsi additions from Michael Neuffer (neuffer@mail.uni-mainz.de)
and sysctl additions from Andries Brouwer (aeb@cwi.nl)
and System V IPC (as well as various other) additions from
Michael Kerrisk <mtk.manpages@gmail.com>
Under the GNU Free Documentation License (reproduced below).
* Other manual pages:
Copyright (c) 2006, 2008 by Michael Kerrisk <mtk.manpages@gmail.com>
Under the following license:
Permission is granted to make and distribute verbatim copies of this
manual provided the copyright notice and this permission notice are
preserved on all copies.
Permission is granted to copy and distribute modified versions of this
manual under the conditions for verbatim copying, provided that the
entire resulting derived work is distributed under the terms of a
permission notice identical to this one.
Since the Linux kernel and libraries are constantly changing, this
manual page may be incorrect or out-of-date. The author(s) assume no
responsibility for errors or omissions, or for damages resulting from
the use of the information contained herein. The author(s) may not
have taken the same level of care in the production of this manual,
which is licensed free of charge, as they might when working
professionally.
Formatted or processed versions of this manual, if unaccompanied by
the source, must acknowledge the copyright and authors of this work.
* The Linux Documentation Project:
Copyright 2003 Binh Nguyen
Under the GNU Free Documentation License. See: http://tldp.org/LDP/Linux-Filesystem-Hierarchy/html/ln14.html
==================================
Below is a copy of the GPL license:
This is free documentation; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of
the License, or (at your option) any later version.
The GNU General Public License's references to "object code"
and "executables" are to be interpreted as the output of any
document formatting or typesetting system, including
intermediate and printed output.
This manual is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public
License along with this manual; if not, see
<http://www.gnu.org/licenses/>.
==================================
A full copy of the GNU Free Documentation License, version 1.2, can be found here:
https://www.gnu.org/licenses/old-licenses/fdl-1.2.txt
Below is a copy of this license:
GNU Free Documentation License
Version 1.2, November 2002
Copyright (C) 2000,2001,2002 Free Software Foundation, Inc.
51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
0. PREAMBLE
The purpose of this License is to make a manual, textbook, or other
functional and useful document "free" in the sense of freedom: to
assure everyone the effective freedom to copy and redistribute it,
with or without modifying it, either commercially or noncommercially.
Secondarily, this License preserves for the author and publisher a way
to get credit for their work, while not being considered responsible
for modifications made by others.
This License is a kind of "copyleft", which means that derivative
works of the document must themselves be free in the same sense. It
complements the GNU General Public License, which is a copyleft
license designed for free software.
We have designed this License in order to use it for manuals for free
software, because free software needs free documentation: a free
program should come with manuals providing the same freedoms that the
software does. But this License is not limited to software manuals;
it can be used for any textual work, regardless of subject matter or
whether it is published as a printed book. We recommend this License
principally for works whose purpose is instruction or reference.
1. APPLICABILITY AND DEFINITIONS
This License applies to any manual or other work, in any medium, that
contains a notice placed by the copyright holder saying it can be
distributed under the terms of this License. Such a notice grants a
world-wide, royalty-free license, unlimited in duration, to use that
work under the conditions stated herein. The "Document", below,
refers to any such manual or work. Any member of the public is a
licensee, and is addressed as "you". You accept the license if you
copy, modify or distribute the work in a way requiring permission
under copyright law.
A "Modified Version" of the Document means any work containing the
Document or a portion of it, either copied verbatim, or with
modifications and/or translated into another language.
A "Secondary Section" is a named appendix or a front-matter section of
the Document that deals exclusively with the relationship of the
publishers or authors of the Document to the Document's overall subject
(or to related matters) and contains nothing that could fall directly
within that overall subject. (Thus, if the Document is in part a
textbook of mathematics, a Secondary Section may not explain any
mathematics.) The relationship could be a matter of historical
connection with the subject or with related matters, or of legal,
commercial, philosophical, ethical or political position regarding
them.
The "Invariant Sections" are certain Secondary Sections whose titles
are designated, as being those of Invariant Sections, in the notice
that says that the Document is released under this License. If a
section does not fit the above definition of Secondary then it is not
allowed to be designated as Invariant. The Document may contain zero
Invariant Sections. If the Document does not identify any Invariant
Sections then there are none.
The "Cover Texts" are certain short passages of text that are listed,
as Front-Cover Texts or Back-Cover Texts, in the notice that says that
the Document is released under this License. A Front-Cover Text may
be at most 5 words, and a Back-Cover Text may be at most 25 words.
A "Transparent" copy of the Document means a machine-readable copy,
represented in a format whose specification is available to the
general public, that is suitable for revising the document
straightforwardly with generic text editors or (for images composed of
pixels) generic paint programs or (for drawings) some widely available
drawing editor, and that is suitable for input to text formatters or
for automatic translation to a variety of formats suitable for input
to text formatters. A copy made in an otherwise Transparent file
format whose markup, or absence of markup, has been arranged to thwart
or discourage subsequent modification by readers is not Transparent.
An image format is not Transparent if used for any substantial amount
of text. A copy that is not "Transparent" is called "Opaque".
Examples of suitable formats for Transparent copies include plain
ASCII without markup, Texinfo input format, LaTeX input format, SGML
or XML using a publicly available DTD, and standard-conforming simple
HTML, PostScript or PDF designed for human modification. Examples of
transparent image formats include PNG, XCF and JPG. Opaque formats
include proprietary formats that can be read and edited only by
proprietary word processors, SGML or XML for which the DTD and/or
processing tools are not generally available, and the
machine-generated HTML, PostScript or PDF produced by some word
processors for output purposes only.
The "Title Page" means, for a printed book, the title page itself,
plus such following pages as are needed to hold, legibly, the material
this License requires to appear in the title page. For works in
formats which do not have any title page as such, "Title Page" means
the text near the most prominent appearance of the work's title,
preceding the beginning of the body of the text.
A section "Entitled XYZ" means a named subunit of the Document whose
title either is precisely XYZ or contains XYZ in parentheses following
text that translates XYZ in another language. (Here XYZ stands for a
specific section name mentioned below, such as "Acknowledgements",
"Dedications", "Endorsements", or "History".) To "Preserve the Title"
of such a section when you modify the Document means that it remains a
section "Entitled XYZ" according to this definition.
The Document may include Warranty Disclaimers next to the notice which
states that this License applies to the Document. These Warranty
Disclaimers are considered to be included by reference in this
License, but only as regards disclaiming warranties: any other
implication that these Warranty Disclaimers may have is void and has
no effect on the meaning of this License.
2. VERBATIM COPYING
You may copy and distribute the Document in any medium, either
commercially or noncommercially, provided that this License, the
copyright notices, and the license notice saying this License applies
to the Document are reproduced in all copies, and that you add no other
conditions whatsoever to those of this License. You may not use
technical measures to obstruct or control the reading or further
copying of the copies you make or distribute. However, you may accept
compensation in exchange for copies. If you distribute a large enough
number of copies you must also follow the conditions in section 3.
You may also lend copies, under the same conditions stated above, and
you may publicly display copies.
3. COPYING IN QUANTITY
If you publish printed copies (or copies in media that commonly have
printed covers) of the Document, numbering more than 100, and the
Document's license notice requires Cover Texts, you must enclose the
copies in covers that carry, clearly and legibly, all these Cover
Texts: Front-Cover Texts on the front cover, and Back-Cover Texts on
the back cover. Both covers must also clearly and legibly identify
you as the publisher of these copies. The front cover must present
the full title with all words of the title equally prominent and
visible. You may add other material on the covers in addition.
Copying with changes limited to the covers, as long as they preserve
the title of the Document and satisfy these conditions, can be treated
as verbatim copying in other respects.
If the required texts for either cover are too voluminous to fit
legibly, you should put the first ones listed (as many as fit
reasonably) on the actual cover, and continue the rest onto adjacent
pages.
If you publish or distribute Opaque copies of the Document numbering
more than 100, you must either include a machine-readable Transparent
copy along with each Opaque copy, or state in or with each Opaque copy
a computer-network location from which the general network-using
public has access to download using public-standard network protocols
a complete Transparent copy of the Document, free of added material.
If you use the latter option, you must take reasonably prudent steps,
when you begin distribution of Opaque copies in quantity, to ensure
that this Transparent copy will remain thus accessible at the stated
location until at least one year after the last time you distribute an
Opaque copy (directly or through your agents or retailers) of that
edition to the public.
It is requested, but not required, that you contact the authors of the
Document well before redistributing any large number of copies, to give
them a chance to provide you with an updated version of the Document.
4. MODIFICATIONS
You may copy and distribute a Modified Version of the Document under
the conditions of sections 2 and 3 above, provided that you release
the Modified Version under precisely this License, with the Modified
Version filling the role of the Document, thus licensing distribution
and modification of the Modified Version to whoever possesses a copy
of it. In addition, you must do these things in the Modified Version:
A. Use in the Title Page (and on the covers, if any) a title distinct
from that of the Document, and from those of previous versions
(which should, if there were any, be listed in the History section
of the Document). You may use the same title as a previous version
if the original publisher of that version gives permission.
B. List on the Title Page, as authors, one or more persons or entities
responsible for authorship of the modifications in the Modified
Version, together with at least five of the principal authors of the
Document (all of its principal authors, if it has fewer than five),
unless they release you from this requirement.
C. State on the Title page the name of the publisher of the
Modified Version, as the publisher.
D. Preserve all the copyright notices of the Document.
E. Add an appropriate copyright notice for your modifications
adjacent to the other copyright notices.
F. Include, immediately after the copyright notices, a license notice
giving the public permission to use the Modified Version under the
terms of this License, in the form shown in the Addendum below.
G. Preserve in that license notice the full lists of Invariant Sections
and required Cover Texts given in the Document's license notice.
H. Include an unaltered copy of this License.
I. Preserve the section Entitled "History", Preserve its Title, and add
to it an item stating at least the title, year, new authors, and
publisher of the Modified Version as given on the Title Page. If
there is no section Entitled "History" in the Document, create one
stating the title, year, authors, and publisher of the Document as
given on its Title Page, then add an item describing the Modified
Version as stated in the previous sentence.
J. Preserve the network location, if any, given in the Document for
public access to a Transparent copy of the Document, and likewise
the network locations given in the Document for previous versions
it was based on. These may be placed in the "History" section.
You may omit a network location for a work that was published at
least four years before the Document itself, or if the original
publisher of the version it refers to gives permission.
K. For any section Entitled "Acknowledgements" or "Dedications",
Preserve the Title of the section, and preserve in the section all
the substance and tone of each of the contributor acknowledgements
and/or dedications given therein.
L. Preserve all the Invariant Sections of the Document,
unaltered in their text and in their titles. Section numbers
or the equivalent are not considered part of the section titles.
M. Delete any section Entitled "Endorsements". Such a section
may not be included in the Modified Version.
N. Do not retitle any existing section to be Entitled "Endorsements"
or to conflict in title with any Invariant Section.
O. Preserve any Warranty Disclaimers.
If the Modified Version includes new front-matter sections or
appendices that qualify as Secondary Sections and contain no material
copied from the Document, you may at your option designate some or all
of these sections as invariant. To do this, add their titles to the
list of Invariant Sections in the Modified Version's license notice.
These titles must be distinct from any other section titles.
You may add a section Entitled "Endorsements", provided it contains
nothing but endorsements of your Modified Version by various
parties--for example, statements of peer review or that the text has
been approved by an organization as the authoritative definition of a
standard.
You may add a passage of up to five words as a Front-Cover Text, and a
passage of up to 25 words as a Back-Cover Text, to the end of the list
of Cover Texts in the Modified Version. Only one passage of
Front-Cover Text and one of Back-Cover Text may be added by (or
through arrangements made by) any one entity. If the Document already
includes a cover text for the same cover, previously added by you or
by arrangement made by the same entity you are acting on behalf of,
you may not add another; but you may replace the old one, on explicit
permission from the previous publisher that added the old one.
The author(s) and publisher(s) of the Document do not by this License
give permission to use their names for publicity for or to assert or
imply endorsement of any Modified Version.
5. COMBINING DOCUMENTS
You may combine the Document with other documents released under this
License, under the terms defined in section 4 above for modified
versions, provided that you include in the combination all of the
Invariant Sections of all of the original documents, unmodified, and
list them all as Invariant Sections of your combined work in its
license notice, and that you preserve all their Warranty Disclaimers.
The combined work need only contain one copy of this License, and
multiple identical Invariant Sections may be replaced with a single
copy. If there are multiple Invariant Sections with the same name but
different contents, make the title of each such section unique by
adding at the end of it, in parentheses, the name of the original
author or publisher of that section if known, or else a unique number.
Make the same adjustment to the section titles in the list of
Invariant Sections in the license notice of the combined work.
In the combination, you must combine any sections Entitled "History"
in the various original documents, forming one section Entitled
"History"; likewise combine any sections Entitled "Acknowledgements",
and any sections Entitled "Dedications". You must delete all sections
Entitled "Endorsements".
6. COLLECTIONS OF DOCUMENTS
You may make a collection consisting of the Document and other documents
released under this License, and replace the individual copies of this
License in the various documents with a single copy that is included in
the collection, provided that you follow the rules of this License for
verbatim copying of each of the documents in all other respects.
You may extract a single document from such a collection, and distribute
it individually under this License, provided you insert a copy of this
License into the extracted document, and follow this License in all
other respects regarding verbatim copying of that document.
7. AGGREGATION WITH INDEPENDENT WORKS
A compilation of the Document or its derivatives with other separate
and independent documents or works, in or on a volume of a storage or
distribution medium, is called an "aggregate" if the copyright
resulting from the compilation is not used to limit the legal rights
of the compilation's users beyond what the individual works permit.
When the Document is included in an aggregate, this License does not
apply to the other works in the aggregate which are not themselves
derivative works of the Document.
If the Cover Text requirement of section 3 is applicable to these
copies of the Document, then if the Document is less than one half of
the entire aggregate, the Document's Cover Texts may be placed on
covers that bracket the Document within the aggregate, or the
electronic equivalent of covers if the Document is in electronic form.
Otherwise they must appear on printed covers that bracket the whole
aggregate.
8. TRANSLATION
Translation is considered a kind of modification, so you may
distribute translations of the Document under the terms of section 4.
Replacing Invariant Sections with translations requires special
permission from their copyright holders, but you may include
translations of some or all Invariant Sections in addition to the
original versions of these Invariant Sections. You may include a
translation of this License, and all the license notices in the
Document, and any Warranty Disclaimers, provided that you also include
the original English version of this License and the original versions
of those notices and disclaimers. In case of a disagreement between
the translation and the original version of this License or a notice
or disclaimer, the original version will prevail.
If a section in the Document is Entitled "Acknowledgements",
"Dedications", or "History", the requirement (section 4) to Preserve
its Title (section 1) will typically require changing the actual
title.
9. TERMINATION
You may not copy, modify, sublicense, or distribute the Document except
as expressly provided for under this License. Any other attempt to
copy, modify, sublicense or distribute the Document is void, and will
automatically terminate your rights under this License. However,
parties who have received copies, or rights, from you under this
License will not have their licenses terminated so long as such
parties remain in full compliance.
10. FUTURE REVISIONS OF THIS LICENSE
The Free Software Foundation may publish new, revised versions
of the GNU Free Documentation License from time to time. Such new
versions will be similar in spirit to the present version, but may
differ in detail to address new problems or concerns. See
https://www.gnu.org/licenses/.
Each version of the License is given a distinguishing version number.
If the Document specifies that a particular numbered version of this
License "or any later version" applies to it, you have the option of
following the terms and conditions either of that specified version or
of any later version that has been published (not as a draft) by the
Free Software Foundation. If the Document does not specify a version
number of this License, you may choose any version ever published (not
as a draft) by the Free Software Foundation.
ADDENDUM: How to use this License for your documents
To use this License in a document you have written, include a copy of
the License in the document and put the following copyright and
license notices just after the title page:
Copyright (c) YEAR YOUR NAME.
Permission is granted to copy, distribute and/or modify this document
under the terms of the GNU Free Documentation License, Version 1.2
or any later version published by the Free Software Foundation;
with no Invariant Sections, no Front-Cover Texts, and no Back-Cover Texts.
A copy of the license is included in the section entitled "GNU
Free Documentation License".
If you have Invariant Sections, Front-Cover Texts and Back-Cover Texts,
replace the "with...Texts." line with this:
with the Invariant Sections being LIST THEIR TITLES, with the
Front-Cover Texts being LIST, and with the Back-Cover Texts being LIST.
If you have Invariant Sections without Cover Texts, or some other
combination of the three, merge those two alternatives to suit the
situation.
If your document contains nontrivial examples of program code, we
recommend releasing these examples in parallel under your choice of
free software license, such as the GNU General Public License,
to permit their use in free software.

1039
vendor/procfs/Cargo.lock generated vendored Normal file

File diff suppressed because it is too large Load diff

96
vendor/procfs/Cargo.toml vendored Normal file
View file

@@ -0,0 +1,96 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
#
# When uploading crates to the registry Cargo will automatically
# "normalize" Cargo.toml files for maximal compatibility
# with all versions of Cargo and also rewrite `path` dependencies
# to registry (e.g., crates.io) dependencies.
#
# If you are reading this file be aware that the original Cargo.toml
# will likely look very different (and much more reasonable).
# See Cargo.toml.orig for the original contents.
[package]
edition = "2018"
rust-version = "1.48"
name = "procfs"
version = "0.14.2"
authors = ["Andrew Chin <achin@eminence32.net>"]
description = "Interface to the linux procfs pseudo-filesystem"
documentation = "https://docs.rs/procfs/"
readme = "README.md"
keywords = [
"procfs",
"proc",
"linux",
"process",
]
categories = [
"os::unix-apis",
"filesystem",
]
license = "MIT OR Apache-2.0"
repository = "https://github.com/eminence/procfs"
[package.metadata.docs.rs]
all-features = true
[[bench]]
name = "cpuinfo"
harness = false
[dependencies.backtrace]
version = "0.3"
optional = true
[dependencies.bitflags]
version = "1.2"
[dependencies.byteorder]
version = "1.2.3"
features = ["i128"]
[dependencies.chrono]
version = "0.4.20"
features = ["clock"]
optional = true
default-features = false
[dependencies.flate2]
version = "1.0.3"
optional = true
[dependencies.hex]
version = "0.4"
[dependencies.lazy_static]
version = "1.0.2"
[dependencies.rustix]
version = "0.36.0"
features = [
"fs",
"process",
"param",
"thread",
]
[dependencies.serde]
version = "1.0"
features = ["derive"]
optional = true
[dev-dependencies.criterion]
version = "0.3"
[dev-dependencies.failure]
version = "0.1"
[dev-dependencies.procinfo]
version = "0.4.2"
[features]
default = [
"chrono",
"flate2",
]
serde1 = ["serde"]

202
vendor/procfs/LICENSE-APACHE vendored Normal file
View file

@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright {yyyy} {name of copyright owner}
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

19
vendor/procfs/LICENSE-MIT vendored Normal file
View file

@ -0,0 +1,19 @@
Copyright (c) 2015 The procfs Developers
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

105
vendor/procfs/README.md vendored Normal file
View file

@ -0,0 +1,105 @@
procfs
======
[![Crate](https://img.shields.io/crates/v/procfs.svg)](https://crates.io/crates/procfs)
[![Docs](https://docs.rs/procfs/badge.svg)](https://docs.rs/procfs)
[![Minimum rustc version](https://img.shields.io/badge/rustc-1.48+-lightgray.svg)](https://github.com/eminence/procfs#minimum-rust-version)
This crate is an interface to the `proc` pseudo-filesystem on linux, which is normally mounted as `/proc`.
Long-term, this crate aims to be fairly feature complete, but at the moment not all files are exposed.
See the docs for info on what's supported, or view the [support.md](https://github.com/eminence/procfs/blob/master/support.md)
file in the code repository.
## Examples
There are several examples in the docs and in the [examples folder](https://github.com/eminence/procfs/tree/master/examples)
of the code repository.
Here's a small example that prints out all processes that are running on the same tty as the calling
process. This is very similar to what "ps" does in its default mode:
```rust
fn main() {
let me = procfs::process::Process::myself().unwrap();
let me_stat = me.stat().unwrap();
let tps = procfs::ticks_per_second().unwrap();
println!("{: >5} {: <8} {: >8} {}", "PID", "TTY", "TIME", "CMD");
let tty = format!("pty/{}", me_stat.tty_nr().1);
for prc in procfs::process::all_processes().unwrap() {
let prc = prc.unwrap();
let stat = prc.stat().unwrap();
if stat.tty_nr == me_stat.tty_nr {
// total_time is in seconds
let total_time =
(stat.utime + stat.stime) as f32 / (tps as f32);
println!(
"{: >5} {: <8} {: >8} {}",
stat.pid, tty, total_time, stat.comm
);
}
}
}
```
Here's another example that shows how to get the current memory usage of the current process:
```rust
use procfs::process::Process;
fn main() {
let me = Process::myself().unwrap();
let me_stat = me.stat().unwrap();
println!("PID: {}", me.pid);
let page_size = procfs::page_size().unwrap() as u64;
println!("Memory page size: {}", page_size);
println!("== Data from /proc/self/stat:");
println!("Total virtual memory used: {} bytes", me_stat.vsize);
println!("Total resident set: {} pages ({} bytes)", me_stat.rss, me_stat.rss as u64 * page_size);
}
```
There are a few ways to get this data, so also checkout the longer
[self_memory](https://github.com/eminence/procfs/blob/master/examples/self_memory.rs) example for more
details.
## Cargo features
The following cargo features are available:
* `chrono` -- Default. Optional. This feature enables a few methods that return values as `DateTime` objects.
* `flate2` -- Default. Optional. This feature enables parsing gzip compressed `/proc/config.gz` file via the `procfs::kernel_config` method.
* `backtrace` -- Optional. This feature lets you get a stack trace whenever an `InternalError` is raised.
* `serde1` -- Optional. This feature allows most structs to be serialized and deserialized using serde 1.0. Note, this
feature requires a version of rust newer than 1.48.0 (which is the MSRV for procfs). The exact version required is not
specified here, since serde does not have an MSRV policy.
## Minimum Rust Version
This crate requires a minimum rust version of 1.48.0 (2020-11-19).
## License
The procfs library is licensed under either of
* Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
* MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT)
at your option.
For additional copyright information regarding documentation, please also see the COPYRIGHT.txt file.
### Contribution
Contributions are welcome, especially in the areas of documentation and testing on older kernels.
Unless you explicitly state otherwise, any contribution intentionally
submitted for inclusion in the work by you, as defined in the Apache-2.0
license, shall be dual licensed as above, without any additional terms or
conditions.

16
vendor/procfs/benches/cpuinfo.rs vendored Normal file
View file

@ -0,0 +1,16 @@
use criterion::{black_box, criterion_group, criterion_main, Criterion};
use procfs::CpuInfo;

// Benchmarks for parsing and querying /proc/cpuinfo.
// `black_box` keeps the optimizer from eliding the measured work.
fn bench_cpuinfo(c: &mut Criterion) {
    // Full parse of /proc/cpuinfo on every iteration.
    c.bench_function("CpuInfo::new", |b| b.iter(|| black_box(CpuInfo::new().unwrap())));

    // Parse once up front, then benchmark the per-cpu accessors (all on cpu 0).
    let cpuinfo = black_box(CpuInfo::new().unwrap());
    c.bench_function("CpuInfo::get_info", |b| b.iter(|| black_box(cpuinfo.get_info(0))));
    c.bench_function("CpuInfo::model_name", |b| b.iter(|| cpuinfo.model_name(0)));
    c.bench_function("CpuInfo::vendor_id", |b| b.iter(|| cpuinfo.vendor_id(0)));
    c.bench_function("CpuInfo::physical_id", |b| b.iter(|| cpuinfo.physical_id(0)));
    c.bench_function("CpuInfo::flags", |b| b.iter(|| cpuinfo.flags(0)));
}
criterion_group!(benches, bench_cpuinfo);
criterion_main!(benches);

9
vendor/procfs/build.rs vendored Normal file
View file

@ -0,0 +1,9 @@
/// Build script: refuse to build on targets that don't expose /proc.
fn main() {
    // Filters are extracted from `libc` filters
    let target_os = std::env::var("CARGO_CFG_TARGET_OS").expect("Missing CARGO_CFG_TARGET_OS envvar");
    if !["android", "linux", "l4re"].contains(&target_os.as_str()) {
        // Fixed garbled message ("Building procfs on an for a unsupported platform")
        eprintln!("Building procfs for an unsupported platform. Currently only linux and android are supported");
        eprintln!("(Your current target_os is {})", target_os);
        // Exit non-zero so cargo reports a build failure instead of continuing.
        std::process::exit(1)
    }
}

1
vendor/procfs/clippy.toml vendored Normal file
View file

@ -0,0 +1 @@
msrv = "1.48"

144
vendor/procfs/examples/README.md vendored Normal file
View file

@ -0,0 +1,144 @@
# Examples
These examples can be run by running `cargo run --example example_name`
## dump.rs
Prints out details about the current process (the dumper itself), or a process specified by PID
## interface_stats.rs
Runs continually and prints out how many bytes/packets are sent/received. Press ctrl-c to exit the example:
```text
Interface: bytes recv bytes sent
================ ==================== ====================
br-883c4c992deb: 823307769 0.2 kbps 1537694158 0.5 kbps
br-d73af6e6d094: 9137600399 0.9 kbps 2334717319 0.4 kbps
docker0: 2938964881 0.6 kbps 19291691656 11.4 kbps
docker_gwbridge: 1172300 0.0 kbps 15649536 0.0 kbps
enp5s0f0: 44643307888420 5599.8 kbps 1509415976135 99.0 kbps
enp5s0f1: 0 0.0 kbps 0 0.0 kbps
lo: 161143108162 0.4 kbps 161143108162 0.4 kbps
veth3154ff3: 3809619534 1.0 kbps 867529906 0.4 kbps
veth487bc9b: 2650532684 0.8 kbps 2992458899 0.9 kbps
veth8cb8ca8: 3234030733 0.7 kbps 16921098378 11.4 kbps
vethbadbe14: 12007615348 3.8 kbps 15583195644 5.0 kbps
vethc152f93: 978828 0.0 kbps 3839134 0.0 kbps
vethe481f30: 1637142 0.0 kbps 15805768 0.0 kbps
vethfac2e83: 19445827683 6.2 kbps 16194181515 5.1 kbps
```
## netstat.rs
Prints out all open and listening TCP/UDP sockets, along with the owning process. The
output format is very similar to the standard `netstat` linux utility:
```text
Local address Remote address State Inode PID/Program name
0.0.0.0:53 0.0.0.0:0 Listen 30883 1409/pdns_server
0.0.0.0:51413 0.0.0.0:0 Listen 24263 927/transmission-da
0.0.0.0:35445 0.0.0.0:0 Listen 21777 942/rpc.mountd
0.0.0.0:22 0.0.0.0:0 Listen 27973 1149/sshd
0.0.0.0:25 0.0.0.0:0 Listen 28295 1612/master
```
## pressure.rs
Prints out CPU/IO/Memory pressure information
## ps.rs
Prints out all processes that share the same tty as the current terminal. This is very similar to the standard
`ps` utility on linux when run with no arguments:
```text
PID TTY TIME CMD
8369 pty/13 4.05 bash
23124 pty/13 0.23 basic-http-serv
24206 pty/13 0.11 ps
```
## self_memory.rs
Shows several ways to get the current memory usage of the current process
```text
PID: 21867
Memory page size: 4096
== Data from /proc/self/stat:
Total virtual memory used: 3436544 bytes
Total resident set: 220 pages (901120 bytes)
== Data from /proc/self/statm:
Total virtual memory used: 839 pages (3436544 bytes)
Total resident set: 220 pages (901120 bytes)
Total shared memory: 191 pages (782336 bytes)
== Data from /proc/self/status:
Total virtual memory used: 3436544 bytes
Total resident set: 901120 bytes
Total shared memory: 782336 bytes
```
## lsmod.rs
This lists all the loaded kernel modules, in a simple tree format.
## diskstat.rs
Lists IO information for local disks:
```text
sda1 mounted on /:
total reads: 7325390 (13640070 ms)
total writes: 124191552 (119109541 ms)
total flushes: 0 (0 ms)
```
Note: only local disks will be shown (not NFS mounts,
and disks used for ZFS will not be shown either).
## lslocks.rs
Shows current file locks in a format that is similar to the `lslocks` utility.
## mountinfo.rs
Lists all mountpoints, along with their type and options:
```text
sysfs on /sys type sysfs (noexec,relatime,nodev,rw,nosuid)
proc on /proc type proc (noexec,rw,nodev,relatime,nosuid)
udev on /dev type devtmpfs (rw,nosuid,relatime)
mode = 755
nr_inodes = 4109298
size = 16437192k
devpts on /dev/pts type devpts (nosuid,rw,noexec,relatime)
gid = 5
ptmxmode = 000
mode = 620
tmpfs on /run type tmpfs (rw,nosuid,noexec,relatime)
size = 3291852k
mode = 755
/dev/sda1 on / type ext4 (rw,relatime)
errors = remount-ro
```
## process_hierarchy.rs
Lists all processes as a tree. Sub-processes will be hierarchically ordered beneath their parents.
```text
1 /usr/lib/systemd/systemd --system --deserialize 54
366 /usr/lib/systemd/systemd-journald
375 /usr/lib/systemd/systemd-udevd
383 /usr/bin/lvmetad -f
525 /usr/bin/dbus-daemon --system --address=systemd: --nofork --nopidfile --systemd-activation --syslog-only
529 /usr/bin/syncthing -no-browser -no-restart -logflags=0
608 /usr/bin/syncthing -no-browser -no-restart -logflags=0
530 /usr/lib/systemd/systemd-logind
...
```

31
vendor/procfs/examples/diskstat.rs vendored Normal file
View file

@ -0,0 +1,31 @@
use procfs::{diskstats, process::Process, DiskStat};
use std::collections::HashMap;
use std::iter::FromIterator;
/// Print IO statistics for every mounted device we have diskstats for.
fn main() {
    let myself = Process::myself().unwrap();
    let mounts = myself.mountinfo().unwrap();

    // Index the IO stats by their (major, minor) device numbers.
    let disk_stats: HashMap<(i32, i32), DiskStat> = HashMap::from_iter(
        diskstats()
            .unwrap()
            .into_iter()
            .map(|stat| ((stat.major, stat.minor), stat)),
    );

    for mount in mounts {
        // The majmin field looks like "8:1"; split it into its two numbers.
        let mut parts = mount.majmin.split(':');
        let maj: i32 = parts.next().unwrap().parse().unwrap();
        let min: i32 = parts.next().unwrap().parse().unwrap();

        if let Some(stat) = disk_stats.get(&(maj, min)) {
            println!("{} mounted on {}:", stat.name, mount.mount_point.display());
            println!(" total reads: {} ({} ms)", stat.reads, stat.time_reading);
            println!(" total writes: {} ({} ms)", stat.writes, stat.time_writing);
            println!(
                " total flushes: {} ({} ms)",
                stat.flushes.unwrap_or(0),
                stat.time_flushing.unwrap_or(0)
            );
        }
    }
}

17
vendor/procfs/examples/dump.rs vendored Normal file
View file

@ -0,0 +1,17 @@
extern crate procfs;
/// Dump debug info for a process: either the PID given as argv[1], or ourselves.
fn main() {
    let requested_pid = std::env::args().nth(1).and_then(|s| s.parse::<i32>().ok());

    let prc = match requested_pid {
        Some(pid) => {
            println!("Info for pid={}", pid);
            procfs::process::Process::new(pid).unwrap()
        }
        None => procfs::process::Process::myself().unwrap(),
    };
    println!("{:#?}", prc);

    let stat = prc.stat().unwrap();
    println!("State: {:?}", stat.state());
    println!("RSS: {} bytes", stat.rss_bytes().unwrap());
}

View file

@ -0,0 +1,41 @@
//! For each interface, display the number of bytes sent and received, along with a data rate
fn main() {
    let delay = std::time::Duration::from_secs(2);

    let mut prev_stats = procfs::net::dev_status().unwrap();
    let mut prev_now = std::time::Instant::now();

    loop {
        std::thread::sleep(delay);
        let now = std::time::Instant::now();
        let dev_stats = procfs::net::dev_status().unwrap();

        // Elapsed time since the previous sample, in fractional seconds.
        let dt = (now - prev_now).as_millis() as f32 / 1000.0;

        let mut stats: Vec<_> = dev_stats.values().collect();
        stats.sort_by_key(|s| &s.name);

        println!();
        println!(
            "{:>16}: {:<20} {:<20} ",
            "Interface", "bytes recv", "bytes sent"
        );
        println!(
            "{:>16} {:<20} {:<20}",
            "================", "====================", "===================="
        );
        for stat in stats {
            // An interface can appear between samples (no previous entry), and
            // kernel counters can go backwards when an interface resets. The
            // original `unwrap()` + plain subtraction panicked in both cases;
            // default to the current values (zero delta) and saturate instead.
            let (prev_recv, prev_sent) = prev_stats
                .get(&stat.name)
                .map_or((stat.recv_bytes, stat.sent_bytes), |p| (p.recv_bytes, p.sent_bytes));
            println!(
                "{:>16}: {:<20} {:>6.1} kbps {:<20} {:>6.1} kbps ",
                stat.name,
                stat.recv_bytes,
                stat.recv_bytes.saturating_sub(prev_recv) as f32 / dt / 1000.0,
                stat.sent_bytes,
                stat.sent_bytes.saturating_sub(prev_sent) as f32 / dt / 1000.0
            );
        }

        prev_stats = dev_stats;
        prev_now = now;
    }
}

67
vendor/procfs/examples/lslocks.rs vendored Normal file
View file

@ -0,0 +1,67 @@
use procfs::process::{FDTarget, Process};
use rustix::fs::AtFlags;
use std::path::Path;
fn main() {
    // mountinfo is used at the end to map a lock's device major:minor back to
    // a mount point when the locked file's path can't be resolved.
    let myself = Process::myself().unwrap();
    let mountinfo = myself.mountinfo().unwrap();

    for lock in procfs::locks().unwrap() {
        // Column 1: the short name of the process holding the lock, or
        // "(undefined)" when there is no PID or its cmdline is unreadable.
        lock.pid
            .and_then(|pid| Process::new(pid).ok())
            .and_then(|proc| proc.cmdline().ok())
            .and_then(|mut cmd| cmd.drain(..).next())
            .map_or_else(
                || {
                    print!("{:18}", "(undefined)");
                },
                |s| {
                    // Show only the executable's file name, not the full path.
                    let p = Path::new(&s);
                    print!("{:18}", p.file_name().unwrap_or(p.as_os_str()).to_string_lossy());
                },
            );
        // Columns 2-5: pid (-1 when unknown), lock type, mode, and kind.
        print!("{:<12} ", lock.pid.unwrap_or(-1));
        print!("{:12} ", lock.lock_type.as_str());
        print!("{:12} ", lock.mode.as_str());
        print!("{:12} ", lock.kind.as_str());
        // try to find the path for this inode
        // (scan the owning process's open FDs, stat each path, and match inodes)
        let mut found = false;
        if let Some(pid) = lock.pid {
            if let Ok(fds) = Process::new(pid).and_then(|p| p.fd()) {
                for f in fds {
                    let fd = f.unwrap();
                    if let FDTarget::Path(p) = fd.target {
                        if let Ok(stat) = rustix::fs::statat(&rustix::fs::cwd(), &p, AtFlags::empty()) {
                            if stat.st_ino as u64 == lock.inode {
                                print!("{}", p.display());
                                found = true;
                                break;
                            }
                        }
                    }
                }
            }
        }
        if !found {
            // we don't have a PID or we don't have permission to inspect the processes files, but we still have the device and inode
            // There's no way to look up a path from an inode, so just bring the device mount point
            for mount in &mountinfo {
                if format!("{}:{}", lock.devmaj, lock.devmin) == mount.majmin {
                    print!("{}...", mount.mount_point.display());
                    found = true;
                    break;
                }
            }
        }
        if !found {
            // still not found? print the device
            print!("{}:{}", lock.devmaj, lock.devmin);
        }
        println!();
    }
}

30
vendor/procfs/examples/lsmod.rs vendored Normal file
View file

@ -0,0 +1,30 @@
use std::collections::HashMap;
/// Recursively print `name`, then (indented two more columns) every module
/// listed for it in `mods`. Top-level entries get a "-" marker.
fn print(name: &str, indent: usize, mods: &HashMap<&str, Vec<&str>>) {
    let marker = if indent == 0 { "-" } else { " " };
    println!("{}{} {}", marker, " ".repeat(indent), name);
    for child in mods.get(name).into_iter().flatten() {
        print(child, indent + 2, mods);
    }
}
/// List loaded kernel modules as a simple tree.
fn main() {
    let modules = procfs::modules().unwrap();

    // Each module knows which modules use it. Invert that relation so we can
    // look up, for any module, the list of modules it uses.
    let mut uses: HashMap<&str, Vec<&str>> = HashMap::new();
    for module in modules.values() {
        for user in &module.used_by {
            uses.entry(user).or_default().push(&module.name);
        }
    }

    // println!("{:?}", map["xt_policy"]);
    for modname in uses.keys() {
        print(modname, 0, &uses);
    }
}

28
vendor/procfs/examples/mountinfo.rs vendored Normal file
View file

@ -0,0 +1,28 @@
use procfs::process::Process;
use std::collections::HashSet;
fn main() {
for mount in Process::myself().unwrap().mountinfo().unwrap() {
let (a, b): (HashSet<_>, HashSet<_>) = mount
.mount_options
.into_iter()
.chain(mount.super_options)
.partition(|&(_, ref m)| m.is_none());
println!(
"{} on {} type {} ({})",
mount.mount_source.unwrap_or_else(|| "None".to_string()),
mount.mount_point.display(),
mount.fs_type,
a.into_iter().map(|(k, _)| k).collect::<Vec<_>>().join(",")
);
for (opt, val) in b {
if let Some(val) = val {
println!(" {} = {}", opt, val);
} else {
println!(" {}", opt);
}
}
}
}

53
vendor/procfs/examples/netstat.rs vendored Normal file
View file

@ -0,0 +1,53 @@
#![allow(clippy::print_literal)]
extern crate procfs;
use procfs::process::{FDTarget, Stat};
use std::collections::HashMap;
// A netstat-like listing of TCP sockets (IPv4 and IPv6) with the owning process.
fn main() {
    // get all processes
    let all_procs = procfs::process::all_processes().unwrap();

    // build up a map between socket inodes and processes:
    let mut map: HashMap<u64, Stat> = HashMap::new();
    for p in all_procs {
        let process = p.unwrap();
        // Skip processes whose stat or fd list we can't read (permissions, races).
        if let (Ok(stat), Ok(fds)) = (process.stat(), process.fd()) {
            for fd in fds {
                if let FDTarget::Socket(inode) = fd.unwrap().target {
                    map.insert(inode, stat.clone());
                }
            }
        }
    }

    // get the tcp table
    let tcp = procfs::net::tcp().unwrap();
    let tcp6 = procfs::net::tcp6().unwrap();
    println!(
        "{:<26} {:<26} {:<15} {:<8} {}",
        "Local address", "Remote address", "State", "Inode", "PID/Program name"
    );
    for entry in tcp.into_iter().chain(tcp6) {
        // find the process (if any) that has an open FD to this entry's inode
        let local_address = format!("{}", entry.local_address);
        let remote_addr = format!("{}", entry.remote_address);
        let state = format!("{:?}", entry.state);
        if let Some(stat) = map.get(&entry.inode) {
            println!(
                "{:<26} {:<26} {:<15} {:<12} {}/{}",
                local_address, remote_addr, state, entry.inode, stat.pid, stat.comm
            );
        } else {
            // We might not always be able to find the process associated with this socket
            println!(
                "{:<26} {:<26} {:<15} {:<12} -",
                local_address, remote_addr, state, entry.inode
            );
        }
    }
}

7
vendor/procfs/examples/pressure.rs vendored Normal file
View file

@ -0,0 +1,7 @@
/// A basic example of /proc/pressure/ usage.
fn main() {
    // Each constructor reads the corresponding /proc/pressure/<resource> file.
    let memory = procfs::MemoryPressure::new();
    println!("memory pressure: {:#?}", memory);

    let cpu = procfs::CpuPressure::new();
    println!("cpu pressure: {:#?}", cpu);

    let io = procfs::IoPressure::new();
    println!("io pressure: {:#?}", io);
}

View file

@ -0,0 +1,74 @@
use procfs::process::{all_processes, Stat};
/// One entry in the flat process table: parsed stat data plus the command line.
struct ProcessEntry {
    // Parsed contents of /proc/<pid>/stat (pid, ppid, comm, ...)
    stat: Stat,
    // Full argv; None when cmdline() failed for this process.
    cmdline: Option<Vec<String>>,
}
/// Print all processes as a tree.
/// The tree reflects the hierarchical relationship between parent and child processes.
fn main() {
    // Gather every readable process into a flat list first.
    let procs_iter = match all_processes() {
        Ok(iter) => iter,
        Err(err) => {
            println!("Failed to read all processes: {}", err);
            return;
        }
    };

    let processes: Vec<ProcessEntry> = procs_iter
        .filter_map(|res| {
            res.and_then(|proc| {
                let stat = proc.stat()?;
                let cmdline = proc.cmdline().ok();
                Ok(ProcessEntry { stat, cmdline })
            })
            .ok()
        })
        .collect();

    // Top-level processes are the ones whose parent PID is zero; print each
    // of those (and, recursively, its children).
    for entry in &processes {
        if entry.stat.ppid == 0 {
            print_process(entry, &processes, 0);
        }
    }
}
/// Take a process, print its command and recursively list all child processes.
/// This function will call itself until no further children can be found.
/// It's a depth-first tree exploration.
///
/// depth: The hierarchical depth of the process
fn print_process(process: &ProcessEntry, all_processes: &Vec<ProcessEntry>, depth: usize) {
    let cmdline = match &process.cmdline {
        Some(cmdline) => cmdline.join(" "),
        None => "zombie process".into(),
    };
    // Some processes seem to have an empty cmdline.
    if cmdline.is_empty() {
        return;
    }

    // 8 characters width for the pid column. The original computed the padding
    // as `pid_length - pid.len()`, which panics (subtract with overflow) for
    // PIDs wider than the column; `{:<width$}` pads without ever underflowing.
    let pid_length = 8;
    let pid = format!("{:<width$}", process.stat.pid, width = pid_length);

    // Indent four spaces per tree level.
    let padding = " ".repeat(4 * depth);
    println!("{}{}{}", pid, padding, cmdline);

    let children = get_children(process.stat.pid, all_processes);
    for child in &children {
        print_process(child, all_processes, depth + 1);
    }
}
/// Get all children of a specific process, by iterating through all processes and
/// checking their parent pid.
fn get_children(pid: i32, all_processes: &[ProcessEntry]) -> Vec<&ProcessEntry> {
    let mut children = Vec::new();
    for entry in all_processes {
        if entry.stat.ppid == pid {
            children.push(entry);
        }
    }
    children
}

25
vendor/procfs/examples/ps.rs vendored Normal file
View file

@ -0,0 +1,25 @@
#![allow(clippy::print_literal)]
extern crate procfs;
/// A very basic clone of `ps` on Linux, in the simple no-argument mode.
/// It shows all the processes that share the same tty as our self
fn main() {
    let my_stat = procfs::process::Process::myself().unwrap().stat().unwrap();
    let ticks_per_sec = procfs::ticks_per_second().unwrap();

    println!("{: >10} {: <8} {: >8} {}", "PID", "TTY", "TIME", "CMD");

    let tty = format!("pty/{}", my_stat.tty_nr().1);
    for proc_result in procfs::process::all_processes().unwrap() {
        let process = proc_result.unwrap();
        // Processes whose stat is unreadable are silently skipped.
        let stat = match process.stat() {
            Ok(stat) => stat,
            Err(_) => continue,
        };
        if stat.tty_nr != my_stat.tty_nr {
            continue;
        }
        // utime/stime are in clock ticks; convert to seconds.
        let total_time = (stat.utime + stat.stime) as f32 / (ticks_per_sec as f32);
        println!("{: >10} {: <8} {: >8} {}", stat.pid, tty, total_time, stat.comm);
    }
}

56
vendor/procfs/examples/self_memory.rs vendored Normal file
View file

@ -0,0 +1,56 @@
use procfs::process::Process;
/// Show the current process's memory usage from three /proc sources:
/// stat, statm, and status.
fn main() {
    let me = Process::myself().expect("Unable to load myself!");

    println!("PID: {}", me.pid);

    // stat/statm report memory in pages; convert to bytes via the page size.
    // (fixed typo in the expect message: "determinte" -> "determine")
    let page_size = procfs::page_size().expect("Unable to determine page size!") as u64;
    println!("Memory page size: {}", page_size);

    // Note: when comparing the below values to what "top" will display, note that "top" will use
    // base-2 units (kibibytes), not base-10 units (kilobytes).
    if let Ok(stat) = me.stat() {
        println!("== Data from /proc/self/stat:");
        println!("Total virtual memory used: {} bytes", stat.vsize);
        println!(
            "Total resident set: {} pages ({} bytes)",
            stat.rss,
            stat.rss as u64 * page_size
        );
        println!();
    }

    if let Ok(statm) = me.statm() {
        println!("== Data from /proc/self/statm:");
        println!(
            "Total virtual memory used: {} pages ({} bytes)",
            statm.size,
            statm.size * page_size
        );
        // Fixed garbled output: "({} byte)s" -> "({} bytes)"
        println!(
            "Total resident set: {} pages ({} bytes)",
            statm.resident,
            statm.resident * page_size
        );
        println!(
            "Total shared memory: {} pages ({} bytes)",
            statm.shared,
            statm.shared * page_size
        );
        println!();
    }

    if let Ok(status) = me.status() {
        println!("== Data from /proc/self/status:");
        // These fields are reported in kibibytes, hence the * 1024.
        println!(
            "Total virtual memory used: {} bytes",
            status.vmsize.expect("vmsize") * 1024
        );
        println!("Total resident set: {} bytes", status.vmrss.expect("vmrss") * 1024);
        println!(
            "Total shared memory: {} bytes",
            status.rssfile.expect("rssfile") * 1024 + status.rssshmem.expect("rssshmem") * 1024
        );
    }
}

29
vendor/procfs/examples/shm.rs vendored Normal file
View file

@ -0,0 +1,29 @@
extern crate procfs;
/// List processes using posix shared memory segments
fn main() {
    let segments = procfs::Shm::new().unwrap();
    for segment in &segments {
        println!("key: {}, shmid: {}", segment.key, segment.shmid);
        println!("============");
        for proc_result in procfs::process::all_processes().unwrap() {
            let process = proc_result.unwrap();
            // Processes we cannot inspect (permissions, raced exit) are skipped.
            let memory_maps = match process.smaps() {
                Ok(maps) => maps,
                Err(_) => continue,
            };
            for (map, _map_data) in &memory_maps {
                // A Vsys mapping whose key and inode match identifies an
                // attachment to this shared-memory segment.
                if let procfs::process::MMapPath::Vsys(key) = map.pathname {
                    if key == segment.key && map.inode == segment.shmid {
                        println!("{}: {:?}", process.pid, process.cmdline().unwrap());
                    }
                }
            }
        }
        println!();
    }
}

1
vendor/procfs/rustfmt.toml vendored Normal file
View file

@ -0,0 +1 @@
max_width = 120

144
vendor/procfs/src/cgroups.rs vendored Normal file
View file

@ -0,0 +1,144 @@
use crate::ProcResult;
use super::process::Process;
#[cfg(feature = "serde1")]
use serde::{Deserialize, Serialize};
#[derive(Debug, Clone)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
/// Control group (cgroup) controller information, one row of `/proc/cgroups`.
///
/// See also the [cgroups()] function.
pub struct CGroupController {
    /// The name of the controller.
    pub name: String,
    /// The unique ID of the cgroup hierarchy on which this controller is mounted.
    ///
    /// If multiple cgroups v1 controllers are bound to the same hierarchy, then each will show
    /// the same hierarchy ID in this field. The value in this field will be 0 if:
    ///
    /// * the controller is not mounted on a cgroups v1 hierarchy;
    /// * the controller is bound to the cgroups v2 single unified hierarchy; or
    /// * the controller is disabled (see below).
    pub hierarchy: u32,
    /// The number of control groups in this hierarchy using this controller.
    pub num_cgroups: u32,
    /// This field contains the value `true` if this controller is enabled, or `false` if it has been disabled
    pub enabled: bool,
}
/// Information about the cgroup controllers that are compiled into the kernel
///
/// (since Linux 2.6.24)
// This is returning a vector, but if each subsystem name is unique, maybe this can be a hashmap
// instead
pub fn cgroups() -> ProcResult<Vec<CGroupController>> {
    use std::fs::File;
    use std::io::{BufRead, BufReader};
    let reader = BufReader::new(File::open("/proc/cgroups")?);
    let mut controllers = Vec::new();
    for line in reader.lines() {
        let line = line?;
        // The header row (and any other comment) starts with '#'.
        if line.starts_with('#') {
            continue;
        }
        // Row format: subsys_name  hierarchy  num_cgroups  enabled
        let mut fields = line.split_whitespace();
        let name = expect!(fields.next(), "name").to_owned();
        let hierarchy = from_str!(u32, expect!(fields.next(), "hierarchy"));
        let num_cgroups = from_str!(u32, expect!(fields.next(), "num_cgroups"));
        let enabled = expect!(fields.next(), "enabled") == "1";
        controllers.push(CGroupController {
            name,
            hierarchy,
            num_cgroups,
            enabled,
        });
    }
    Ok(controllers)
}
/// Information about a process cgroup, one row of `/proc/<pid>/cgroup`.
///
/// See also the [Process::cgroups()] method.
#[derive(Debug, Clone)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
pub struct ProcessCgroup {
    /// For cgroups version 1 hierarchies, this field contains a unique hierarchy ID number
    /// that can be matched to a hierarchy ID in /proc/cgroups. For the cgroups version 2
    /// hierarchy, this field contains the value 0.
    pub hierarchy: u32,
    /// For cgroups version 1 hierarchies, this field contains a comma-separated list of the
    /// controllers bound to the hierarchy.
    ///
    /// For the cgroups version 2 hierarchy, this field is empty.
    pub controllers: Vec<String>,
    /// This field contains the pathname of the control group in the hierarchy to which the process
    /// belongs.
    ///
    /// This pathname is relative to the mount point of the hierarchy.
    pub pathname: String,
}
impl Process {
    /// Describes control groups to which the process with the corresponding PID belongs.
    ///
    /// The displayed information differs for cgroups version 1 and version 2 hierarchies.
    pub fn cgroups(&self) -> ProcResult<Vec<ProcessCgroup>> {
        use std::io::{BufRead, BufReader};
        let file = self.open_relative("cgroup")?;
        let reader = BufReader::new(file);
        let mut vec = Vec::new();
        for line in reader.lines() {
            let line = line?;
            if line.starts_with('#') {
                continue;
            }
            // Each line has the form `hierarchy-ID:controller-list:cgroup-path`.
            let mut s = line.splitn(3, ':');
            let hierarchy = from_str!(u32, expect!(s.next(), "hierarchy"));
            // For the cgroups v2 unified hierarchy the controller list is empty.
            // Filter out the empty fragment that `split(',')` would otherwise
            // produce, so `controllers` is truly empty as documented.
            let controllers = expect!(s.next(), "controllers")
                .split(',')
                .filter(|s| !s.is_empty())
                .map(|s| s.to_owned())
                .collect();
            let pathname = expect!(s.next(), "path").to_owned();
            vec.push(ProcessCgroup {
                hierarchy,
                controllers,
                pathname,
            });
        }
        Ok(vec)
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    // These are live smoke tests: they read the real /proc on the test
    // machine, so they only verify that parsing completes without error.
    #[test]
    fn test_cgroups() {
        let groups = cgroups().unwrap();
        println!("{:?}", groups);
    }
    #[test]
    fn test_process_cgroups() {
        let myself = Process::myself().unwrap();
        let groups = myself.cgroups();
        println!("{:?}", groups);
    }
}

227
vendor/procfs/src/cpuinfo.rs vendored Normal file
View file

@ -0,0 +1,227 @@
use crate::{FileWrapper, ProcResult};
#[cfg(feature = "serde1")]
use serde::{Deserialize, Serialize};
use std::{collections::HashMap, io::Read};
/// Represents the data from `/proc/cpuinfo`.
///
/// The `fields` field stores the fields that are common among all CPUs. The `cpus` field stores
/// CPU-specific info.
///
/// For common fields, there are methods that will return the data, converted to a more appropriate
/// data type. These methods will all return `None` if the field doesn't exist, or is in some
/// unexpected format (in that case, you'll have to access the string data directly).
#[derive(Debug, Clone)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
pub struct CpuInfo {
    /// This stores fields that are common among all CPUs
    pub fields: HashMap<String, String>,
    /// Per-cpu fields (those that differ between cpus), in the order the
    /// cpu blocks appear in `/proc/cpuinfo`.
    pub cpus: Vec<HashMap<String, String>>,
}
impl CpuInfo {
    /// Get CpuInfo from a custom Read instead of the default `/proc/cpuinfo`.
    pub fn from_reader<R: Read>(r: R) -> ProcResult<CpuInfo> {
        use std::io::{BufRead, BufReader};
        let reader = BufReader::new(r);
        let mut list = Vec::new();
        let mut map = Some(HashMap::new());
        // the first line of a cpu block must start with "processor"
        let mut found_first = false;
        for line in reader.lines().flatten() {
            if !line.is_empty() {
                let mut s = line.split(':');
                let key = expect!(s.next());
                if !found_first && key.trim() == "processor" {
                    found_first = true;
                }
                if !found_first {
                    // Skip lines outside a cpu block (e.g. the trailing
                    // "Hardware"/"Serial" section on ARM systems).
                    continue;
                }
                if let Some(value) = s.next() {
                    let key = key.trim().to_owned();
                    let value = value.trim().to_owned();
                    // get_or_insert_with avoids allocating a throwaway HashMap
                    // on every line when `map` is already `Some` (the common case).
                    map.get_or_insert_with(HashMap::new).insert(key, value);
                }
            } else if let Some(map) = map.take() {
                // A blank line terminates the current cpu block.
                list.push(map);
                found_first = false;
            }
        }
        if let Some(map) = map.take() {
            list.push(map);
        }
        // find properties that are the same for all cpus
        assert!(!list.is_empty());
        let common_fields: Vec<String> = list[0]
            .iter()
            .filter_map(|(key, val)| {
                if list.iter().all(|map| map.get(key).map_or(false, |v| v == val)) {
                    Some(key.clone())
                } else {
                    None
                }
            })
            .collect();
        let mut common_map = HashMap::new();
        for (k, v) in &list[0] {
            if common_fields.contains(k) {
                common_map.insert(k.clone(), v.clone());
            }
        }
        for map in &mut list {
            map.retain(|k, _| !common_fields.contains(k));
        }
        Ok(CpuInfo {
            fields: common_map,
            cpus: list,
        })
    }
    /// Load cpu info from the default `/proc/cpuinfo` file.
    pub fn new() -> ProcResult<CpuInfo> {
        let file = FileWrapper::open("/proc/cpuinfo")?;
        CpuInfo::from_reader(file)
    }
    /// Get the total number of cpu cores.
    ///
    /// This is the number of entries in the `/proc/cpuinfo` file.
    pub fn num_cores(&self) -> usize {
        self.cpus.len()
    }
    /// Get info for a specific cpu.
    ///
    /// This will merge the common fields with the cpu-specific fields.
    ///
    /// Returns None if the requested cpu index is not found.
    pub fn get_info(&self, cpu_num: usize) -> Option<HashMap<&str, &str>> {
        self.cpus.get(cpu_num).map(|info| {
            self.fields
                .iter()
                .chain(info.iter())
                .map(|(k, v)| (k.as_ref(), v.as_ref()))
                .collect()
        })
    }
    /// Get the content of a specific field associated to a CPU
    ///
    /// Returns None if the requested cpu index is not found.
    pub fn get_field(&self, cpu_num: usize, field_name: &str) -> Option<&str> {
        // Prefer the cpu-specific value, fall back to the common fields.
        self.cpus.get(cpu_num).and_then(|cpu_fields| {
            cpu_fields
                .get(field_name)
                .or_else(|| self.fields.get(field_name))
                .map(|s| s.as_ref())
        })
    }
    /// The "model name" field for the given cpu, if present.
    pub fn model_name(&self, cpu_num: usize) -> Option<&str> {
        self.get_field(cpu_num, "model name")
    }
    /// The "vendor_id" field for the given cpu, if present.
    pub fn vendor_id(&self, cpu_num: usize) -> Option<&str> {
        self.get_field(cpu_num, "vendor_id")
    }
    /// May not be available on some older 2.6 kernels
    pub fn physical_id(&self, cpu_num: usize) -> Option<u32> {
        self.get_field(cpu_num, "physical id").and_then(|s| s.parse().ok())
    }
    /// The cpu feature flags, split on whitespace.
    pub fn flags(&self, cpu_num: usize) -> Option<Vec<&str>> {
        self.get_field(cpu_num, "flags")
            .map(|flags| flags.split_whitespace().collect())
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    // Live smoke test against the real /proc/cpuinfo of the test machine.
    #[test]
    fn test_cpuinfo() {
        let info = CpuInfo::new().unwrap();
        println!("{:#?}", info.flags(0));
        for num in 0..info.num_cores() {
            info.model_name(num).unwrap();
            info.vendor_id(num).unwrap();
            // May not be available on some old kernels:
            info.physical_id(num);
        }
        //assert_eq!(info.num_cores(), 8);
    }
    #[test]
    fn test_cpuinfo_rpi() {
        // My rpi system includes some stuff at the end of /proc/cpuinfo that we shouldn't parse
        let data = r#"processor : 0
model name : ARMv7 Processor rev 4 (v7l)
BogoMIPS : 38.40
Features : half thumb fastmult vfp edsp neon vfpv3 tls vfpv4 idiva idivt vfpd32 lpae evtstrm crc32
CPU implementer : 0x41
CPU architecture: 7
CPU variant : 0x0
CPU part : 0xd03
CPU revision : 4
processor : 1
model name : ARMv7 Processor rev 4 (v7l)
BogoMIPS : 38.40
Features : half thumb fastmult vfp edsp neon vfpv3 tls vfpv4 idiva idivt vfpd32 lpae evtstrm crc32
CPU implementer : 0x41
CPU architecture: 7
CPU variant : 0x0
CPU part : 0xd03
CPU revision : 4
processor : 2
model name : ARMv7 Processor rev 4 (v7l)
BogoMIPS : 38.40
Features : half thumb fastmult vfp edsp neon vfpv3 tls vfpv4 idiva idivt vfpd32 lpae evtstrm crc32
CPU implementer : 0x41
CPU architecture: 7
CPU variant : 0x0
CPU part : 0xd03
CPU revision : 4
processor : 3
model name : ARMv7 Processor rev 4 (v7l)
BogoMIPS : 38.40
Features : half thumb fastmult vfp edsp neon vfpv3 tls vfpv4 idiva idivt vfpd32 lpae evtstrm crc32
CPU implementer : 0x41
CPU architecture: 7
CPU variant : 0x0
CPU part : 0xd03
CPU revision : 4
Hardware : BCM2835
Revision : a020d3
Serial : 0000000012345678
Model : Raspberry Pi 3 Model B Plus Rev 1.3
"#;
        let r = std::io::Cursor::new(data.as_bytes());
        let info = CpuInfo::from_reader(r).unwrap();
        // The trailing Hardware/Revision/Serial/Model section must not be
        // counted as a fifth cpu.
        assert_eq!(info.num_cores(), 4);
        let info = info.get_info(0).unwrap();
        assert!(info.get("model name").is_some());
        assert!(info.get("BogoMIPS").is_some());
        assert!(info.get("Features").is_some());
        assert!(info.get("CPU implementer").is_some());
        assert!(info.get("CPU architecture").is_some());
        assert!(info.get("CPU variant").is_some());
        assert!(info.get("CPU part").is_some());
        assert!(info.get("CPU revision").is_some());
    }
}

162
vendor/procfs/src/diskstats.rs vendored Normal file
View file

@ -0,0 +1,162 @@
use crate::{FileWrapper, ProcResult};
#[cfg(feature = "serde1")]
use serde::{Deserialize, Serialize};
use std::io::{BufRead, BufReader};
/// Disk IO stat information
///
/// To fully understand these fields, please see the [iostats.txt](https://www.kernel.org/doc/Documentation/iostats.txt)
/// kernel documentation.
///
/// For an example, see the [diskstats.rs](https://github.com/eminence/procfs/tree/master/examples)
/// example in the source repo.
// Doc reference: https://www.kernel.org/doc/Documentation/ABI/testing/procfs-diskstats
// Doc reference: https://www.kernel.org/doc/Documentation/iostats.txt
#[derive(Debug, Clone)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
pub struct DiskStat {
    /// The device major number
    pub major: i32,
    /// The device minor number
    pub minor: i32,
    /// Device name
    pub name: String,
    /// Reads completed successfully
    ///
    /// This is the total number of reads completed successfully
    pub reads: u64,
    /// Reads merged
    ///
    /// The number of adjacent reads that have been merged for efficiency.
    pub merged: u64,
    /// Sectors read successfully
    ///
    /// This is the total number of sectors read successfully.
    /// (Per the kernel's iostats.txt, sectors are 512-byte units.)
    pub sectors_read: u64,
    /// Time spent reading (ms)
    pub time_reading: u64,
    /// writes completed
    pub writes: u64,
    /// writes merged
    ///
    /// The number of adjacent writes that have been merged for efficiency.
    pub writes_merged: u64,
    /// Sectors written successfully
    pub sectors_written: u64,
    /// Time spent writing (ms)
    pub time_writing: u64,
    /// I/Os currently in progress
    pub in_progress: u64,
    /// Time spent doing I/Os (ms)
    pub time_in_progress: u64,
    /// Weighted time spent doing I/Os (ms)
    pub weighted_time_in_progress: u64,
    /// Discards completed successfully
    ///
    /// (since kernel 4.18)
    pub discards: Option<u64>,
    /// Discards merged
    pub discards_merged: Option<u64>,
    /// Sectors discarded
    pub sectors_discarded: Option<u64>,
    /// Time spent discarding
    pub time_discarding: Option<u64>,
    /// Flush requests completed successfully
    ///
    /// (since kernel 5.5)
    pub flushes: Option<u64>,
    /// Time spent flushing
    pub time_flushing: Option<u64>,
}
/// Get disk IO stat info from /proc/diskstats
pub fn diskstats() -> ProcResult<Vec<DiskStat>> {
    let file = FileWrapper::open("/proc/diskstats")?;
    let mut stats = Vec::new();
    // One device per line.
    for line in BufReader::new(file).lines() {
        stats.push(DiskStat::from_line(&line?)?);
    }
    Ok(stats)
}
impl DiskStat {
    /// Parse a single line of `/proc/diskstats`.
    ///
    /// The first 14 fields are always present. The discard fields (kernel
    /// 4.18+) and flush fields (kernel 5.5+) are optional and become `None`
    /// when missing on older kernels.
    pub fn from_line(line: &str) -> ProcResult<DiskStat> {
        let mut s = line.split_whitespace();
        let major = from_str!(i32, expect!(s.next()));
        let minor = from_str!(i32, expect!(s.next()));
        let name = expect!(s.next()).to_string();
        let reads = from_str!(u64, expect!(s.next()));
        let merged = from_str!(u64, expect!(s.next()));
        let sectors_read = from_str!(u64, expect!(s.next()));
        let time_reading = from_str!(u64, expect!(s.next()));
        let writes = from_str!(u64, expect!(s.next()));
        let writes_merged = from_str!(u64, expect!(s.next()));
        let sectors_written = from_str!(u64, expect!(s.next()));
        let time_writing = from_str!(u64, expect!(s.next()));
        let in_progress = from_str!(u64, expect!(s.next()));
        let time_in_progress = from_str!(u64, expect!(s.next()));
        let weighted_time_in_progress = from_str!(u64, expect!(s.next()));
        // Optional trailing fields: `parse()` is the idiomatic equivalent of
        // the previous `u64::from_str_radix(s, 10)` (clippy: from_str_radix_10).
        let discards = s.next().and_then(|s| s.parse::<u64>().ok());
        let discards_merged = s.next().and_then(|s| s.parse::<u64>().ok());
        let sectors_discarded = s.next().and_then(|s| s.parse::<u64>().ok());
        let time_discarding = s.next().and_then(|s| s.parse::<u64>().ok());
        let flushes = s.next().and_then(|s| s.parse::<u64>().ok());
        let time_flushing = s.next().and_then(|s| s.parse::<u64>().ok());
        Ok(DiskStat {
            major,
            minor,
            name,
            reads,
            merged,
            sectors_read,
            time_reading,
            writes,
            writes_merged,
            sectors_written,
            time_writing,
            in_progress,
            time_in_progress,
            weighted_time_in_progress,
            discards,
            discards_merged,
            sectors_discarded,
            time_discarding,
            flushes,
            time_flushing,
        })
    }
}
#[cfg(test)]
mod tests {
    // Live smoke test: parses the real /proc/diskstats of the test machine.
    #[test]
    fn diskstat() {
        for disk in super::diskstats().unwrap() {
            println!("{:?}", disk);
        }
    }
}

433
vendor/procfs/src/keyring.rs vendored Normal file
View file

@ -0,0 +1,433 @@
//! Functions related to the in-kernel key management and retention facility
//!
//! For more details on this facility, see the `keyrings(7)` man page.
//!
//! Additional functions can be found in the [kernel::keys](crate::sys::kernel::keys) module.
use crate::{FileWrapper, ProcResult};
use bitflags::bitflags;
#[cfg(feature = "serde1")]
use serde::{Deserialize, Serialize};
use std::{
collections::HashMap,
io::{BufRead, BufReader},
time::Duration,
};
bitflags! {
    /// Various key flags
    #[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
    pub struct KeyFlags: u32 {
        /// The key has been instantiated
        const INSTANTIATED = 0x01;
        /// The key has been revoked
        const REVOKED = 0x02;
        /// The key is dead
        ///
        /// I.e. the key type has been unregistered. A key may be briefly in this state during garbage collection.
        const DEAD = 0x04;
        /// The key contributes to the user's quota
        const QUOTA = 0x08;
        /// The key is under construction via a callback to user space
        const UNDER_CONSTRUCTION = 0x10;
        /// The key is negatively instantiated
        const NEGATIVE = 0x20;
        /// The key has been invalidated
        const INVALID = 0x40;
    }
}
bitflags! {
    /// Bitflags that represent the permissions for a key
    #[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
    pub struct PermissionFlags: u32 {
        /// The attributes of the key may be read
        ///
        /// This includes the type, description, and access rights (excluding the security label)
        const VIEW = 0x01;
        /// For a key: the payload of the key may be read. For a keyring: the list of serial numbers (keys) to which the keyring has links may be read.
        const READ = 0x02;
        /// The payload of the key may be updated and the key may be revoked.
        ///
        /// For a keyring, links may be added to or removed from the keyring, and the keyring
        /// may be cleared completely (all links are removed).
        const WRITE = 0x04;
        /// The key may be found by a search.
        ///
        /// For keyrings: keys and keyrings that are linked to by the keyring may be searched.
        const SEARCH = 0x08;
        /// Links may be created from keyrings to the key.
        ///
        /// The initial link to a key that is established when the key is created doesn't require this permission.
        const LINK = 0x10;
        /// The ownership details and security label of the key may be changed, the key's expiration
        /// time may be set, and the key may be revoked.
        const SETATTR = 0x20;
        /// The union of all of the permission flags above.
        const ALL = Self::VIEW.bits | Self::READ.bits | Self::WRITE.bits | Self::SEARCH.bits | Self::LINK.bits | Self::SETATTR.bits;
    }
}
impl KeyFlags {
    /// Parse the positional flags column of `/proc/keys`.
    ///
    /// Each character position corresponds to exactly one flag; the flag is
    /// set when the expected marker character appears in that position (any
    /// other character, typically '-', leaves it unset).
    fn from_str(s: &str) -> KeyFlags {
        let columns = [
            ('I', KeyFlags::INSTANTIATED),
            ('R', KeyFlags::REVOKED),
            ('D', KeyFlags::DEAD),
            ('Q', KeyFlags::QUOTA),
            ('U', KeyFlags::UNDER_CONSTRUCTION),
            ('N', KeyFlags::NEGATIVE),
            ('i', KeyFlags::INVALID),
        ];
        let mut flags = KeyFlags::empty();
        for (c, &(marker, flag)) in s.chars().zip(columns.iter()) {
            if c == marker {
                flags.insert(flag);
            }
        }
        flags
    }
}
#[derive(Debug, Clone)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
/// Key permissions, broken down by the four permission classes.
pub struct Permissions {
    /// Permissions granted to a process possessing the key.
    pub possessor: PermissionFlags,
    /// Permissions granted to the key's owning user.
    pub user: PermissionFlags,
    /// Permissions granted to the key's group.
    pub group: PermissionFlags,
    /// Permissions granted to everyone else.
    pub other: PermissionFlags,
}
impl Permissions {
fn from_str(s: &str) -> ProcResult<Permissions> {
let possessor = PermissionFlags::from_bits(from_str!(u32, &s[0..2], 16))
.ok_or_else(|| build_internal_error!(format!("Unable to parse {:?} as PermissionFlags", s)))?;
let user = PermissionFlags::from_bits(from_str!(u32, &s[2..4], 16))
.ok_or_else(|| build_internal_error!(format!("Unable to parse {:?} as PermissionFlags", s)))?;
let group = PermissionFlags::from_bits(from_str!(u32, &s[4..6], 16))
.ok_or_else(|| build_internal_error!(format!("Unable to parse {:?} as PermissionFlags", s)))?;
let other = PermissionFlags::from_bits(from_str!(u32, &s[6..8], 16))
.ok_or_else(|| build_internal_error!(format!("Unable to parse {:?} as PermissionFlags", s)))?;
Ok(Permissions {
possessor,
user,
group,
other,
})
}
}
#[derive(Debug, Clone, Eq, PartialEq)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
/// The expiration state of a key, from the timeout column of `/proc/keys`.
pub enum KeyTimeout {
    /// The key never expires ("perm").
    Permanent,
    /// The key has already expired ("expd").
    Expired,
    /// The key expires after the given duration.
    Timeout(Duration),
}
impl KeyTimeout {
    /// Parse the timeout column of `/proc/keys`.
    fn from_str(s: &str) -> ProcResult<KeyTimeout> {
        match s {
            "perm" => Ok(KeyTimeout::Permanent),
            "expd" => Ok(KeyTimeout::Expired),
            _ => {
                // Everything else is an integer followed by a one-character unit.
                let (digits, unit) = s.split_at(s.len() - 1);
                let value = from_str!(u64, digits);
                let seconds = match unit {
                    "s" => value,
                    "m" => value * 60,
                    "h" => value * 60 * 60,
                    "d" => value * 60 * 60 * 24,
                    "w" => value * 60 * 60 * 24 * 7,
                    _ => return Err(build_internal_error!(format!("Unable to parse keytimeout of {:?}", s))),
                };
                Ok(KeyTimeout::Timeout(Duration::from_secs(seconds)))
            }
        }
    }
}
#[derive(Debug, Clone)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
/// The type of an in-kernel key, from the type column of `/proc/keys`.
pub enum KeyType {
    /// This is a general-purpose key type.
    ///
    /// The key is kept entirely within kernel memory. The payload may be read and updated by
    /// user-space applications. The payload for keys of this type is a blob of arbitrary
    /// data of up to 32,767 bytes.
    /// The description may be any valid string, though it is preferred that it start
    /// with a colon-delimited prefix representing the service to which the key is of
    /// interest (for instance "afs:mykey").
    User,
    /// Keyrings are special keys which store a set of links to other keys (including
    /// other keyrings), analogous to a directory holding links to files. The main
    /// purpose of a keyring is to prevent other keys from being garbage collected
    /// because nothing refers to them.
    ///
    /// Keyrings with descriptions (names) that begin with a period ('.') are
    /// reserved to the implementation.
    Keyring,
    /// This key type is essentially the same as "user", but it does not provide
    /// reading (i.e., the keyctl(2) KEYCTL_READ operation), meaning that the key
    /// payload is never visible from user space. This is suitable for storing user
    /// name-password pairs that should not be readable from user space.
    ///
    /// The description of a "logon" key must start with a non-empty colon-delimited
    /// prefix whose purpose is to identify the service to which the key belongs.
    /// (Note that this differs from keys of the "user" type, where the inclusion of
    /// a prefix is recommended but is not enforced.)
    Logon,
    /// This key type is similar to the "user" key type, but it may hold a payload of
    /// up to 1 MiB in size. This key type is useful for purposes such as holding
    /// Kerberos ticket caches.
    ///
    /// The payload data may be stored in a tmpfs filesystem, rather than in kernel
    /// memory, if the data size exceeds the overhead of storing the data in the
    /// filesystem. (Storing the data in a filesystem requires filesystem structures
    /// to be allocated in the kernel. The size of these structures determines the
    /// size threshold above which the tmpfs storage method is used.) Since Linux
    /// 4.8, the payload data is encrypted when stored in tmpfs, thereby preventing
    /// it from being written unencrypted into swap space.
    BigKey,
    /// Other specialized, but rare keys types
    Other(String),
}
impl KeyType {
    /// Map the type column of `/proc/keys` to a `KeyType`.
    ///
    /// Unrecognized type names are preserved verbatim in `Other`.
    fn from_str(s: &str) -> KeyType {
        match s {
            "keyring" => KeyType::Keyring,
            "user" => KeyType::User,
            "logon" => KeyType::Logon,
            "big_key" => KeyType::BigKey,
            other => KeyType::Other(other.to_string()),
        }
    }
}
/// A key, parsed from one line of `/proc/keys`.
#[derive(Debug, Clone)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
pub struct Key {
    /// The ID (serial number) of the key
    pub id: u64,
    /// A set of flags describing the state of the key
    pub flags: KeyFlags,
    /// Count of the number of kernel credential structures that are
    /// pinning the key (approximately: the number of threads and open file
    /// references that refer to this key).
    pub usage: u32,
    /// Key timeout
    pub timeout: KeyTimeout,
    /// Key permissions
    pub permissions: Permissions,
    /// The user ID of the key owner
    pub uid: u32,
    /// The group ID of the key.
    ///
    /// The value of `None` here means that the key has no group ID; this can occur in certain circumstances for
    /// keys created by the kernel.
    pub gid: Option<u32>,
    /// The type of key
    pub key_type: KeyType,
    /// The key description
    pub description: String,
}
impl Key {
    /// Parse one line of `/proc/keys`.
    ///
    /// Columns, in order: serial (hex), flags, usage, timeout, permissions,
    /// uid, gid, type, then the description (which may contain spaces and is
    /// therefore re-joined from the remaining fields).
    fn from_line(s: &str) -> ProcResult<Key> {
        let mut s = s.split_whitespace();
        let id = from_str!(u64, expect!(s.next()), 16);
        let s_flags = expect!(s.next());
        let usage = from_str!(u32, expect!(s.next()));
        let s_timeout = expect!(s.next());
        let s_perms = expect!(s.next());
        let uid = from_str!(u32, expect!(s.next()));
        let s_gid = expect!(s.next());
        let s_type = expect!(s.next());
        let desc: Vec<_> = s.collect();
        Ok(Key {
            id,
            flags: KeyFlags::from_str(s_flags),
            usage,
            timeout: KeyTimeout::from_str(s_timeout)?,
            permissions: Permissions::from_str(s_perms)?,
            uid,
            // A gid of -1 means the key has no group ID.
            gid: if s_gid == "-1" {
                None
            } else {
                Some(from_str!(u32, s_gid))
            },
            key_type: KeyType::from_str(s_type),
            description: desc.join(" "),
        })
    }
}
/// Returns a list of the keys for which the reading thread has **view** permission, providing various information about each key.
pub fn keys() -> ProcResult<Vec<Key>> {
    let file = FileWrapper::open("/proc/keys")?;
    let mut parsed = Vec::new();
    // Each line of /proc/keys describes one key.
    for line in BufReader::new(file).lines() {
        parsed.push(Key::from_line(&line?)?);
    }
    Ok(parsed)
}
/// Information about a user with at least one key
#[derive(Debug, Clone)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
pub struct KeyUser {
    /// The user that owns the key
    pub uid: u32,
    /// The kernel-internal usage count for the kernel structure used to record key users
    pub usage: u32,
    /// The total number of keys owned by the user
    pub nkeys: u32,
    /// The number of keys that have been instantiated
    pub nikeys: u32,
    /// The number of keys owned by the user
    /// (counted against the user's key quota — see keyrings(7))
    pub qnkeys: u32,
    /// The maximum number of keys that the user may own
    pub maxkeys: u32,
    /// The number of bytes consumed in payloads of the keys owned by this user
    pub qnbytes: u32,
    /// The upper limit on the number of bytes in key payloads for this user
    pub maxbytes: u32,
}
impl KeyUser {
    /// Parse one line of `/proc/key-users`.
    ///
    /// Line format: `uid: usage nkeys/nikeys qnkeys/maxkeys qnbytes/maxbytes`
    fn from_str(s: &str) -> ProcResult<KeyUser> {
        let mut fields = s.split_whitespace();
        let uid_field = expect!(fields.next());
        let usage = from_str!(u32, expect!(fields.next()));
        // The remaining three fields are all "a/b" pairs of u32 values.
        let pair = |raw: &str| -> ProcResult<(u32, u32)> {
            let mut halves = raw.split('/');
            Ok((from_str!(u32, expect!(halves.next())), from_str!(u32, expect!(halves.next()))))
        };
        let (nkeys, nikeys) = pair(expect!(fields.next()))?;
        let (qnkeys, maxkeys) = pair(expect!(fields.next()))?;
        let (qnbytes, maxbytes) = pair(expect!(fields.next()))?;
        Ok(KeyUser {
            // Strip the trailing ':' from the uid column.
            uid: from_str!(u32, &uid_field[0..uid_field.len() - 1]),
            usage,
            nkeys,
            nikeys,
            qnkeys,
            maxkeys,
            qnbytes,
            maxbytes,
        })
    }
}
/// Get various information for each user ID that has at least one key on the system.
pub fn key_users() -> ProcResult<HashMap<u32, KeyUser>> {
    let file = FileWrapper::open("/proc/key-users")?;
    let mut users = HashMap::new();
    // One line per user; the resulting map is keyed by uid.
    for line in BufReader::new(file).lines() {
        let user = KeyUser::from_str(&line?)?;
        users.insert(user.uid, user);
    }
    Ok(users)
}
#[cfg(test)]
mod tests {
    use super::*;
    // Checks the positional flag parser against hand-built flag strings.
    #[test]
    fn key_flags() {
        assert_eq!(KeyFlags::from_str("I------"), KeyFlags::INSTANTIATED);
        assert_eq!(KeyFlags::from_str("IR"), KeyFlags::INSTANTIATED | KeyFlags::REVOKED);
        assert_eq!(KeyFlags::from_str("IRDQUNi"), KeyFlags::all());
    }
    // All five of the unit-suffixed strings below describe the same two weeks.
    #[test]
    fn timeout() {
        assert_eq!(KeyTimeout::from_str("perm").unwrap(), KeyTimeout::Permanent);
        assert_eq!(KeyTimeout::from_str("expd").unwrap(), KeyTimeout::Expired);
        assert_eq!(
            KeyTimeout::from_str("2w").unwrap(),
            KeyTimeout::Timeout(Duration::from_secs(1209600))
        );
        assert_eq!(
            KeyTimeout::from_str("14d").unwrap(),
            KeyTimeout::Timeout(Duration::from_secs(1209600))
        );
        assert_eq!(
            KeyTimeout::from_str("336h").unwrap(),
            KeyTimeout::Timeout(Duration::from_secs(1209600))
        );
        assert_eq!(
            KeyTimeout::from_str("20160m").unwrap(),
            KeyTimeout::Timeout(Duration::from_secs(1209600))
        );
        assert_eq!(
            KeyTimeout::from_str("1209600s").unwrap(),
            KeyTimeout::Timeout(Duration::from_secs(1209600))
        );
    }
    // Live smoke tests: read the real /proc on the test machine.
    #[test]
    fn live_keys() {
        for key in keys().unwrap() {
            println!("{:#?}", key);
        }
    }
    #[test]
    fn live_key_users() {
        for (_user, data) in key_users().unwrap() {
            println!("{:#?}", data);
        }
    }
}

1389
vendor/procfs/src/lib.rs vendored Normal file

File diff suppressed because it is too large Load diff

257
vendor/procfs/src/locks.rs vendored Normal file
View file

@ -0,0 +1,257 @@
use crate::{FileWrapper, ProcResult};
use std::io::{BufRead, BufReader};
#[cfg(feature = "serde1")]
use serde::{Deserialize, Serialize};
/// The type of a file lock
#[derive(Debug, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
pub enum LockType {
    /// A BSD file lock created using `flock`
    FLock,
    /// A POSIX byte-range lock created with `fcntl`
    Posix,
    /// An Open File Description (OFD) lock created with `fcntl`
    ///
    /// NOTE(review): the variant name `ODF` transposes the OFD acronym, but it
    /// is kept as-is because renaming it would break the public API.
    ODF,
    /// Some other unknown lock type
    Other(String),
}
impl LockType {
    /// A string form of this lock type.
    ///
    /// NOTE(review): for `ODF` this returns "ODF", which does not round-trip
    /// through `From<&str>` (the parser recognizes "OFDLCK", the token used in
    /// /proc/locks) — confirm before feeding `as_str` output back to the parser.
    pub fn as_str(&self) -> &str {
        match self {
            LockType::FLock => "FLOCK",
            LockType::Posix => "POSIX",
            LockType::ODF => "ODF",
            LockType::Other(s) => s.as_ref(),
        }
    }
}
impl From<&str> for LockType {
    /// Parse the lock-type token of a `/proc/locks` line.
    ///
    /// Unrecognized tokens are preserved verbatim in `Other`.
    fn from(s: &str) -> LockType {
        match s {
            "FLOCK" => LockType::FLock,
            "OFDLCK" => LockType::ODF,
            "POSIX" => LockType::Posix,
            x => LockType::Other(x.to_string()),
        }
    }
}
/// The mode of a lock (advisory or mandatory)
#[derive(Debug, Clone)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
pub enum LockMode {
    /// An advisory lock (not enforced by the kernel).
    Advisory,
    /// A mandatory lock (enforced by the kernel).
    Mandatory,
    /// Some other unknown lock mode
    Other(String),
}
impl LockMode {
    /// The string form of this lock mode, as it appears in `/proc/locks`.
    pub fn as_str(&self) -> &str {
        match self {
            LockMode::Advisory => "ADVISORY",
            LockMode::Mandatory => "MANDATORY",
            LockMode::Other(s) => s.as_ref(),
        }
    }
}
impl From<&str> for LockMode {
    /// Parse the lock-mode token of a `/proc/locks` line.
    ///
    /// Unrecognized tokens are preserved verbatim in `Other`.
    fn from(s: &str) -> LockMode {
        match s {
            "ADVISORY" => LockMode::Advisory,
            "MANDATORY" => LockMode::Mandatory,
            x => LockMode::Other(x.to_string()),
        }
    }
}
/// The kind of a lock (read or write)
#[derive(Debug, Clone)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
pub enum LockKind {
    /// A read lock (or BSD shared lock)
    Read,
    /// A write lock (or a BSD exclusive lock)
    Write,
    /// Some other unknown lock kind
    Other(String),
}
impl LockKind {
    /// The string form of this lock kind, as it appears in `/proc/locks`.
    pub fn as_str(&self) -> &str {
        match self {
            LockKind::Read => "READ",
            LockKind::Write => "WRITE",
            LockKind::Other(s) => s.as_ref(),
        }
    }
}
impl From<&str> for LockKind {
    /// Parse the lock-kind token of a `/proc/locks` line.
    ///
    /// Unrecognized tokens are preserved verbatim in `Other`.
    fn from(s: &str) -> LockKind {
        match s {
            "READ" => LockKind::Read,
            "WRITE" => LockKind::Write,
            x => LockKind::Other(x.to_string()),
        }
    }
}
#[derive(Debug, Clone)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
/// Details about an individual file lock
///
/// See the [`locks`] function.
///
/// For an example, see the [lslocks.rs](https://github.com/eminence/procfs/tree/master/examples)
/// example in the source repo.
pub struct Lock {
    /// The type of lock
    pub lock_type: LockType,
    /// The lock mode (advisory or mandatory)
    pub mode: LockMode,
    /// The kind of lock (read or write)
    pub kind: LockKind,
    /// The process that owns the lock
    ///
    /// Because OFD locks are not owned by a single process (since multiple processes
    /// may have file descriptors that refer to the same FD), this field may be `None`.
    ///
    /// Before kernel 4.14 a bug meant that the PID of the process that initially
    /// acquired the lock was displayed instead of `None`.
    pub pid: Option<i32>,
    /// The major ID of the device containing the FS that contains this lock
    pub devmaj: u32,
    /// The minor ID of the device containing the FS that contains this lock
    pub devmin: u32,
    /// The inode of the locked file
    pub inode: u64,
    /// The offset (in bytes) of the first byte of the lock.
    ///
    /// For BSD locks, this value is always 0.
    pub offset_first: u64,
    /// The offset (in bytes) of the last byte of the lock.
    ///
    /// `None` means the lock extends to the end of the file. For BSD locks,
    /// the value is always `None`.
    pub offset_last: Option<u64>,
}
impl Lock {
    /// Parse a single line of `/proc/locks` into a `Lock`.
    fn from_line(line: &str) -> ProcResult<Lock> {
        let mut fields = line.split_whitespace();

        // Field 0 is the ordinal ("1:"), which we discard.
        let _ = expect!(fields.next());

        let first = expect!(fields.next());
        // Lines describing blocked lock requests insert a "->" marker before
        // the lock type; skip over it when present.
        let lock_type = if first == "->" {
            From::from(expect!(fields.next()))
        } else {
            From::from(first)
        };

        let mode = From::from(expect!(fields.next()));
        let kind = From::from(expect!(fields.next()));
        let pid_field = expect!(fields.next());
        let dev_inode = expect!(fields.next());
        let offset_first = from_str!(u64, expect!(fields.next()));
        let last_field = expect!(fields.next());

        // The device/inode field has the form "MAJ:MIN:INODE", with the major
        // and minor numbers in hex and the inode in decimal.
        let mut dev_parts = dev_inode.split(':');
        let devmaj = from_str!(u32, expect!(dev_parts.next()), 16);
        let devmin = from_str!(u32, expect!(dev_parts.next()), 16);
        let inode = from_str!(u64, expect!(dev_parts.next()));

        // A pid of -1 means no single owning process (e.g. OFD locks).
        let pid = match pid_field {
            "-1" => None,
            other => Some(from_str!(i32, other)),
        };

        // "EOF" means the lock extends to the end of the file.
        let offset_last = match last_field {
            "EOF" => None,
            other => Some(from_str!(u64, other)),
        };

        Ok(Lock {
            lock_type,
            mode,
            kind,
            pid,
            devmaj,
            devmin,
            inode,
            offset_first,
            offset_last,
        })
    }
}
/// Get a list of current file locks and leases
///
/// Since Linux 4.9, the list of locks is filtered to show just the locks
/// for the processes in the PID namespace for which the `/proc` filesystem
/// was mounted.
pub fn locks() -> ProcResult<Vec<Lock>> {
    let reader = BufReader::new(FileWrapper::open("/proc/locks")?);
    // Each non-empty line of /proc/locks describes one lock; propagate the
    // first I/O or parse error, otherwise collect every entry.
    reader
        .lines()
        .map(|line| Lock::from_line(&line?))
        .collect()
}
#[cfg(test)]
mod tests {
    use crate::{locks, LockKind, LockMode, LockType};

    /// Parse the live `/proc/locks` table and verify that every entry maps to
    /// a known type/kind/mode (i.e. we never fall back to an `Other` variant).
    #[test]
    fn live() {
        for lock in locks().unwrap() {
            println!("{:?}", lock);
            if let LockType::Other(s) = lock.lock_type {
                panic!("Found an unknown lock type {:?}", s);
            }
            if let LockKind::Other(s) = lock.kind {
                panic!("Found an unknown lock kind {:?}", s);
            }
            if let LockMode::Other(s) = lock.mode {
                panic!("Found an unknown lock mode {:?}", s);
            }
        }
    }

    /// Sample data including blocked ("->") POSIX lock requests and an OFD
    /// lock with pid -1; every line must parse.
    #[test]
    fn test_blocked() {
        let data = r#"1: POSIX ADVISORY WRITE 723 00:14:16845 0 EOF
2: FLOCK ADVISORY WRITE 652 00:14:16763 0 EOF
3: FLOCK ADVISORY WRITE 1594 fd:00:396528 0 EOF
4: FLOCK ADVISORY WRITE 1594 fd:00:396527 0 EOF
5: FLOCK ADVISORY WRITE 2851 fd:00:529372 0 EOF
6: POSIX ADVISORY WRITE 1280 00:14:16200 0 0
6: -> POSIX ADVISORY WRITE 1281 00:14:16200 0 0
6: -> POSIX ADVISORY WRITE 1279 00:14:16200 0 0
6: -> POSIX ADVISORY WRITE 1282 00:14:16200 0 0
6: -> POSIX ADVISORY WRITE 1283 00:14:16200 0 0
7: OFDLCK ADVISORY READ -1 00:06:1028 0 EOF
8: FLOCK ADVISORY WRITE 6471 fd:00:529426 0 EOF
9: FLOCK ADVISORY WRITE 6471 fd:00:529424 0 EOF
10: FLOCK ADVISORY WRITE 6471 fd:00:529420 0 EOF
11: FLOCK ADVISORY WRITE 6471 fd:00:529418 0 EOF
12: POSIX ADVISORY WRITE 1279 00:14:23553 0 EOF
13: FLOCK ADVISORY WRITE 6471 fd:00:393838 0 EOF
14: POSIX ADVISORY WRITE 655 00:14:16146 0 EOF"#;
        for line in data.lines() {
            super::Lock::from_line(line.trim()).unwrap();
        }
    }
}

593
vendor/procfs/src/meminfo.rs vendored Normal file
View file

@ -0,0 +1,593 @@
use std::io;
use super::{convert_to_kibibytes, FileWrapper, ProcResult};
#[cfg(feature = "serde1")]
use serde::{Deserialize, Serialize};
/// This struct reports statistics about memory usage on the system, based on
/// the `/proc/meminfo` file.
///
/// It is used by `free(1)` to report the amount of free and used memory (both
/// physical and swap) on the system as well as the shared memory and
/// buffers used by the kernel. Each struct member is generally reported in
/// bytes, but a few are unitless values.
///
/// Except as noted below, all of the fields have been present since at least
/// Linux 2.6.0. Some fields are optional and are present only if the kernel
/// was configured with various options; those dependencies are noted in the list.
///
/// **Notes**
///
/// While the file shows kilobytes (kB; 1 kB equals 1000 B),
/// it is actually kibibytes (KiB; 1 KiB equals 1024 B).
///
/// This imprecision in /proc/meminfo is known,
/// but is not corrected due to legacy concerns -
/// programs rely on /proc/meminfo to specify size with the "kB" string.
///
/// New fields to this struct may be added at any time (even without a major or minor semver bump).
#[derive(Debug, Clone)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
#[allow(non_snake_case)]
#[non_exhaustive]
pub struct Meminfo {
    /// Total usable RAM (i.e., physical RAM minus a few reserved bits and the kernel binary code).
    pub mem_total: u64,
    /// The sum of [LowFree](#structfield.low_free) + [HighFree](#structfield.high_free).
    pub mem_free: u64,
    /// An estimate of how much memory is available for starting new applications, without swapping.
    ///
    /// (since Linux 3.14)
    pub mem_available: Option<u64>,
    /// Relatively temporary storage for raw disk blocks that shouldn't get tremendously large (20MB or so).
    pub buffers: u64,
    /// In-memory cache for files read from the disk (the page cache). Doesn't include SwapCached.
    pub cached: u64,
    /// Memory that once was swapped out, is swapped back in but still also is in the swap
    /// file.
    ///
    /// (If memory pressure is high, these pages don't need to be swapped out again
    /// because they are already in the swap file. This saves I/O.)
    pub swap_cached: u64,
    /// Memory that has been used more recently and usually not reclaimed unless absolutely
    /// necessary.
    pub active: u64,
    /// Memory which has been less recently used. It is more eligible to be reclaimed for other
    /// purposes.
    pub inactive: u64,
    /// [To be documented.]
    ///
    /// (since Linux 2.6.28)
    pub active_anon: Option<u64>,
    /// [To be documented.]
    ///
    /// (since Linux 2.6.28)
    pub inactive_anon: Option<u64>,
    /// [To be documented.]
    ///
    /// (since Linux 2.6.28)
    pub active_file: Option<u64>,
    /// [To be documented.]
    ///
    /// (since Linux 2.6.28)
    pub inactive_file: Option<u64>,
    /// [To be documented.]
    ///
    /// (From Linux 2.6.28 to 2.6.30, CONFIG_UNEVICTABLE_LRU was required.)
    pub unevictable: Option<u64>,
    /// [To be documented.]
    ///
    /// (From Linux 2.6.28 to 2.6.30, CONFIG_UNEVICTABLE_LRU was required.)
    pub mlocked: Option<u64>,
    /// Total amount of highmem.
    ///
    /// Highmem is all memory above ~860MB of physical memory. Highmem areas are for use by
    /// user-space programs, or for the page cache. The kernel must use tricks to access this
    /// memory, making it slower to access than lowmem.
    ///
    /// (Starting with Linux 2.6.19, CONFIG_HIGHMEM is required.)
    pub high_total: Option<u64>,
    /// Amount of free highmem.
    ///
    /// (Starting with Linux 2.6.19, CONFIG_HIGHMEM is required.)
    pub high_free: Option<u64>,
    /// Total amount of lowmem.
    ///
    /// Lowmem is memory which can be used for every thing that highmem can be used for,
    /// but it is also available for the kernel's use for its own data structures.
    /// Among many other things, it is where everything from Slab is allocated.
    /// Bad things happen when you're out of lowmem.
    ///
    /// (Starting with Linux 2.6.19, CONFIG_HIGHMEM is required.)
    pub low_total: Option<u64>,
    /// Amount of free lowmem.
    ///
    /// (Starting with Linux 2.6.19, CONFIG_HIGHMEM is required.)
    pub low_free: Option<u64>,
    /// [To be documented.]
    ///
    /// (since Linux 2.6.29. CONFIG_MMU is required.)
    pub mmap_copy: Option<u64>,
    /// Total amount of swap space available.
    pub swap_total: u64,
    /// Amount of swap space that is currently unused.
    pub swap_free: u64,
    /// Memory which is waiting to get written back to the disk.
    pub dirty: u64,
    /// Memory which is actively being written back to the disk.
    pub writeback: u64,
    /// Non-file backed pages mapped into user-space page tables.
    ///
    /// (since Linux 2.6.18)
    pub anon_pages: Option<u64>,
    /// Files which have been mapped into memory (with mmap(2)), such as libraries.
    pub mapped: u64,
    /// Amount of memory consumed in tmpfs(5) filesystems.
    ///
    /// (since Linux 2.6.32)
    pub shmem: Option<u64>,
    /// In-kernel data structures cache.
    pub slab: u64,
    /// Part of Slab, that might be reclaimed, such as caches.
    ///
    /// (since Linux 2.6.19)
    pub s_reclaimable: Option<u64>,
    /// Part of Slab, that cannot be reclaimed on memory pressure.
    ///
    /// (since Linux 2.6.19)
    pub s_unreclaim: Option<u64>,
    /// Amount of memory allocated to kernel stacks.
    ///
    /// (since Linux 2.6.32)
    pub kernel_stack: Option<u64>,
    /// Amount of memory dedicated to the lowest level of page tables.
    ///
    /// (since Linux 2.6.18)
    pub page_tables: Option<u64>,
    /// [To be documented.]
    ///
    /// (CONFIG_QUICKLIST is required. Since Linux 2.6.27)
    pub quicklists: Option<u64>,
    /// NFS pages sent to the server, but not yet committed to stable storage.
    ///
    /// (since Linux 2.6.18)
    pub nfs_unstable: Option<u64>,
    /// Memory used for block device "bounce buffers".
    ///
    /// (since Linux 2.6.18)
    pub bounce: Option<u64>,
    /// Memory used by FUSE for temporary writeback buffers.
    ///
    /// (since Linux 2.6.26)
    pub writeback_tmp: Option<u64>,
    /// This is the total amount of memory currently available to be allocated on the system,
    /// expressed in bytes.
    ///
    /// This limit is adhered to only if strict overcommit
    /// accounting is enabled (mode 2 in /proc/sys/vm/overcommit_memory). The limit is calculated
    /// according to the formula described under /proc/sys/vm/overcommit_memory. For further
    /// details, see the kernel source file
    /// [Documentation/vm/overcommit-accounting](https://www.kernel.org/doc/Documentation/vm/overcommit-accounting).
    ///
    /// (since Linux 2.6.10)
    pub commit_limit: Option<u64>,
    /// The amount of memory presently allocated on the system.
    ///
    /// The committed memory is a sum of all of the memory which has been allocated
    /// by processes, even if it has not been "used" by them as of yet. A process which allocates 1GB of memory (using malloc(3)
    /// or similar), but touches only 300MB of that memory will show up as using only 300MB of memory even if it has the address space
    /// allocated for the entire 1GB.
    ///
    /// This 1GB is memory which has been "committed" to by the VM and can be used at any time by the allocating application. With
    /// strict overcommit enabled on the system (mode 2 in /proc/sys/vm/overcommit_memory), allocations which would exceed the
    /// CommitLimit will not be permitted. This is useful if one needs to guarantee that processes will not fail due to lack of memory once
    /// that memory has been successfully allocated.
    pub committed_as: u64,
    /// Total size of vmalloc memory area.
    pub vmalloc_total: u64,
    /// Amount of vmalloc area which is used.
    pub vmalloc_used: u64,
    /// Largest contiguous block of vmalloc area which is free.
    pub vmalloc_chunk: u64,
    /// [To be documented.]
    ///
    /// (CONFIG_MEMORY_FAILURE is required. Since Linux 2.6.32)
    pub hardware_corrupted: Option<u64>,
    /// Non-file backed huge pages mapped into user-space page tables.
    ///
    /// (CONFIG_TRANSPARENT_HUGEPAGE is required. Since Linux 2.6.38)
    pub anon_hugepages: Option<u64>,
    /// Memory used by shared memory (shmem) and tmpfs(5) allocated with huge pages
    ///
    /// (CONFIG_TRANSPARENT_HUGEPAGE is required. Since Linux 4.8)
    pub shmem_hugepages: Option<u64>,
    /// Shared memory mapped into user space with huge pages.
    ///
    /// (CONFIG_TRANSPARENT_HUGEPAGE is required. Since Linux 4.8)
    pub shmem_pmd_mapped: Option<u64>,
    /// Total CMA (Contiguous Memory Allocator) pages.
    ///
    /// (CONFIG_CMA is required. Since Linux 3.1)
    pub cma_total: Option<u64>,
    /// Free CMA (Contiguous Memory Allocator) pages.
    ///
    /// (CONFIG_CMA is required. Since Linux 3.1)
    pub cma_free: Option<u64>,
    /// The size of the pool of huge pages.
    ///
    /// CONFIG_HUGETLB_PAGE is required.)
    pub hugepages_total: Option<u64>,
    /// The number of huge pages in the pool that are not yet allocated.
    ///
    /// (CONFIG_HUGETLB_PAGE is required.)
    pub hugepages_free: Option<u64>,
    /// This is the number of huge pages for which a commitment to allocate from the pool has been
    /// made, but no allocation has yet been made.
    ///
    /// These reserved huge pages guarantee that an application will be able to allocate a
    /// huge page from the pool of huge pages at fault time.
    ///
    /// (CONFIG_HUGETLB_PAGE is required. Since Linux 2.6.17)
    pub hugepages_rsvd: Option<u64>,
    /// This is the number of huge pages in the pool above the value in /proc/sys/vm/nr_hugepages.
    ///
    /// The maximum number of surplus huge pages is controlled by /proc/sys/vm/nr_overcommit_hugepages.
    ///
    /// (CONFIG_HUGETLB_PAGE is required. Since Linux 2.6.24)
    pub hugepages_surp: Option<u64>,
    /// The size of huge pages.
    ///
    /// (CONFIG_HUGETLB_PAGE is required.)
    pub hugepagesize: Option<u64>,
    /// Number of bytes of RAM linearly mapped by kernel in 4kB pages. (x86.)
    ///
    /// (since Linux 2.6.27)
    pub direct_map_4k: Option<u64>,
    /// Number of bytes of RAM linearly mapped by kernel in 4MB pages.
    ///
    /// (x86 with CONFIG_X86_64 or CONFIG_X86_PAE enabled. Since Linux 2.6.27)
    pub direct_map_4M: Option<u64>,
    /// Number of bytes of RAM linearly mapped by kernel in 2MB pages.
    ///
    /// (x86 with neither CONFIG_X86_64 nor CONFIG_X86_PAE enabled. Since Linux 2.6.27)
    pub direct_map_2M: Option<u64>,
    /// (x86 with CONFIG_X86_64 and CONFIG_X86_DIRECT_GBPAGES enabled. Since Linux 2.6.27)
    pub direct_map_1G: Option<u64>,
    /// needs documentation
    pub hugetlb: Option<u64>,
    /// Memory allocated to the per-cpu allocator used to back per-cpu allocations.
    ///
    /// This stat excludes the cost of metadata.
    pub per_cpu: Option<u64>,
    /// Kernel allocations that the kernel will attempt to reclaim under memory pressure.
    ///
    /// Includes s_reclaimable, and other direct allocations with a shrinker.
    pub k_reclaimable: Option<u64>,
    /// Undocumented field
    ///
    /// (CONFIG_TRANSPARENT_HUGEPAGE is required. Since Linux 5.4)
    pub file_pmd_mapped: Option<u64>,
    /// Undocumented field
    ///
    /// (CONFIG_TRANSPARENT_HUGEPAGE is required. Since Linux 5.4)
    pub file_huge_pages: Option<u64>,
}
impl Meminfo {
    /// Reads and parses the `/proc/meminfo`, returning an error if there are problems.
    pub fn new() -> ProcResult<Meminfo> {
        let f = FileWrapper::open("/proc/meminfo")?;
        Meminfo::from_reader(f)
    }

    /// Get Meminfo from a custom Read instead of the default `/proc/meminfo`.
    pub fn from_reader<R: io::Read>(r: R) -> ProcResult<Meminfo> {
        use std::collections::HashMap;
        use std::io::{BufRead, BufReader};

        let reader = BufReader::new(r);
        let mut map = HashMap::new();

        for line in reader.lines() {
            let line = expect!(line);
            if line.is_empty() {
                continue;
            }
            // Each line looks like "FieldName:  12345 kB"; the unit is optional.
            let mut s = line.split_whitespace();
            let field = expect!(s.next(), "no field");
            let value = expect!(s.next(), "no value");
            let unit = s.next(); // optional

            let value = from_str!(u64, value);

            // When a unit is present, normalize the value to kibibytes.
            let value = if let Some(unit) = unit {
                convert_to_kibibytes(value, unit)?
            } else {
                value
            };

            // Strip the trailing ':' from the field name before keying the map.
            map.insert(field[..field.len() - 1].to_string(), value);
        }

        // use 'remove' to move the value out of the hashmap
        // if there's anything still left in the map at the end, that
        // means we probably have a bug/typo, or are out-of-date
        let meminfo = Meminfo {
            mem_total: expect!(map.remove("MemTotal")),
            mem_free: expect!(map.remove("MemFree")),
            mem_available: map.remove("MemAvailable"),
            buffers: expect!(map.remove("Buffers")),
            cached: expect!(map.remove("Cached")),
            swap_cached: expect!(map.remove("SwapCached")),
            active: expect!(map.remove("Active")),
            inactive: expect!(map.remove("Inactive")),
            active_anon: map.remove("Active(anon)"),
            inactive_anon: map.remove("Inactive(anon)"),
            active_file: map.remove("Active(file)"),
            inactive_file: map.remove("Inactive(file)"),
            unevictable: map.remove("Unevictable"),
            mlocked: map.remove("Mlocked"),
            high_total: map.remove("HighTotal"),
            high_free: map.remove("HighFree"),
            low_total: map.remove("LowTotal"),
            low_free: map.remove("LowFree"),
            mmap_copy: map.remove("MmapCopy"),
            swap_total: expect!(map.remove("SwapTotal")),
            swap_free: expect!(map.remove("SwapFree")),
            dirty: expect!(map.remove("Dirty")),
            writeback: expect!(map.remove("Writeback")),
            anon_pages: map.remove("AnonPages"),
            mapped: expect!(map.remove("Mapped")),
            shmem: map.remove("Shmem"),
            slab: expect!(map.remove("Slab")),
            s_reclaimable: map.remove("SReclaimable"),
            s_unreclaim: map.remove("SUnreclaim"),
            kernel_stack: map.remove("KernelStack"),
            page_tables: map.remove("PageTables"),
            quicklists: map.remove("Quicklists"),
            nfs_unstable: map.remove("NFS_Unstable"),
            bounce: map.remove("Bounce"),
            writeback_tmp: map.remove("WritebackTmp"),
            commit_limit: map.remove("CommitLimit"),
            committed_as: expect!(map.remove("Committed_AS")),
            vmalloc_total: expect!(map.remove("VmallocTotal")),
            vmalloc_used: expect!(map.remove("VmallocUsed")),
            vmalloc_chunk: expect!(map.remove("VmallocChunk")),
            hardware_corrupted: map.remove("HardwareCorrupted"),
            anon_hugepages: map.remove("AnonHugePages"),
            shmem_hugepages: map.remove("ShmemHugePages"),
            shmem_pmd_mapped: map.remove("ShmemPmdMapped"),
            cma_total: map.remove("CmaTotal"),
            cma_free: map.remove("CmaFree"),
            hugepages_total: map.remove("HugePages_Total"),
            hugepages_free: map.remove("HugePages_Free"),
            hugepages_rsvd: map.remove("HugePages_Rsvd"),
            hugepages_surp: map.remove("HugePages_Surp"),
            hugepagesize: map.remove("Hugepagesize"),
            direct_map_4k: map.remove("DirectMap4k"),
            direct_map_4M: map.remove("DirectMap4M"),
            direct_map_2M: map.remove("DirectMap2M"),
            direct_map_1G: map.remove("DirectMap1G"),
            k_reclaimable: map.remove("KReclaimable"),
            per_cpu: map.remove("Percpu"),
            hugetlb: map.remove("Hugetlb"),
            file_pmd_mapped: map.remove("FilePmdMapped"),
            file_huge_pages: map.remove("FileHugePages"),
        };

        // In tests only, fail loudly if the kernel reported a field we don't know about.
        if cfg!(test) {
            assert!(map.is_empty(), "meminfo map is not empty: {:#?}", map);
        }

        Ok(meminfo)
    }
}
#[cfg(test)]
mod test {
    use super::*;
    use crate::{kernel_config, KernelVersion};

    /// Cross-check the optional `Meminfo` fields against the running kernel's
    /// version and (when available) its build configuration.
    #[allow(clippy::cognitive_complexity)]
    #[allow(clippy::blocks_in_if_conditions)]
    #[test]
    fn test_meminfo() {
        // TRAVIS
        // we don't have access to the kernel_config on travis, so skip that test there
        match ::std::env::var("TRAVIS") {
            Ok(ref s) if s == "true" => return,
            _ => {}
        }

        let kernel = KernelVersion::current().unwrap();
        let config = kernel_config().ok();

        let meminfo = Meminfo::new().unwrap();
        println!("{:#?}", meminfo);

        // for the fields that are only present in some kernel versions, make sure our
        // actual kernel agrees
        if kernel >= KernelVersion::new(3, 14, 0) {
            assert!(meminfo.mem_available.is_some());
        }

        if kernel >= KernelVersion::new(2, 6, 28) {
            assert!(meminfo.active_anon.is_some());
            assert!(meminfo.inactive_anon.is_some());
            assert!(meminfo.active_file.is_some());
            assert!(meminfo.inactive_file.is_some());
        } else {
            assert!(meminfo.active_anon.is_none());
            assert!(meminfo.inactive_anon.is_none());
            assert!(meminfo.active_file.is_none());
            assert!(meminfo.inactive_file.is_none());
        }

        if kernel >= KernelVersion::new(2, 6, 28)
            && kernel <= KernelVersion::new(2, 6, 30)
            && meminfo.unevictable.is_some()
        {
            if let Some(ref config) = config {
                assert!(config.get("CONFIG_UNEVICTABLE_LRU").is_some());
            }
        }

        if kernel >= KernelVersion::new(2, 6, 19)
            && config.as_ref().map_or(false, |cfg| cfg.contains_key("CONFIG_HIGHMEM"))
        {
            assert!(meminfo.high_total.is_some());
            assert!(meminfo.high_free.is_some());
            assert!(meminfo.low_total.is_some());
            assert!(meminfo.low_free.is_some());
        } else {
            assert!(meminfo.high_total.is_none());
            assert!(meminfo.high_free.is_none());
            assert!(meminfo.low_total.is_none());
            assert!(meminfo.low_free.is_none());
        }

        // Possible bug in procfs documentation:
        // The man page says that MmapCopy requires CONFIG_MMU, but if you look at the
        // source, MmapCopy is only included if CONFIG_MMU is *missing*:
        // https://github.com/torvalds/linux/blob/v4.17/fs/proc/meminfo.c#L80
        //if kernel >= KernelVersion::new(2, 6, 29) && config.contains_key("CONFIG_MMU") {
        //    assert!(meminfo.mmap_copy.is_some());
        //} else {
        //    assert!(meminfo.mmap_copy.is_none());
        //}

        if kernel >= KernelVersion::new(2, 6, 18) {
            assert!(meminfo.anon_pages.is_some());
            assert!(meminfo.page_tables.is_some());
            assert!(meminfo.nfs_unstable.is_some());
            assert!(meminfo.bounce.is_some());
        } else {
            assert!(meminfo.anon_pages.is_none());
            assert!(meminfo.page_tables.is_none());
            assert!(meminfo.nfs_unstable.is_none());
            assert!(meminfo.bounce.is_none());
        }

        if kernel >= KernelVersion::new(2, 6, 32) {
            assert!(meminfo.shmem.is_some());
            assert!(meminfo.kernel_stack.is_some());
        } else {
            assert!(meminfo.shmem.is_none());
            assert!(meminfo.kernel_stack.is_none());
        }

        if kernel >= KernelVersion::new(2, 6, 19) {
            assert!(meminfo.s_reclaimable.is_some());
            assert!(meminfo.s_unreclaim.is_some());
        } else {
            assert!(meminfo.s_reclaimable.is_none());
            assert!(meminfo.s_unreclaim.is_none());
        }

        if kernel >= KernelVersion::new(2, 6, 27)
            && config
                .as_ref()
                .map_or(false, |cfg| cfg.contains_key("CONFIG_QUICKLIST"))
        {
            assert!(meminfo.quicklists.is_some());
        } else {
            assert!(meminfo.quicklists.is_none());
        }

        if kernel >= KernelVersion::new(2, 6, 26) {
            assert!(meminfo.writeback_tmp.is_some());
        } else {
            assert!(meminfo.writeback_tmp.is_none());
        }

        if kernel >= KernelVersion::new(2, 6, 10) {
            assert!(meminfo.commit_limit.is_some());
        } else {
            assert!(meminfo.commit_limit.is_none());
        }

        if kernel >= KernelVersion::new(2, 6, 32)
            && config
                .as_ref()
                .map_or(std::path::Path::new("/proc/kpagecgroup").exists(), |cfg| {
                    cfg.contains_key("CONFIG_MEMORY_FAILURE")
                })
        {
            assert!(meminfo.hardware_corrupted.is_some());
        } else {
            assert!(meminfo.hardware_corrupted.is_none());
        }

        if kernel >= KernelVersion::new(2, 6, 38)
            && config
                .as_ref()
                .map_or(false, |cfg| cfg.contains_key("CONFIG_TRANSPARENT_HUGEPAGE"))
        {
            assert!(meminfo.anon_hugepages.is_some());
        } else {
            // Some distributions may backport this option into older kernels
            // assert!(meminfo.anon_hugepages.is_none());
        }

        if kernel >= KernelVersion::new(4, 8, 0)
            && config
                .as_ref()
                .map_or(true, |cfg| cfg.contains_key("CONFIG_TRANSPARENT_HUGEPAGE"))
        {
            assert!(meminfo.shmem_hugepages.is_some());
            assert!(meminfo.shmem_pmd_mapped.is_some());
        } else {
            assert!(meminfo.shmem_hugepages.is_none());
            assert!(meminfo.shmem_pmd_mapped.is_none());
        }

        if kernel >= KernelVersion::new(3, 1, 0) && config.as_ref().map_or(true, |cfg| cfg.contains_key("CONFIG_CMA")) {
            assert!(meminfo.cma_total.is_some());
            assert!(meminfo.cma_free.is_some());
        } else {
            assert!(meminfo.cma_total.is_none());
            assert!(meminfo.cma_free.is_none());
        }

        if config
            .as_ref()
            .map_or(true, |cfg| cfg.contains_key("CONFIG_HUGETLB_PAGE"))
        {
            assert!(meminfo.hugepages_total.is_some());
            assert!(meminfo.hugepages_free.is_some());
            assert!(meminfo.hugepagesize.is_some());
        } else {
            assert!(meminfo.hugepages_total.is_none());
            assert!(meminfo.hugepages_free.is_none());
            assert!(meminfo.hugepagesize.is_none());
        }

        if kernel >= KernelVersion::new(2, 6, 17)
            && config
                .as_ref()
                .map_or(true, |cfg| cfg.contains_key("CONFIG_HUGETLB_PAGE"))
        {
            assert!(meminfo.hugepages_rsvd.is_some());
        } else {
            assert!(meminfo.hugepages_rsvd.is_none());
        }

        if kernel >= KernelVersion::new(2, 6, 24)
            && config
                .as_ref()
                .map_or(true, |cfg| cfg.contains_key("CONFIG_HUGETLB_PAGE"))
        {
            assert!(meminfo.hugepages_surp.is_some());
        } else {
            assert!(meminfo.hugepages_surp.is_none());
        }
    }
}

813
vendor/procfs/src/net.rs vendored Normal file
View file

@ -0,0 +1,813 @@
// Don't throw clippy warnings for manual string stripping.
// The suggested fix with `strip_prefix` removes support for Rust 1.33 and 1.38
#![allow(clippy::manual_strip)]
//! Information about the networking layer.
//!
//! This module corresponds to the `/proc/net` directory and contains various information about the
//! networking layer.
//!
//! # Example
//!
//! Here's an example that will print out all of the open and listening TCP sockets, and their
//! corresponding processes, if known. This mimics the "netstat" utility, but for TCP only. You
//! can run this example yourself with:
//!
//! > cargo run --example=netstat
//!
//! ```rust
//! # use procfs::process::{FDTarget, Stat};
//! # use std::collections::HashMap;
//! let all_procs = procfs::process::all_processes().unwrap();
//!
//! // build up a map between socket inodes and process stat info:
//! let mut map: HashMap<u64, Stat> = HashMap::new();
//! for p in all_procs {
//! let process = p.unwrap();
//! if let (Ok(stat), Ok(fds)) = (process.stat(), process.fd()) {
//! for fd in fds {
//! if let FDTarget::Socket(inode) = fd.unwrap().target {
//! map.insert(inode, stat.clone());
//! }
//! }
//! }
//! }
//!
//! // get the tcp table
//! let tcp = procfs::net::tcp().unwrap();
//! let tcp6 = procfs::net::tcp6().unwrap();
//! println!("{:<26} {:<26} {:<15} {:<8} {}", "Local address", "Remote address", "State", "Inode", "PID/Program name");
//! for entry in tcp.into_iter().chain(tcp6) {
//! // find the process (if any) that has an open FD to this entry's inode
//! let local_address = format!("{}", entry.local_address);
//! let remote_addr = format!("{}", entry.remote_address);
//! let state = format!("{:?}", entry.state);
//! if let Some(stat) = map.get(&entry.inode) {
//! println!("{:<26} {:<26} {:<15} {:<12} {}/{}", local_address, remote_addr, state, entry.inode, stat.pid, stat.comm);
//! } else {
//! // We might not always be able to find the process associated with this socket
//! println!("{:<26} {:<26} {:<15} {:<12} -", local_address, remote_addr, state, entry.inode);
//! }
//! }
//! ```
use crate::from_iter;
use crate::ProcResult;
use std::collections::HashMap;
use crate::FileWrapper;
use bitflags::bitflags;
use byteorder::{ByteOrder, NativeEndian, NetworkEndian};
use std::io::{BufRead, BufReader, Read};
use std::net::{Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6};
use std::{path::PathBuf, str::FromStr};
#[cfg(feature = "serde1")]
use serde::{Deserialize, Serialize};
/// The state of a TCP socket, as reported in the `st` column of `/proc/net/tcp`.
///
/// The declared discriminants match the numeric values used by the kernel;
/// see [`TcpState::from_u8`] and [`TcpState::to_u8`].
#[derive(Debug, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
pub enum TcpState {
    Established = 1,
    SynSent,
    SynRecv,
    FinWait1,
    FinWait2,
    TimeWait,
    Close,
    CloseWait,
    LastAck,
    Listen,
    Closing,
    NewSynRecv,
}
impl TcpState {
    /// Decode the numeric socket-state value used by `/proc/net/tcp`.
    ///
    /// Returns `None` for values outside the known range (0x01..=0x0C).
    pub fn from_u8(num: u8) -> Option<TcpState> {
        let state = match num {
            0x01 => TcpState::Established,
            0x02 => TcpState::SynSent,
            0x03 => TcpState::SynRecv,
            0x04 => TcpState::FinWait1,
            0x05 => TcpState::FinWait2,
            0x06 => TcpState::TimeWait,
            0x07 => TcpState::Close,
            0x08 => TcpState::CloseWait,
            0x09 => TcpState::LastAck,
            0x0A => TcpState::Listen,
            0x0B => TcpState::Closing,
            0x0C => TcpState::NewSynRecv,
            _ => return None,
        };
        Some(state)
    }

    /// Encode this state as the numeric value the kernel uses.
    pub fn to_u8(&self) -> u8 {
        // The enum declares `Established = 1` and the remaining variants follow
        // sequentially, so the declared discriminant is exactly the kernel value.
        self.clone() as u8
    }
}
/// The state of a UDP socket, as reported in the `st` column of `/proc/net/udp`.
///
/// The declared discriminants match the numeric values used by the kernel;
/// see [`UdpState::from_u8`] and [`UdpState::to_u8`].
#[derive(Debug, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
pub enum UdpState {
    Established = 1,
    Close = 7,
}
impl UdpState {
    /// Decode the numeric socket-state value used by `/proc/net/udp`.
    ///
    /// Returns `None` for anything other than 0x01 (established) or 0x07 (close).
    pub fn from_u8(num: u8) -> Option<UdpState> {
        if num == 0x01 {
            Some(UdpState::Established)
        } else if num == 0x07 {
            Some(UdpState::Close)
        } else {
            None
        }
    }

    /// Encode this state as the numeric value the kernel uses.
    pub fn to_u8(&self) -> u8 {
        // Discriminants are declared as `Established = 1` and `Close = 7`.
        self.clone() as u8
    }
}
/// The state of a Unix-domain socket, as reported in `/proc/net/unix`.
///
/// The declared discriminants match the numeric values used by the kernel;
/// see [`UnixState::from_u8`] and [`UnixState::to_u8`].
#[derive(Debug, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
pub enum UnixState {
    UNCONNECTED = 1,
    CONNECTING = 2,
    CONNECTED = 3,
    DISCONNECTING = 4,
}
impl UnixState {
    /// Decode the numeric socket-state value used by `/proc/net/unix`.
    ///
    /// Returns `None` for values outside the known range (0x01..=0x04).
    pub fn from_u8(num: u8) -> Option<UnixState> {
        let state = match num {
            0x01 => UnixState::UNCONNECTED,
            0x02 => UnixState::CONNECTING,
            0x03 => UnixState::CONNECTED,
            0x04 => UnixState::DISCONNECTING,
            _ => return None,
        };
        Some(state)
    }

    /// Encode this state as the numeric value the kernel uses.
    pub fn to_u8(&self) -> u8 {
        // Discriminants are declared explicitly as 1 through 4, in order.
        self.clone() as u8
    }
}
/// An entry in the TCP socket table
#[derive(Debug, Clone)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
pub struct TcpNetEntry {
    /// The local address and port of the socket
    pub local_address: SocketAddr,
    /// The remote address and port of the socket
    pub remote_address: SocketAddr,
    /// The state of the socket
    pub state: TcpState,
    /// The receive-queue value (parsed from the hex `rx_queue` column)
    pub rx_queue: u32,
    /// The transmit-queue value (parsed from the hex `tx_queue` column)
    pub tx_queue: u32,
    /// The inode number of the socket
    pub inode: u64,
}
/// An entry in the UDP socket table
#[derive(Debug, Clone)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
pub struct UdpNetEntry {
    /// The local address and port of the socket
    pub local_address: SocketAddr,
    /// The remote address and port of the socket
    pub remote_address: SocketAddr,
    /// The state of the socket
    pub state: UdpState,
    /// The receive-queue value (parsed from the hex `rx_queue` column)
    pub rx_queue: u32,
    /// The transmit-queue value (parsed from the hex `tx_queue` column)
    pub tx_queue: u32,
    /// The inode number of the socket
    pub inode: u64,
}
/// An entry in the Unix socket table
#[derive(Debug, Clone)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
pub struct UnixNetEntry {
    /// The number of users of the socket
    pub ref_count: u32,
    /// The socket type.
    ///
    /// Possible values are `SOCK_STREAM`, `SOCK_DGRAM`, or `SOCK_SEQPACKET`.  These constants can
    /// be found in the libc crate.
    pub socket_type: u16,
    /// The state of the socket
    pub state: UnixState,
    /// The inode number of the socket
    pub inode: u64,
    /// The bound pathname (if any) of the socket.
    ///
    /// Sockets in the abstract namespace are included, and are shown with a path that commences
    /// with the '@' character.
    pub path: Option<PathBuf>,
}
/// Parses an address in the form 00010203:1234
///
/// Also supports IPv6
fn parse_addressport_str(s: &str) -> ProcResult<SocketAddr> {
    let mut las = s.split(':');
    let ip_part = expect!(las.next(), "ip_part");
    let port = expect!(las.next(), "port");
    // The port is hex-encoded.
    let port = from_str!(u16, port, 16);

    if ip_part.len() == 8 {
        // 8 hex digits -> an IPv4 address.
        let bytes = expect!(hex::decode(ip_part));
        let ip_u32 = NetworkEndian::read_u32(&bytes);

        // The octets are assembled in reverse order from the big-endian read
        // above: the lowest byte of ip_u32 becomes the first octet.
        // NOTE(review): this appears to assume the kernel wrote the address as
        // a hex dump of a little-endian u32 — confirm behavior on big-endian hosts.
        let ip = Ipv4Addr::new(
            (ip_u32 & 0xff) as u8,
            ((ip_u32 & 0xff << 8) >> 8) as u8,
            ((ip_u32 & 0xff << 16) >> 16) as u8,
            ((ip_u32 & 0xff << 24) >> 24) as u8,
        );

        Ok(SocketAddr::V4(SocketAddrV4::new(ip, port)))
    } else if ip_part.len() == 32 {
        // 32 hex digits -> an IPv6 address, read as four native-endian u32 groups
        // and then split into the eight 16-bit segments.
        let bytes = expect!(hex::decode(ip_part));

        let ip_a = NativeEndian::read_u32(&bytes[0..]);
        let ip_b = NativeEndian::read_u32(&bytes[4..]);
        let ip_c = NativeEndian::read_u32(&bytes[8..]);
        let ip_d = NativeEndian::read_u32(&bytes[12..]);

        let ip = Ipv6Addr::new(
            ((ip_a >> 16) & 0xffff) as u16,
            (ip_a & 0xffff) as u16,
            ((ip_b >> 16) & 0xffff) as u16,
            (ip_b & 0xffff) as u16,
            ((ip_c >> 16) & 0xffff) as u16,
            (ip_c & 0xffff) as u16,
            ((ip_d >> 16) & 0xffff) as u16,
            (ip_d & 0xffff) as u16,
        );

        Ok(SocketAddr::V6(SocketAddrV6::new(ip, port, 0, 0)))
    } else {
        // Any other length is malformed input.
        Err(build_internal_error!(format!(
            "Unable to parse {:?} as an address:port",
            s
        )))
    }
}
/// Reads TCP socket table from the provided `reader`.
///
/// The first line of the table is a column header and is skipped; every
/// remaining line becomes one [`TcpNetEntry`].
pub fn read_tcp_table<R: Read>(reader: BufReader<R>) -> ProcResult<Vec<TcpNetEntry>> {
    let mut entries = Vec::new();

    // first line is a header we need to skip
    for line in reader.lines().skip(1) {
        let line = line?;
        let mut fields = line.split_whitespace();
        fields.next(); // slot number ("sl"), unused

        let local = expect!(fields.next(), "tcp::local_address");
        let remote = expect!(fields.next(), "tcp::rem_address");
        let state = expect!(fields.next(), "tcp::st");

        // The queue column is "tx_queue:rx_queue", both in hex.
        let mut queues = expect!(fields.next(), "tcp::tx_queue:rx_queue").splitn(2, ':');
        let tx_queue = from_str!(u32, expect!(queues.next(), "tcp::tx_queue"), 16);
        let rx_queue = from_str!(u32, expect!(queues.next(), "tcp::rx_queue"), 16);

        // Skip the tr/tm->when, retrnsmt, uid and timeout columns.
        for _ in 0..4 {
            fields.next();
        }
        let inode = expect!(fields.next(), "tcp::inode");

        entries.push(TcpNetEntry {
            local_address: parse_addressport_str(local)?,
            remote_address: parse_addressport_str(remote)?,
            rx_queue,
            tx_queue,
            state: expect!(TcpState::from_u8(from_str!(u8, state, 16))),
            inode: from_str!(u64, inode),
        });
    }

    Ok(entries)
}
/// Reads UDP socket table from the provided `reader`.
///
/// The first line of the table is a column header and is skipped; every
/// remaining line becomes one [`UdpNetEntry`].
pub fn read_udp_table<R: Read>(reader: BufReader<R>) -> ProcResult<Vec<UdpNetEntry>> {
    let mut vec = Vec::new();

    // first line is a header we need to skip
    for line in reader.lines().skip(1) {
        let line = line?;
        let mut s = line.split_whitespace();
        s.next(); // skip the slot number ("sl") column
        let local_address = expect!(s.next(), "udp::local_address");
        let rem_address = expect!(s.next(), "udp::rem_address");
        let state = expect!(s.next(), "udp::st");
        // The queue column is "tx_queue:rx_queue", both in hex.
        let mut tx_rx_queue = expect!(s.next(), "udp::tx_queue:rx_queue").splitn(2, ':');
        let tx_queue: u32 = from_str!(u32, expect!(tx_rx_queue.next(), "udp::tx_queue"), 16);
        let rx_queue: u32 = from_str!(u32, expect!(tx_rx_queue.next(), "udp::rx_queue"), 16);
        s.next(); // skip tr and tm->when
        s.next(); // skip retrnsmt
        s.next(); // skip uid
        s.next(); // skip timeout
        let inode = expect!(s.next(), "udp::inode");
        vec.push(UdpNetEntry {
            local_address: parse_addressport_str(local_address)?,
            remote_address: parse_addressport_str(rem_address)?,
            rx_queue,
            tx_queue,
            state: expect!(UdpState::from_u8(from_str!(u8, state, 16))),
            inode: from_str!(u64, inode),
        });
    }

    Ok(vec)
}
/// Reads the tcp socket table
///
/// This data is from the `/proc/net/tcp` file.
pub fn tcp() -> ProcResult<Vec<TcpNetEntry>> {
    let file = FileWrapper::open("/proc/net/tcp")?;
    read_tcp_table(BufReader::new(file))
}
/// Reads the tcp6 socket table
///
/// This data is from the `/proc/net/tcp6` file.
pub fn tcp6() -> ProcResult<Vec<TcpNetEntry>> {
    let file = FileWrapper::open("/proc/net/tcp6")?;
    read_tcp_table(BufReader::new(file))
}
/// Reads the udp socket table
///
/// This data is from the `/proc/net/udp` file.
pub fn udp() -> ProcResult<Vec<UdpNetEntry>> {
    let file = FileWrapper::open("/proc/net/udp")?;
    read_udp_table(BufReader::new(file))
}
/// Reads the udp6 socket table
///
/// This data is from the `/proc/net/udp6` file.
pub fn udp6() -> ProcResult<Vec<UdpNetEntry>> {
    let file = FileWrapper::open("/proc/net/udp6")?;
    read_udp_table(BufReader::new(file))
}
/// Reads the unix socket table
///
/// This data is from the `/proc/net/unix` file.
pub fn unix() -> ProcResult<Vec<UnixNetEntry>> {
    let reader = BufReader::new(FileWrapper::open("/proc/net/unix")?);
    let mut entries = Vec::new();
    // The first line is a column header; skip it.
    for line in reader.lines().skip(1) {
        let line = line?;
        let mut fields = line.split_whitespace();
        fields.next(); // table slot number
        let ref_count = from_str!(u32, expect!(fields.next()), 16);
        // nth(2) skips the protocol (always zero) and internal kernel flags
        // columns and yields the socket type.
        let socket_type = from_str!(u16, expect!(fields.nth(2)), 16);
        let state = from_str!(u8, expect!(fields.next()), 16);
        let inode = from_str!(u64, expect!(fields.next()));
        // The path column is optional (unnamed sockets have none).
        // NOTE(review): whitespace-splitting means a path containing spaces
        // would be truncated here — same behavior as before this rewrite.
        let path = fields.next().map(PathBuf::from);
        entries.push(UnixNetEntry {
            ref_count,
            socket_type,
            inode,
            state: expect!(UnixState::from_u8(state)),
            path,
        });
    }
    Ok(entries)
}
/// An entry in the ARP table
#[derive(Debug, Clone)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
pub struct ARPEntry {
    /// IPv4 address
    pub ip_address: Ipv4Addr,
    /// Hardware type
    ///
    /// This will almost always be ETHER (or maybe INFINIBAND)
    pub hw_type: ARPHardware,
    /// Internal kernel flags
    pub flags: ARPFlags,
    /// MAC Address
    ///
    /// `None` when the table reports an all-zero hardware address (i.e. the
    /// address is not known).
    pub hw_address: Option<[u8; 6]>,
    /// Device name
    pub device: String,
}
bitflags! {
    /// Hardware type for an ARP table entry.
    // source: include/uapi/linux/if_arp.h
    #[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
    pub struct ARPHardware: u32 {
        /// NET/ROM pseudo
        const NETROM = 0;
        /// Ethernet
        const ETHER = 1;
        /// Experimental ethernet
        const EETHER = 2;
        /// AX.25 Level 2
        const AX25 = 3;
        /// PROnet token ring
        const PRONET = 4;
        /// Chaosnet
        const CHAOS = 5;
        /// IEEE 802.2 Ethernet/TR/TB
        const IEEE802 = 6;
        /// Arcnet
        const ARCNET = 7;
        /// APPLEtalk
        const APPLETLK = 8;
        /// Frame Relay DLCI
        const DLCI = 15;
        /// ATM
        const ATM = 19;
        /// Metricom STRIP
        const METRICOM = 23;
        /// IEEE 1394 IPv4 - RFC 2734
        const IEEE1394 = 24;
        /// EUI-64
        const EUI64 = 27;
        /// InfiniBand
        const INFINIBAND = 32;
    }
}
bitflags! {
    /// Flags for ARP entries
    // source: include/uapi/linux/if_arp.h
    #[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
    pub struct ARPFlags: u32 {
        /// Completed entry
        const COM = 0x02;
        /// Permanent entry
        const PERM = 0x04;
        /// Publish entry
        const PUBL = 0x08;
        /// Has requested trailers
        const USETRAILERS = 0x10;
        /// Want to use a netmask (only for proxy entries)
        const NETMASK = 0x20;
        /// Don't answer this address
        const DONTPUB = 0x40;
    }
}
/// Reads the ARP table
///
/// This data is from the `/proc/net/arp` file.
pub fn arp() -> ProcResult<Vec<ARPEntry>> {
    let reader = BufReader::new(FileWrapper::open("/proc/net/arp")?);
    let mut entries = Vec::new();
    // The first line is a column header; skip it.
    for line in reader.lines().skip(1) {
        // Check if there might have been an IO error.
        let line = line?;
        let mut fields = line.split_whitespace();
        let ip_address = expect!(Ipv4Addr::from_str(expect!(fields.next())));
        // HW type and flags are printed as "0x"-prefixed hex; strip the prefix.
        let hw_type = ARPHardware::from_bits_truncate(from_str!(u32, &expect!(fields.next())[2..], 16));
        let flags = ARPFlags::from_bits_truncate(from_str!(u32, &expect!(fields.next())[2..], 16));
        let octets: Vec<Result<u8, _>> = expect!(fields.next())
            .split(':')
            .map(|s| Ok(from_str!(u8, s, 16)))
            .collect();
        let hw_address = if octets.len() == 6 {
            let mut mac = [0u8; 6];
            // Unwrap from the last octet to the first, matching the previous
            // pop-based order so the first parse error reported is the same.
            for (i, octet) in octets.into_iter().enumerate().rev() {
                mac[i] = octet?;
            }
            // An all-zero address means the hardware address is unknown.
            if mac == [0u8; 6] {
                None
            } else {
                Some(mac)
            }
        } else {
            None
        };
        let _mask = expect!(fields.next()); // mask is always "*"
        let device = expect!(fields.next());
        entries.push(ARPEntry {
            ip_address,
            hw_type,
            flags,
            hw_address,
            device: device.to_string(),
        });
    }
    Ok(entries)
}
/// General statistics for a network interface/device
///
/// For an example, see the [interface_stats.rs](https://github.com/eminence/procfs/tree/master/examples)
/// example in the source repo.
#[derive(Debug, Clone)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
pub struct DeviceStatus {
    /// Name of the interface
    pub name: String,
    /// Total bytes received
    pub recv_bytes: u64,
    /// Total packets received
    pub recv_packets: u64,
    /// Bad packets received
    pub recv_errs: u64,
    /// Packets dropped
    pub recv_drop: u64,
    /// Fifo overrun
    pub recv_fifo: u64,
    /// Frame alignment errors
    pub recv_frame: u64,
    /// Number of compressed packets received
    pub recv_compressed: u64,
    /// Number of multicast packets received
    pub recv_multicast: u64,
    /// Total bytes transmitted
    pub sent_bytes: u64,
    /// Total packets transmitted
    pub sent_packets: u64,
    /// Number of transmission errors
    pub sent_errs: u64,
    /// Number of packets dropped during transmission
    pub sent_drop: u64,
    /// Number of fifo errors during transmission
    pub sent_fifo: u64,
    /// Number of collisions
    pub sent_colls: u64,
    /// Number of packets not sent due to carrier errors
    pub sent_carrier: u64,
    /// Number of compressed packets transmitted
    pub sent_compressed: u64,
}
impl DeviceStatus {
    /// Parses a single data line of `/proc/net/dev`.
    ///
    /// A line holds the interface name followed by 16 numeric columns:
    /// 8 receive counters, then 8 transmit counters, in a fixed order.
    fn from_str(s: &str) -> ProcResult<DeviceStatus> {
        let mut fields = s.split_whitespace();
        let name: String = expect!(from_iter(&mut fields));
        // Read the 16 numeric columns in file order.
        let mut counters = [0u64; 16];
        for counter in counters.iter_mut() {
            *counter = expect!(from_iter(&mut fields));
        }
        Ok(DeviceStatus {
            // The interface name column ends with a ':' separator.
            name: name.trim_end_matches(':').to_owned(),
            recv_bytes: counters[0],
            recv_packets: counters[1],
            recv_errs: counters[2],
            recv_drop: counters[3],
            recv_fifo: counters[4],
            recv_frame: counters[5],
            recv_compressed: counters[6],
            recv_multicast: counters[7],
            sent_bytes: counters[8],
            sent_packets: counters[9],
            sent_errs: counters[10],
            sent_drop: counters[11],
            sent_fifo: counters[12],
            sent_colls: counters[13],
            sent_carrier: counters[14],
            sent_compressed: counters[15],
        })
    }
}
/// Returns basic network device statistics for all interfaces
///
/// This data is from the `/proc/net/dev` file.
///
/// For an example, see the [interface_stats.rs](https://github.com/eminence/procfs/tree/master/examples)
/// example in the source repo.
pub fn dev_status() -> ProcResult<HashMap<String, DeviceStatus>> {
    let file = FileWrapper::open("/proc/net/dev")?;
    let reader = BufReader::new(file);
    // The first two lines are headers; each following line describes one
    // interface.  Collect into a map keyed by interface name.
    reader
        .lines()
        .skip(2)
        .map(|line| {
            let status = DeviceStatus::from_str(&line?)?;
            Ok((status.name.clone(), status))
        })
        .collect()
}
/// An entry in the ipv4 route table
#[derive(Debug, Clone)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
pub struct RouteEntry {
    /// Interface to which packets for this route will be sent
    pub iface: String,
    /// The destination network or destination host
    pub destination: Ipv4Addr,
    /// The gateway address through which packets for this route are sent
    pub gateway: Ipv4Addr,
    /// Route flags, as the raw bitfield reported by the kernel
    pub flags: u16,
    /// Number of references to this route
    pub refcnt: u16,
    /// Count of lookups for the route
    pub in_use: u16,
    /// The 'distance' to the target (usually counted in hops)
    pub metrics: u32,
    /// The netmask for the destination
    pub mask: Ipv4Addr,
    /// Default maximum transmission unit for TCP connections over this route
    pub mtu: u32,
    /// Default window size for TCP connections over this route
    pub window: u32,
    /// Initial RTT (Round Trip Time)
    pub irtt: u32,
}
/// Reads the ipv4 route table
///
/// This data is from the `/proc/net/route` file
pub fn route() -> ProcResult<Vec<RouteEntry>> {
let file = FileWrapper::open("/proc/net/route")?;
let reader = BufReader::new(file);
let mut vec = Vec::new();
// First line is a header we need to skip
for line in reader.lines().skip(1) {
// Check if there might have been an IO error.
let line = line?;
let mut line = line.split_whitespace();
// network interface name, e.g. eth0
let iface = expect!(line.next());
let destination = from_str!(u32, expect!(line.next()), 16).to_ne_bytes().into();
let gateway = from_str!(u32, expect!(line.next()), 16).to_ne_bytes().into();
let flags = from_str!(u16, expect!(line.next()), 16);
let refcnt = from_str!(u16, expect!(line.next()), 10);
let in_use = from_str!(u16, expect!(line.next()), 10);
let metrics = from_str!(u32, expect!(line.next()), 10);
let mask = from_str!(u32, expect!(line.next()), 16).to_ne_bytes().into();
let mtu = from_str!(u32, expect!(line.next()), 10);
let window = from_str!(u32, expect!(line.next()), 10);
let irtt = from_str!(u32, expect!(line.next()), 10);
vec.push(RouteEntry {
iface: iface.to_string(),
destination,
gateway,
flags,
refcnt,
in_use,
metrics,
mask,
mtu,
window,
irtt,
});
}
Ok(vec)
}
#[cfg(test)]
mod tests {
    use super::*;
    use std::net::IpAddr;
    // Unit test for the hex address:port parser, covering IPv4, IPv6, and a
    // malformed (too-short) address.
    #[test]
    fn test_parse_ipaddr() {
        use std::str::FromStr;
        let addr = parse_addressport_str("0100007F:1234").unwrap();
        assert_eq!(addr.port(), 0x1234);
        match addr.ip() {
            IpAddr::V4(addr) => assert_eq!(addr, Ipv4Addr::new(127, 0, 0, 1)),
            _ => panic!("Not IPv4"),
        }
        // When you connect to [2a00:1450:4001:814::200e]:80 (ipv6.google.com) the entry with
        // 5014002A14080140000000000E200000:0050 remote endpoint is created in /proc/net/tcp6
        // on Linux 4.19.
        let addr = parse_addressport_str("5014002A14080140000000000E200000:0050").unwrap();
        assert_eq!(addr.port(), 80);
        match addr.ip() {
            IpAddr::V6(addr) => assert_eq!(addr, Ipv6Addr::from_str("2a00:1450:4001:814::200e").unwrap()),
            _ => panic!("Not IPv6"),
        }
        // IPv6 test case from https://stackoverflow.com/questions/41940483/parse-ipv6-addresses-from-proc-net-tcp6-python-2-7/41948004#41948004
        let addr = parse_addressport_str("B80D01200000000067452301EFCDAB89:0").unwrap();
        assert_eq!(addr.port(), 0);
        match addr.ip() {
            IpAddr::V6(addr) => assert_eq!(addr, Ipv6Addr::from_str("2001:db8::123:4567:89ab:cdef").unwrap()),
            _ => panic!("Not IPv6"),
        }
        // An 8-hex-digit-short address must be rejected, not mis-parsed.
        let addr = parse_addressport_str("1234:1234");
        assert!(addr.is_err());
    }
    #[test]
    fn test_tcpstate_from() {
        assert_eq!(TcpState::from_u8(0xA).unwrap(), TcpState::Listen);
    }
    // The remaining tests are smoke tests: they parse the live /proc files on
    // the machine running the tests and check that nothing errors or panics.
    #[test]
    fn test_tcp() {
        for entry in tcp().unwrap() {
            println!("{:?}", entry);
            // from_u8/to_u8 must round-trip for every state seen in the wild.
            assert_eq!(entry.state, TcpState::from_u8(entry.state.to_u8()).unwrap());
        }
    }
    #[test]
    fn test_tcp6() {
        for entry in tcp6().unwrap() {
            println!("{:?}", entry);
            assert_eq!(entry.state, TcpState::from_u8(entry.state.to_u8()).unwrap());
        }
    }
    #[test]
    fn test_udp() {
        for entry in udp().unwrap() {
            println!("{:?}", entry);
            assert_eq!(entry.state, UdpState::from_u8(entry.state.to_u8()).unwrap());
        }
    }
    #[test]
    fn test_udp6() {
        for entry in udp6().unwrap() {
            println!("{:?}", entry);
        }
    }
    #[test]
    fn test_unix() {
        for entry in unix().unwrap() {
            println!("{:?}", entry);
        }
    }
    #[test]
    fn test_dev_status() {
        let status = dev_status().unwrap();
        println!("{:#?}", status);
    }
    #[test]
    fn test_arp() {
        for entry in arp().unwrap() {
            println!("{:?}", entry);
        }
    }
    #[test]
    fn test_route() {
        for entry in route().unwrap() {
            println!("{:?}", entry);
        }
    }
}

229
vendor/procfs/src/pressure.rs vendored Normal file
View file

@ -0,0 +1,229 @@
//! Pressure stall information retrieved from `/proc/pressure/cpu`,
//! `/proc/pressure/memory` and `/proc/pressure/io`
//! may not be available on kernels older than 4.20.0
//! For reference: <https://lwn.net/Articles/759781/>
//!
//! See also: <https://www.kernel.org/doc/Documentation/accounting/psi.txt>
use crate::{ProcError, ProcResult};
use std::collections::HashMap;
#[cfg(feature = "serde1")]
use serde::{Deserialize, Serialize};
/// Pressure stall information for either CPU, memory, or IO.
///
/// The average fields are percentages in the range 0–100.
///
/// See also: <https://www.kernel.org/doc/Documentation/accounting/psi.txt>
#[derive(Debug, Clone)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
pub struct PressureRecord {
    /// 10 second window
    ///
    /// The percentage of time, over a 10 second window, that either some or all tasks were stalled
    /// waiting for a resource.
    pub avg10: f32,
    /// 60 second window
    ///
    /// The percentage of time, over a 60 second window, that either some or all tasks were stalled
    /// waiting for a resource.
    pub avg60: f32,
    /// 300 second window
    ///
    /// The percentage of time, over a 300 second window, that either some or all tasks were stalled
    /// waiting for a resource.
    pub avg300: f32,
    /// Total stall time (in microseconds).
    pub total: u64,
}
/// CPU pressure information
#[derive(Debug, Clone)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
pub struct CpuPressure {
    /// This record indicates the share of time in which at least some tasks are stalled
    pub some: PressureRecord,
}
impl CpuPressure {
    /// Get CPU pressure information from `/proc/pressure/cpu`.
    ///
    /// Only the first line is read, since this struct carries just the
    /// "some" record.
    pub fn new() -> ProcResult<CpuPressure> {
        use std::fs::File;
        use std::io::{BufRead, BufReader};
        let mut first_line = String::new();
        BufReader::new(File::open("/proc/pressure/cpu")?).read_line(&mut first_line)?;
        parse_pressure_record(&first_line).map(|some| CpuPressure { some })
    }
}
/// Memory pressure information
#[derive(Debug, Clone)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
pub struct MemoryPressure {
    /// This record indicates the share of time in which at least some tasks are stalled
    pub some: PressureRecord,
    /// This record indicates the share of time in which all non-idle tasks are stalled
    /// simultaneously.
    pub full: PressureRecord,
}
impl MemoryPressure {
    /// Get memory pressure information from `/proc/pressure/memory`.
    pub fn new() -> ProcResult<MemoryPressure> {
        get_pressure("memory").map(|(some, full)| MemoryPressure { some, full })
    }
}
/// IO pressure information
#[derive(Debug, Clone)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
pub struct IoPressure {
    /// This record indicates the share of time in which at least some tasks are stalled
    pub some: PressureRecord,
    /// This record indicates the share of time in which all non-idle tasks are stalled
    /// simultaneously.
    pub full: PressureRecord,
}
impl IoPressure {
    /// Get IO pressure information from `/proc/pressure/io`.
    pub fn new() -> ProcResult<IoPressure> {
        get_pressure("io").map(|(some, full)| IoPressure { some, full })
    }
}
/// Looks up `value` in `map` and parses it as an `f32`.
///
/// Returns `ProcError::Incomplete` when the key is missing or unparsable.
fn get_f32(map: &HashMap<&str, &str>, value: &str) -> ProcResult<f32> {
    match map.get(value) {
        Some(raw) => raw.parse::<f32>().map_err(|_| ProcError::Incomplete(None)),
        None => Err(ProcError::Incomplete(None)),
    }
}
/// Looks up the `total` key in `map` and parses it as a `u64`.
///
/// Returns `ProcError::Incomplete` when the key is missing or unparsable.
fn get_total(map: &HashMap<&str, &str>) -> ProcResult<u64> {
    match map.get("total") {
        Some(raw) => raw.parse::<u64>().map_err(|_| ProcError::Incomplete(None)),
        None => Err(ProcError::Incomplete(None)),
    }
}
/// Parses one record line of a `/proc/pressure/*` file.
///
/// A line looks like:
/// `some avg10=0.00 avg60=0.00 avg300=0.00 total=0`
fn parse_pressure_record(line: &str) -> ProcResult<PressureRecord> {
    let mut parsed = HashMap::new();
    // Every record line must be tagged as either "some" or "full".
    if !line.starts_with("some") && !line.starts_with("full") {
        return Err(ProcError::Incomplete(None));
    }
    // Skip the tag and the following space.  Use `get` instead of slicing so
    // that a truncated line (e.g. just "some") yields an error instead of a
    // byte-index panic.
    let values = match line.get(5..) {
        Some(values) => values,
        None => return Err(ProcError::Incomplete(None)),
    };
    // Collect the "key=value" pairs; malformed pairs are silently skipped and
    // surface later as missing keys.
    for kv_str in values.split_whitespace() {
        let kv_split = kv_str.split('=');
        let vec: Vec<&str> = kv_split.collect();
        if vec.len() == 2 {
            parsed.insert(vec[0], vec[1]);
        }
    }
    Ok(PressureRecord {
        avg10: get_f32(&parsed, "avg10")?,
        avg60: get_f32(&parsed, "avg60")?,
        avg300: get_f32(&parsed, "avg300")?,
        total: get_total(&parsed)?,
    })
}
/// Reads the "some" and "full" records (in that order) from
/// `/proc/pressure/<pressure_file>`.
fn get_pressure(pressure_file: &str) -> ProcResult<(PressureRecord, PressureRecord)> {
    use std::fs::File;
    use std::io::{BufRead, BufReader};
    let mut reader = BufReader::new(File::open(format!("/proc/pressure/{}", pressure_file))?);
    // The first line is the "some" record, the second line the "full" record.
    let mut some = String::new();
    let mut full = String::new();
    reader.read_line(&mut some)?;
    reader.read_line(&mut full)?;
    Ok((parse_pressure_record(&some)?, parse_pressure_record(&full)?))
}
#[cfg(test)]
mod test {
    use super::*;
    use std::f32::EPSILON;
    use std::path::Path;
    /// A PSI percentage must lie in `[0, 100)`.
    #[allow(clippy::manual_range_contains)]
    fn valid_percentage(value: f32) -> bool {
        value >= 0.00 && value < 100.0
    }
    #[test]
    fn test_parse_pressure_record() {
        let record = parse_pressure_record("full avg10=2.10 avg60=0.12 avg300=0.00 total=391926").unwrap();
        // Compare the absolute difference: `x - expected < EPSILON` alone is
        // vacuously true whenever x is smaller than expected, so it would
        // never catch a value that parsed too low.
        assert!((record.avg10 - 2.10).abs() < EPSILON);
        assert!((record.avg60 - 0.12).abs() < EPSILON);
        assert!((record.avg300 - 0.00).abs() < EPSILON);
        assert_eq!(record.total, 391_926);
    }
    #[test]
    fn test_parse_pressure_record_errs() {
        // Missing tag, missing avg60, and missing total must each fail.
        assert!(parse_pressure_record("avg10=2.10 avg60=0.12 avg300=0.00 total=391926").is_err());
        assert!(parse_pressure_record("some avg10=2.10 avg300=0.00 total=391926").is_err());
        assert!(parse_pressure_record("some avg10=2.10 avg60=0.00 avg300=0.00").is_err());
    }
    #[test]
    fn test_mem_pressure() {
        // PSI may be unavailable (e.g. kernels older than 4.20); skip if so.
        if !Path::new("/proc/pressure/memory").exists() {
            return;
        }
        let mem_psi = MemoryPressure::new().unwrap();
        assert!(valid_percentage(mem_psi.some.avg10));
        assert!(valid_percentage(mem_psi.some.avg60));
        assert!(valid_percentage(mem_psi.some.avg300));
        assert!(valid_percentage(mem_psi.full.avg10));
        assert!(valid_percentage(mem_psi.full.avg60));
        assert!(valid_percentage(mem_psi.full.avg300));
    }
    #[test]
    fn test_io_pressure() {
        if !Path::new("/proc/pressure/io").exists() {
            return;
        }
        let io_psi = IoPressure::new().unwrap();
        assert!(valid_percentage(io_psi.some.avg10));
        assert!(valid_percentage(io_psi.some.avg60));
        assert!(valid_percentage(io_psi.some.avg300));
        assert!(valid_percentage(io_psi.full.avg10));
        assert!(valid_percentage(io_psi.full.avg60));
        assert!(valid_percentage(io_psi.full.avg300));
    }
    #[test]
    fn test_cpu_pressure() {
        if !Path::new("/proc/pressure/cpu").exists() {
            return;
        }
        let cpu_psi = CpuPressure::new().unwrap();
        assert!(valid_percentage(cpu_psi.some.avg10));
        assert!(valid_percentage(cpu_psi.some.avg60));
        assert!(valid_percentage(cpu_psi.some.avg300));
    }
}

309
vendor/procfs/src/process/limit.rs vendored Normal file
View file

@ -0,0 +1,309 @@
use crate::{FileWrapper, ProcError, ProcResult};
use std::collections::HashMap;
use std::io::{BufRead, BufReader, Read};
use std::str::FromStr;
#[cfg(feature = "serde1")]
use serde::{Deserialize, Serialize};
impl crate::process::Process {
    /// Return the limits for this process
    ///
    /// This data comes from the `/proc/[pid]/limits` file.
    pub fn limits(&self) -> ProcResult<Limits> {
        let file = FileWrapper::open_at(&self.root, &self.fd, "limits")?;
        Limits::from_reader(file)
    }
}
/// Process limits
///
/// For more details about each of these limits, see the `getrlimit` man page.
#[derive(Debug, Clone)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
pub struct Limits {
    /// Max Cpu Time
    ///
    /// This is a limit, in seconds, on the amount of CPU time that the process can consume.
    pub max_cpu_time: Limit,
    /// Max file size
    ///
    /// This is the maximum size in bytes of files that the process may create.
    pub max_file_size: Limit,
    /// Max data size
    ///
    /// This is the maximum size of the process's data segment (initialized data, uninitialized
    /// data, and heap).
    pub max_data_size: Limit,
    /// Max stack size
    ///
    /// This is the maximum size of the process stack, in bytes.
    pub max_stack_size: Limit,
    /// Max core file size
    ///
    /// This is the maximum size of a *core* file in bytes that the process may dump.
    pub max_core_file_size: Limit,
    /// Max resident set
    ///
    /// This is a limit (in bytes) on the process's resident set (the number of virtual pages
    /// resident in RAM).
    pub max_resident_set: Limit,
    /// Max processes
    ///
    /// This is a limit on the number of extant process (or, more precisely on Linux, threads) for
    /// the real user ID of the calling process.
    pub max_processes: Limit,
    /// Max open files
    ///
    /// This specifies a value one greater than the maximum file descriptor number that can be
    /// opened by this process.
    pub max_open_files: Limit,
    /// Max locked memory
    ///
    /// This is the maximum number of bytes of memory that may be locked into RAM.
    pub max_locked_memory: Limit,
    /// Max address space
    ///
    /// This is the maximum size of the process's virtual memory (address space).
    pub max_address_space: Limit,
    /// Max file locks
    ///
    /// This is a limit on the combined number of flock locks and fcntl leases that this process
    /// may establish.
    pub max_file_locks: Limit,
    /// Max pending signals
    ///
    /// This is a limit on the number of signals that may be queued for the real user ID of the
    /// calling process.
    pub max_pending_signals: Limit,
    /// Max msgqueue size
    ///
    /// This is a limit on the number of bytes that can be allocated for POSIX message queues for
    /// the real user ID of the calling process.
    pub max_msgqueue_size: Limit,
    /// Max nice priority
    ///
    /// This specifies a ceiling to which the process's nice value can be raised using
    /// `setpriority` or `nice`.
    pub max_nice_priority: Limit,
    /// Max realtime priority
    ///
    /// This specifies a ceiling on the real-time priority that may be set for this process using
    /// `sched_setscheduler` and `sched_setparam`.
    pub max_realtime_priority: Limit,
    /// Max realtime timeout
    ///
    /// This is a limit (in microseconds) on the amount of CPU time that a process scheduled under
    /// a real-time scheduling policy may consume without making a blocking system call.
    pub max_realtime_timeout: Limit,
}
impl Limits {
    /// Parses the contents of a `/proc/[pid]/limits` file.
    ///
    /// Each data line has the form `<name>  <soft>  <hard>  [units]`.  The
    /// "Max nice priority" and "Max realtime priority" lines carry no units
    /// column, so their values sit at different offsets from the end of the
    /// line.
    fn from_reader<R: Read>(r: R) -> ProcResult<Limits> {
        let bufread = BufReader::new(r);
        let mut lines = bufread.lines();
        let mut map = HashMap::new();
        while let Some(Ok(line)) = lines.next() {
            let line = line.trim();
            // Skip the "Limit ..." header line.
            if line.starts_with("Limit") {
                continue;
            }
            let s: Vec<_> = line.split_whitespace().collect();
            let l = s.len();
            // Skip blank or truncated lines: the end-relative offsets below
            // (l - 2, l - 3) would underflow and panic for fewer than three
            // fields.
            if l < 3 {
                continue;
            }
            let (hard_limit, soft_limit, name) =
                if line.starts_with("Max nice priority") || line.starts_with("Max realtime priority") {
                    // these two limits don't have units, and so need different offsets:
                    let hard_limit = expect!(s.get(l - 1)).to_owned();
                    let soft_limit = expect!(s.get(l - 2)).to_owned();
                    let name = s[0..l - 2].join(" ");
                    (hard_limit, soft_limit, name)
                } else {
                    let hard_limit = expect!(s.get(l - 2)).to_owned();
                    let soft_limit = expect!(s.get(l - 3)).to_owned();
                    let name = s[0..l - 3].join(" ");
                    (hard_limit, soft_limit, name)
                };
            let _units = expect!(s.get(l - 1));
            map.insert(name.to_owned(), (soft_limit.to_owned(), hard_limit.to_owned()));
        }
        let limits = Limits {
            max_cpu_time: Limit::from_pair(expect!(map.remove("Max cpu time")))?,
            max_file_size: Limit::from_pair(expect!(map.remove("Max file size")))?,
            max_data_size: Limit::from_pair(expect!(map.remove("Max data size")))?,
            max_stack_size: Limit::from_pair(expect!(map.remove("Max stack size")))?,
            max_core_file_size: Limit::from_pair(expect!(map.remove("Max core file size")))?,
            max_resident_set: Limit::from_pair(expect!(map.remove("Max resident set")))?,
            max_processes: Limit::from_pair(expect!(map.remove("Max processes")))?,
            max_open_files: Limit::from_pair(expect!(map.remove("Max open files")))?,
            max_locked_memory: Limit::from_pair(expect!(map.remove("Max locked memory")))?,
            max_address_space: Limit::from_pair(expect!(map.remove("Max address space")))?,
            max_file_locks: Limit::from_pair(expect!(map.remove("Max file locks")))?,
            max_pending_signals: Limit::from_pair(expect!(map.remove("Max pending signals")))?,
            max_msgqueue_size: Limit::from_pair(expect!(map.remove("Max msgqueue size")))?,
            max_nice_priority: Limit::from_pair(expect!(map.remove("Max nice priority")))?,
            max_realtime_priority: Limit::from_pair(expect!(map.remove("Max realtime priority")))?,
            max_realtime_timeout: Limit::from_pair(expect!(map.remove("Max realtime timeout")))?,
        };
        // In tests, verify that every line of the file was consumed above.
        if cfg!(test) {
            assert!(map.is_empty(), "Map isn't empty: {:?}", map);
        }
        Ok(limits)
    }
}
/// A single resource limit: the soft (current) and hard (maximum) values.
#[derive(Debug, Copy, Clone)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
pub struct Limit {
    /// The soft (current) limit value
    pub soft_limit: LimitValue,
    /// The hard (maximum) limit value
    pub hard_limit: LimitValue,
}
impl Limit {
    /// Builds a `Limit` from a `(soft, hard)` pair of raw limit strings.
    fn from_pair(l: (String, String)) -> ProcResult<Limit> {
        // Parse in tuple order so an error in the soft value surfaces first.
        Ok(Limit {
            soft_limit: LimitValue::from_str(&l.0)?,
            hard_limit: LimitValue::from_str(&l.1)?,
        })
    }
}
/// One value of a resource limit: either a concrete number or "unlimited".
#[derive(Debug, Copy, Clone)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
pub enum LimitValue {
    /// The resource is not limited
    Unlimited,
    /// The limit value
    Value(u64),
}
impl LimitValue {
    /// Converts to the `Option<u64>` shape used by `getrlimit`-style APIs:
    /// `Unlimited` becomes `None`, a concrete value becomes `Some`.
    #[cfg(test)]
    pub(crate) fn as_limit(&self) -> Option<u64> {
        if let LimitValue::Value(v) = *self {
            Some(v)
        } else {
            None
        }
    }
}
impl FromStr for LimitValue {
    type Err = ProcError;
    /// Parses either the literal `"unlimited"` or a decimal `u64` value.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        match s {
            "unlimited" => Ok(LimitValue::Unlimited),
            value => Ok(LimitValue::Value(from_str!(u64, value))),
        }
    }
}
#[cfg(test)]
mod tests {
    use super::Limit;
    use crate::*;
    use rustix::process::Resource;
    /// Checks one parsed limit against the live `getrlimit` values, labeling
    /// any assertion failure with the limit's name.
    fn check_limit(resource: Resource, limit: &Limit, label: &str) {
        let lim = rustix::process::getrlimit(resource);
        assert_eq!(lim.current, limit.soft_limit.as_limit(), "soft limit mismatch: {}", label);
        assert_eq!(lim.maximum, limit.hard_limit.as_limit(), "hard limit mismatch: {}", label);
    }
    /// Every limit parsed from `/proc/self/limits` must agree with what
    /// `getrlimit` reports for the same resource.
    #[test]
    fn test_limits() {
        let me = process::Process::myself().unwrap();
        let limits = me.limits().unwrap();
        println!("{:#?}", limits);
        check_limit(Resource::Cpu, &limits.max_cpu_time, "Max cpu time");
        check_limit(Resource::Fsize, &limits.max_file_size, "Max file size");
        check_limit(Resource::Data, &limits.max_data_size, "Max data size");
        check_limit(Resource::Stack, &limits.max_stack_size, "Max stack size");
        check_limit(Resource::Core, &limits.max_core_file_size, "Max core file size");
        check_limit(Resource::Rss, &limits.max_resident_set, "Max resident set");
        check_limit(Resource::Nproc, &limits.max_processes, "Max processes");
        check_limit(Resource::Nofile, &limits.max_open_files, "Max open files");
        check_limit(Resource::Memlock, &limits.max_locked_memory, "Max locked memory");
        check_limit(Resource::As, &limits.max_address_space, "Max address space");
        check_limit(Resource::Locks, &limits.max_file_locks, "Max file locks");
        check_limit(Resource::Sigpending, &limits.max_pending_signals, "Max pending signals");
        check_limit(Resource::Msgqueue, &limits.max_msgqueue_size, "Max msgqueue size");
        check_limit(Resource::Nice, &limits.max_nice_priority, "Max nice priority");
        check_limit(Resource::Rtprio, &limits.max_realtime_priority, "Max realtime priority");
        check_limit(Resource::Rttime, &limits.max_realtime_timeout, "Max realtime timeout");
    }
}

1626
vendor/procfs/src/process/mod.rs vendored Normal file

File diff suppressed because it is too large Load diff

656
vendor/procfs/src/process/mount.rs vendored Normal file
View file

@ -0,0 +1,656 @@
use bitflags::bitflags;
use crate::{from_iter, FileWrapper, ProcResult};
use std::collections::HashMap;
use std::io::{BufRead, BufReader, Lines, Read};
use std::path::PathBuf;
use std::time::Duration;
#[cfg(feature = "serde1")]
use serde::{Deserialize, Serialize};
bitflags! {
    /// NFS server capability flags: one bit per optional feature the server
    /// supports.
    // NOTE(review): the NFS_CAP_* names and bit positions appear to mirror
    // the kernel's nfs_fs_sb.h constants — verify against the kernel version
    // being targeted when updating.
    #[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
    pub struct NFSServerCaps: u32 {
        const NFS_CAP_READDIRPLUS = 1;
        const NFS_CAP_HARDLINKS = (1 << 1);
        const NFS_CAP_SYMLINKS = (1 << 2);
        const NFS_CAP_ACLS = (1 << 3);
        const NFS_CAP_ATOMIC_OPEN = (1 << 4);
        const NFS_CAP_LGOPEN = (1 << 5);
        const NFS_CAP_FILEID = (1 << 6);
        const NFS_CAP_MODE = (1 << 7);
        const NFS_CAP_NLINK = (1 << 8);
        const NFS_CAP_OWNER = (1 << 9);
        const NFS_CAP_OWNER_GROUP = (1 << 10);
        const NFS_CAP_ATIME = (1 << 11);
        const NFS_CAP_CTIME = (1 << 12);
        const NFS_CAP_MTIME = (1 << 13);
        const NFS_CAP_POSIX_LOCK = (1 << 14);
        const NFS_CAP_UIDGID_NOMAP = (1 << 15);
        const NFS_CAP_STATEID_NFSV41 = (1 << 16);
        const NFS_CAP_ATOMIC_OPEN_V1 = (1 << 17);
        const NFS_CAP_SECURITY_LABEL = (1 << 18);
        const NFS_CAP_SEEK = (1 << 19);
        const NFS_CAP_ALLOCATE = (1 << 20);
        const NFS_CAP_DEALLOCATE = (1 << 21);
        const NFS_CAP_LAYOUTSTATS = (1 << 22);
        const NFS_CAP_CLONE = (1 << 23);
        const NFS_CAP_COPY = (1 << 24);
        const NFS_CAP_OFFLOAD_CANCEL = (1 << 25);
    }
}
impl super::Process {
    /// Returns the [MountStat] data for this process's mount namespace.
    pub fn mountstats(&self) -> ProcResult<Vec<MountStat>> {
        let file = FileWrapper::open_at(&self.root, &self.fd, "mountstats")?;
        MountStat::from_reader(file)
    }
    /// Returns info about the mountpoints in this process's mount namespace
    ///
    /// This data is taken from the `/proc/[pid]/mountinfo` file
    ///
    /// (Since Linux 2.6.26)
    pub fn mountinfo(&self) -> ProcResult<Vec<MountInfo>> {
        let file = FileWrapper::open_at(&self.root, &self.fd, "mountinfo")?;
        // Parse each line into a MountInfo; the first IO or parse error
        // aborts the whole read.
        BufReader::new(file)
            .lines()
            .map(|line| MountInfo::from_line(&line?))
            .collect()
    }
}
/// Information about a specific mount in a process's mount namespace.
///
/// This data is taken from the `/proc/[pid]/mountinfo` file.
///
/// For an example, see the [mountinfo.rs](https://github.com/eminence/procfs/tree/master/examples)
/// example in the source repo.
#[derive(Debug, Clone)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
pub struct MountInfo {
    /// Mount ID. A unique ID for the mount (but may be reused after `unmount`)
    pub mnt_id: i32,
    /// Parent mount ID. The ID of the parent mount (or of self for the root of the mount
    /// namespace's mount tree).
    ///
    /// (Note: despite this field's name, it holds the parent *mount* ID, not a process ID.)
    ///
    /// If the parent mount point lies outside the process's root directory, the ID shown here
    /// won't have a corresponding record in mountinfo whose mount ID matches this parent mount
    /// ID (because mount points that lie outside the process's root directory are not shown in
    /// mountinfo). As a special case of this point, the process's root mount point may have a
    /// parent mount (for the initramfs filesystem) that lies outside the process's root
    /// directory, and an entry for that mount point will not appear in mountinfo.
    pub pid: i32,
    /// The value of `st_dev` for files on this filesystem
    pub majmin: String,
    /// The pathname of the directory in the filesystem which forms the root of this mount.
    pub root: String,
    /// The pathname of the mount point relative to the process's root directory.
    pub mount_point: PathBuf,
    /// Per-mount options
    ///
    /// Each entry maps an option name to its value, or to `None` for options
    /// without a `key=value` form.
    pub mount_options: HashMap<String, Option<String>>,
    /// Optional fields
    pub opt_fields: Vec<MountOptFields>,
    /// Filesystem type
    pub fs_type: String,
    /// Mount source
    pub mount_source: Option<String>,
    /// Per-superblock options.
    pub super_options: HashMap<String, Option<String>>,
}
impl MountInfo {
    /// Parses a single line of `/proc/[pid]/mountinfo`.
    pub(crate) fn from_line(line: &str) -> ProcResult<MountInfo> {
        // Parses a comma-separated "key[=value]" option list (used for both the
        // per-mount and per-superblock option fields).
        fn option_map(all_opts: &str) -> ProcResult<HashMap<String, Option<String>>> {
            let mut map = HashMap::new();
            for opt in all_opts.split(',') {
                let mut kv = opt.splitn(2, '=');
                let key = expect!(kv.next());
                map.insert(key.to_owned(), kv.next().map(|v| v.to_owned()));
            }
            Ok(map)
        }

        let mut fields = line.split_whitespace();
        let mnt_id = expect!(from_iter(&mut fields));
        let pid = expect!(from_iter(&mut fields));
        let majmin: String = expect!(from_iter(&mut fields));
        let root = expect!(from_iter(&mut fields));
        let mount_point = expect!(from_iter(&mut fields));
        let mount_options = option_map(expect!(fields.next()))?;

        // Zero or more optional "tag[:value]" fields, terminated by a lone "-".
        let mut opt_fields = Vec::new();
        loop {
            let field = expect!(fields.next());
            if field == "-" {
                break;
            }
            let mut parts = field.split(':');
            match expect!(parts.next()) {
                "shared" => opt_fields.push(MountOptFields::Shared(expect!(from_iter(&mut parts)))),
                "master" => opt_fields.push(MountOptFields::Master(expect!(from_iter(&mut parts)))),
                "propagate_from" => opt_fields.push(MountOptFields::PropagateFrom(expect!(from_iter(&mut parts)))),
                "unbindable" => opt_fields.push(MountOptFields::Unbindable),
                // Unknown optional fields are skipped.
                _ => {}
            }
        }

        let fs_type: String = expect!(from_iter(&mut fields));
        // A literal "none" source is normalized to None.
        let mount_source = match expect!(fields.next()) {
            "none" => None,
            src => Some(src.to_owned()),
        };
        let super_options = option_map(expect!(fields.next()))?;

        Ok(MountInfo {
            mnt_id,
            pid,
            majmin,
            root,
            mount_point,
            mount_options,
            opt_fields,
            fs_type,
            mount_source,
            super_options,
        })
    }
}
/// Optional fields used in [MountInfo]
#[derive(Debug, Clone)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
pub enum MountOptFields {
    /// This mount point is shared in peer group. Each peer group has a unique ID that is
    /// automatically generated by the kernel, and all mount points in the same peer group will
    /// show the same ID
    Shared(u32),
    /// This mount is a slave to the specified shared peer group.
    Master(u32),
    /// This mount is a slave and receives propagation from the shared peer group
    PropagateFrom(u32),
    /// This is an unbindable mount
    Unbindable,
}
/// Mount information from `/proc/<pid>/mountstats`.
///
/// # Example:
///
/// ```
/// # use procfs::process::Process;
/// let stats = Process::myself().unwrap().mountstats().unwrap();
///
/// for mount in stats {
///     println!("{} mounted on {} with type {}",
///         mount.device.unwrap_or("??".to_owned()),
///         mount.mount_point.display(),
///         mount.fs
///     );
/// }
/// ```
#[derive(Debug, Clone)]
#[cfg_attr(test, derive(PartialEq))]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
pub struct MountStat {
    /// The name of the mounted device
    pub device: Option<String>,
    /// The mountpoint within the filesystem tree
    pub mount_point: PathBuf,
    /// The filesystem type
    pub fs: String,
    /// If the mount is NFS, this will contain various NFS statistics
    pub statistics: Option<MountNFSStatistics>,
}
impl MountStat {
    /// Parses an entire `mountstats` file, producing one entry per `device` line.
    pub fn from_reader<R: Read>(r: R) -> ProcResult<Vec<MountStat>> {
        let mut mounts = Vec::new();
        let reader = BufReader::new(r);
        let mut lines = reader.lines();
        while let Some(Ok(line)) = lines.next() {
            if !line.starts_with("device ") {
                continue;
            }
            // line will be of the format:
            // device proc mounted on /proc with fstype proc
            let mut words = line.split_whitespace();
            let device = Some(expect!(words.nth(1)).to_owned());
            let mount_point = PathBuf::from(expect!(words.nth(2)));
            let fs = expect!(words.nth(2)).to_owned();
            // An NFS mount carries a trailing "statvers=X.Y" token followed by a
            // multi-line statistics block, which is consumed from the same iterator.
            let statistics = match words.next() {
                Some(token) if token.starts_with("statvers=") => {
                    Some(MountNFSStatistics::from_lines(&mut lines, &token[9..])?)
                }
                _ => None,
            };
            mounts.push(MountStat {
                device,
                mount_point,
                fs,
                statistics,
            });
        }
        Ok(mounts)
    }
}
/// Only NFS mounts provide additional statistics in `MountStat` entries.
//
// Thank you to Chris Siebenmann for their helpful work in documenting these structures:
// https://utcc.utoronto.ca/~cks/space/blog/linux/NFSMountstatsIndex
#[derive(Debug, Clone)]
#[cfg_attr(test, derive(PartialEq))]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
pub struct MountNFSStatistics {
    /// The version of the NFS statistics block. Either "1.0" or "1.1".
    pub version: String,
    /// The mount options.
    ///
    /// The meaning of these can be found in the manual pages for mount(5) and nfs(5)
    pub opts: Vec<String>,
    /// Duration the NFS mount has been in existence.
    pub age: Duration,
    // * fsc (?)
    // * impl_id (NFSv4): Option<HashMap<String, Some(String)>>
    /// NFS Capabilities.
    ///
    /// See `include/linux/nfs_fs_sb.h`
    ///
    /// Some known values:
    /// * caps: server capabilities.  See [NFSServerCaps].
    /// * wtmult: server disk block size
    /// * dtsize: readdir size
    /// * bsize: server block size
    pub caps: Vec<String>,
    // * nfsv4 (NFSv4): Option<HashMap<String, Some(String)>>
    /// Raw comma-separated tokens from the `sec:` line (e.g. `flavor=...`).
    pub sec: Vec<String>,
    /// Event counters parsed from the `events:` line.
    pub events: NFSEventCounter,
    /// Byte counters parsed from the `bytes:` line.
    pub bytes: NFSByteCounter,
    // * RPC iostats version:
    // * xprt
    // * per-op statistics
    /// Per-operation statistics, keyed by operation name (e.g. "READ").
    pub per_op_stats: NFSPerOpStats,
}
impl MountNFSStatistics {
    /// Parses the multi-line NFS statistics block that follows a `device ... statvers=X.Y`
    /// line, consuming lines from `r` until a blank line (or EOF) is reached.
    ///
    /// `statsver` is the version string already extracted from the `statvers=` token.
    fn from_lines<B: BufRead>(r: &mut Lines<B>, statsver: &str) -> ProcResult<MountNFSStatistics> {
        let mut parsing_per_op = false;

        let mut opts: Option<Vec<String>> = None;
        let mut age = None;
        let mut caps = None;
        let mut sec = None;
        let mut bytes = None;
        let mut events = None;
        let mut per_op = HashMap::new();

        while let Some(Ok(line)) = r.next() {
            let line = line.trim();
            // A blank line terminates the statistics block.
            // (Fixed: the original called `.trim()` a second time on an already-trimmed str.)
            if line.is_empty() {
                break;
            }
            if !parsing_per_op {
                if let Some(stripped) = line.strip_prefix("opts:") {
                    opts = Some(stripped.trim().split(',').map(|s| s.to_string()).collect());
                } else if let Some(stripped) = line.strip_prefix("age:") {
                    // age is reported in whole seconds
                    age = Some(Duration::from_secs(from_str!(u64, stripped.trim())));
                } else if let Some(stripped) = line.strip_prefix("caps:") {
                    caps = Some(stripped.trim().split(',').map(|s| s.to_string()).collect());
                } else if let Some(stripped) = line.strip_prefix("sec:") {
                    sec = Some(stripped.trim().split(',').map(|s| s.to_string()).collect());
                } else if let Some(stripped) = line.strip_prefix("bytes:") {
                    bytes = Some(NFSByteCounter::from_str(stripped.trim())?);
                } else if let Some(stripped) = line.strip_prefix("events:") {
                    events = Some(NFSEventCounter::from_str(stripped.trim())?);
                }
                // Everything after this marker line is per-op "NAME: counters" data.
                if line == "per-op statistics" {
                    parsing_per_op = true;
                }
            } else {
                let mut split = line.split(':');
                let name = expect!(split.next()).to_string();
                let stats = NFSOperationStat::from_str(expect!(split.next()))?;
                per_op.insert(name, stats);
            }
        }

        Ok(MountNFSStatistics {
            version: statsver.to_string(),
            opts: expect!(opts, "Failed to find opts field in nfs stats"),
            age: expect!(age, "Failed to find age field in nfs stats"),
            caps: expect!(caps, "Failed to find caps field in nfs stats"),
            sec: expect!(sec, "Failed to find sec field in nfs stats"),
            events: expect!(events, "Failed to find events section in nfs stats"),
            bytes: expect!(bytes, "Failed to find bytes section in nfs stats"),
            per_op_stats: per_op,
        })
    }

    /// Attempts to parse the caps= value from the [caps](struct.MountNFSStatistics.html#structfield.caps) field.
    ///
    /// Returns `Ok(None)` when no `caps=0x...` token is present or the bits don't map to
    /// known [NFSServerCaps] flags.
    pub fn server_caps(&self) -> ProcResult<Option<NFSServerCaps>> {
        for data in &self.caps {
            if let Some(stripped) = data.strip_prefix("caps=0x") {
                // The value is a hexadecimal bitfield.
                let val = from_str!(u32, stripped, 16);
                return Ok(NFSServerCaps::from_bits(val));
            }
        }
        Ok(None)
    }
}
/// Represents NFS data from `/proc/<pid>/mountstats` under the section `events`.
///
/// The underlying data structure in the kernel can be found under *fs/nfs/iostat.h* `nfs_iostat`.
/// The fields are documented in the kernel source only under *include/linux/nfs_iostat.h* `enum
/// nfs_stat_eventcounters`.
///
/// Field order matches the order of the counters on the `events:` line.
#[derive(Debug, Copy, Clone)]
#[cfg_attr(test, derive(PartialEq))]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
pub struct NFSEventCounter {
    pub inode_revalidate: u64,
    pub deny_try_revalidate: u64,
    pub data_invalidate: u64,
    pub attr_invalidate: u64,
    pub vfs_open: u64,
    pub vfs_lookup: u64,
    pub vfs_access: u64,
    pub vfs_update_page: u64,
    pub vfs_read_page: u64,
    pub vfs_read_pages: u64,
    pub vfs_write_page: u64,
    pub vfs_write_pages: u64,
    pub vfs_get_dents: u64,
    pub vfs_set_attr: u64,
    pub vfs_flush: u64,
    pub vfs_fs_sync: u64,
    pub vfs_lock: u64,
    pub vfs_release: u64,
    pub congestion_wait: u64,
    pub set_attr_trunc: u64,
    pub extend_write: u64,
    pub silly_rename: u64,
    pub short_read: u64,
    pub short_write: u64,
    pub delay: u64,
    pub pnfs_read: u64,
    pub pnfs_write: u64,
}
impl NFSEventCounter {
    /// Parses the 27 whitespace-separated counters of an `events:` line.
    ///
    /// NOTE: the reads below are order-sensitive — they must match the field order
    /// emitted by the kernel (see `enum nfs_stat_eventcounters`).
    fn from_str(s: &str) -> ProcResult<NFSEventCounter> {
        let mut s = s.split_whitespace();
        Ok(NFSEventCounter {
            inode_revalidate: from_str!(u64, expect!(s.next())),
            deny_try_revalidate: from_str!(u64, expect!(s.next())),
            data_invalidate: from_str!(u64, expect!(s.next())),
            attr_invalidate: from_str!(u64, expect!(s.next())),
            vfs_open: from_str!(u64, expect!(s.next())),
            vfs_lookup: from_str!(u64, expect!(s.next())),
            vfs_access: from_str!(u64, expect!(s.next())),
            vfs_update_page: from_str!(u64, expect!(s.next())),
            vfs_read_page: from_str!(u64, expect!(s.next())),
            vfs_read_pages: from_str!(u64, expect!(s.next())),
            vfs_write_page: from_str!(u64, expect!(s.next())),
            vfs_write_pages: from_str!(u64, expect!(s.next())),
            vfs_get_dents: from_str!(u64, expect!(s.next())),
            vfs_set_attr: from_str!(u64, expect!(s.next())),
            vfs_flush: from_str!(u64, expect!(s.next())),
            vfs_fs_sync: from_str!(u64, expect!(s.next())),
            vfs_lock: from_str!(u64, expect!(s.next())),
            vfs_release: from_str!(u64, expect!(s.next())),
            congestion_wait: from_str!(u64, expect!(s.next())),
            set_attr_trunc: from_str!(u64, expect!(s.next())),
            extend_write: from_str!(u64, expect!(s.next())),
            silly_rename: from_str!(u64, expect!(s.next())),
            short_read: from_str!(u64, expect!(s.next())),
            short_write: from_str!(u64, expect!(s.next())),
            delay: from_str!(u64, expect!(s.next())),
            pnfs_read: from_str!(u64, expect!(s.next())),
            pnfs_write: from_str!(u64, expect!(s.next())),
        })
    }
}
/// Represents NFS data from `/proc/<pid>/mountstats` under the section `bytes`.
///
/// The underlying data structure in the kernel can be found under *fs/nfs/iostat.h* `nfs_iostat`.
/// The fields are documented in the kernel source only under *include/linux/nfs_iostat.h* `enum
/// nfs_stat_bytecounters`
///
/// Field order matches the order of the counters on the `bytes:` line.
#[derive(Debug, Copy, Clone)]
#[cfg_attr(test, derive(PartialEq))]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
pub struct NFSByteCounter {
    pub normal_read: u64,
    pub normal_write: u64,
    pub direct_read: u64,
    pub direct_write: u64,
    pub server_read: u64,
    pub server_write: u64,
    pub pages_read: u64,
    pub pages_write: u64,
}
impl NFSByteCounter {
    /// Parses the eight whitespace-separated counters of a `bytes:` line.
    ///
    /// NOTE: the reads below are order-sensitive — they must match the field order
    /// emitted by the kernel (see `enum nfs_stat_bytecounters`).
    fn from_str(s: &str) -> ProcResult<NFSByteCounter> {
        let mut s = s.split_whitespace();
        Ok(NFSByteCounter {
            normal_read: from_str!(u64, expect!(s.next())),
            normal_write: from_str!(u64, expect!(s.next())),
            direct_read: from_str!(u64, expect!(s.next())),
            direct_write: from_str!(u64, expect!(s.next())),
            server_read: from_str!(u64, expect!(s.next())),
            server_write: from_str!(u64, expect!(s.next())),
            pages_read: from_str!(u64, expect!(s.next())),
            pages_write: from_str!(u64, expect!(s.next())),
        })
    }
}
/// Represents NFS data from `/proc/<pid>/mountstats` under the section of `per-op statistics`.
///
/// Here is what the Kernel says about the attributes:
///
/// Regarding `operations`, `transmissions` and `major_timeouts`:
///
/// >  These counters give an idea about how many request
/// >  transmissions are required, on average, to complete that
/// >  particular procedure.  Some procedures may require more
/// >  than one transmission because the server is unresponsive,
/// >  the client is retransmitting too aggressively, or the
/// >  requests are large and the network is congested.
///
/// Regarding `bytes_sent` and `bytes_recv`:
///
/// >  These count how many bytes are sent and received for a
/// >  given RPC procedure type.  This indicates how much load a
/// >  particular procedure is putting on the network.  These
/// >  counts include the RPC and ULP headers, and the request
/// >  payload.
///
/// Regarding `cum_queue_time`, `cum_resp_time` and `cum_total_req_time`:
///
/// >  The length of time an RPC request waits in queue before
/// >  transmission, the network + server latency of the request,
/// >  and the total time the request spent from init to release
/// >  are measured.
///
/// (source: *include/linux/sunrpc/metrics.h* `struct rpc_iostats`)
#[derive(Debug, Clone)]
#[cfg_attr(test, derive(PartialEq))]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
pub struct NFSOperationStat {
    /// Count of rpc operations.
    pub operations: u64,
    /// Count of rpc transmissions
    pub transmissions: u64,
    /// Count of rpc major timeouts
    pub major_timeouts: u64,
    /// Count of bytes sent.  Includes not only the RPC payload but also the RPC headers.
    pub bytes_sent: u64,
    /// Count of bytes received, measured like `bytes_sent`.
    pub bytes_recv: u64,
    /// How long all requests have spent in the queue before being sent.
    pub cum_queue_time: Duration,
    /// How long it took to get a response back.
    pub cum_resp_time: Duration,
    /// How long all requests have taken from being queued to the point they were completely
    /// handled.
    pub cum_total_req_time: Duration,
}
impl NFSOperationStat {
    /// Parses a single per-op statistics line: eight whitespace-separated integers,
    /// the last three of which are cumulative times in milliseconds.
    fn from_str(s: &str) -> ProcResult<NFSOperationStat> {
        let mut fields = s.split_whitespace();

        let operations = from_str!(u64, expect!(fields.next()));
        let transmissions = from_str!(u64, expect!(fields.next()));
        let major_timeouts = from_str!(u64, expect!(fields.next()));
        let bytes_sent = from_str!(u64, expect!(fields.next()));
        let bytes_recv = from_str!(u64, expect!(fields.next()));

        // Struct-literal fields are evaluated in textual order, so the three
        // remaining millisecond counters are consumed in the right sequence.
        Ok(NFSOperationStat {
            operations,
            transmissions,
            major_timeouts,
            bytes_sent,
            bytes_recv,
            cum_queue_time: Duration::from_millis(from_str!(u64, expect!(fields.next()))),
            cum_resp_time: Duration::from_millis(from_str!(u64, expect!(fields.next()))),
            cum_total_req_time: Duration::from_millis(from_str!(u64, expect!(fields.next()))),
        })
    }
}
/// Per-operation NFS statistics, keyed by operation name (e.g. "READ", "WRITE").
pub type NFSPerOpStats = HashMap<String, NFSOperationStat>;
#[cfg(test)]
mod tests {
    use crate::process::*;
    use std::time::Duration;

    #[test]
    fn test_mountinfo() {
        // A representative mountinfo line with one optional "shared:N" field.
        let s = "25 0 8:1 / / rw,relatime shared:1 - ext4 /dev/sda1 rw,errors=remount-ro";
        let stat = MountInfo::from_line(s).unwrap();
        println!("{:?}", stat);
    }

    #[test]
    fn test_mountinfo_live() {
        // Smoke test: parse the live mountinfo of the test process (no assertions,
        // just checking for parse panics/errors).
        let me = Process::myself().unwrap();
        let mounts = me.mountinfo().unwrap();
        println!("{:#?}", mounts);
    }

    #[test]
    fn test_proc_mountstats() {
        // Non-NFS mounts produce entries with no statistics block.
        let simple = MountStat::from_reader(
            "device /dev/md127 mounted on /boot with fstype ext2
device /dev/md124 mounted on /home with fstype ext4
device tmpfs mounted on /run/user/0 with fstype tmpfs
"
            .as_bytes(),
        )
        .unwrap();
        let simple_parsed = vec![
            MountStat {
                device: Some("/dev/md127".to_string()),
                mount_point: PathBuf::from("/boot"),
                fs: "ext2".to_string(),
                statistics: None,
            },
            MountStat {
                device: Some("/dev/md124".to_string()),
                mount_point: PathBuf::from("/home"),
                fs: "ext4".to_string(),
                statistics: None,
            },
            MountStat {
                device: Some("tmpfs".to_string()),
                mount_point: PathBuf::from("/run/user/0"),
                fs: "tmpfs".to_string(),
                statistics: None,
            },
        ];
        assert_eq!(simple, simple_parsed);
        // An NFSv4 mount with a statvers=1.1 statistics block.
        let mountstats = MountStat::from_reader("device elwe:/space mounted on /srv/elwe/space with fstype nfs4 statvers=1.1
opts: rw,vers=4.1,rsize=131072,wsize=131072,namlen=255,acregmin=3,acregmax=60,acdirmin=30,acdirmax=60,hard,proto=tcp,port=0,timeo=600,retrans=2,sec=krb5,clientaddr=10.0.1.77,local_lock=none
age: 3542
impl_id: name='',domain='',date='0,0'
caps: caps=0x3ffdf,wtmult=512,dtsize=32768,bsize=0,namlen=255
nfsv4: bm0=0xfdffbfff,bm1=0x40f9be3e,bm2=0x803,acl=0x3,sessions,pnfs=not configured
sec: flavor=6,pseudoflavor=390003
events: 114 1579 5 3 132 20 3019 1 2 3 4 5 115 1 4 1 2 4 3 4 5 6 7 8 9 0 1
bytes: 1 2 3 4 5 6 7 8
RPC iostats version: 1.0 p/v: 100003/4 (nfs)
xprt: tcp 909 0 1 0 2 294 294 0 294 0 2 0 0
per-op statistics
NULL: 0 0 0 0 0 0 0 0
READ: 1 2 3 4 5 6 7 8
WRITE: 0 0 0 0 0 0 0 0
COMMIT: 0 0 0 0 0 0 0 0
OPEN: 1 1 0 320 420 0 124 124
".as_bytes()).unwrap();
        let nfs_v4 = &mountstats[0];
        match &nfs_v4.statistics {
            Some(stats) => {
                assert_eq!("1.1".to_string(), stats.version, "mountstats version wrongly parsed.");
                assert_eq!(Duration::from_secs(3542), stats.age);
                assert_eq!(1, stats.bytes.normal_read);
                assert_eq!(114, stats.events.inode_revalidate);
                assert!(stats.server_caps().unwrap().is_some());
            }
            None => {
                panic!("Failed to retrieve nfs statistics");
            }
        }
    }

    #[test]
    fn test_proc_mountstats_live() {
        // this tries to parse a live mountstats file
        // there are no assertions, but we still want to check for parsing errors (which can
        // cause panics)
        let stats = MountStat::from_reader(FileWrapper::open("/proc/self/mountstats").unwrap()).unwrap();
        for stat in stats {
            println!("{:#?}", stat);
            if let Some(nfs) = stat.statistics {
                println!("  {:?}", nfs.server_caps().unwrap());
            }
        }
    }
}

94
vendor/procfs/src/process/namespaces.rs vendored Normal file
View file

@ -0,0 +1,94 @@
use rustix::fs::{AtFlags, Mode, OFlags};
use std::{collections::HashMap, ffi::OsString, path::PathBuf};
#[cfg(feature = "serde1")]
use serde::{Deserialize, Serialize};
use crate::ProcResult;
use super::Process;
impl Process {
    /// Describes namespaces to which the process with the corresponding PID belongs.
    /// Doc reference: <https://man7.org/linux/man-pages/man7/namespaces.7.html>
    /// The namespace type is the key for the HashMap, i.e 'net', 'user', etc.
    pub fn namespaces(&self) -> ProcResult<HashMap<OsString, Namespace>> {
        let mut namespaces = HashMap::new();
        // Open /proc/<pid>/ns relative to the process directory fd we already hold.
        let dir_ns = wrap_io_error!(
            self.root.join("ns"),
            rustix::fs::openat(
                &self.fd,
                "ns",
                OFlags::RDONLY | OFlags::DIRECTORY | OFlags::CLOEXEC,
                Mode::empty()
            )
        )?;
        let dir = wrap_io_error!(self.root.join("ns"), rustix::fs::Dir::read_from(&dir_ns))?;
        for entry in dir {
            let entry = entry.map_err(|_| build_internal_error!(format!("Unable to get ns dir entry")))?;
            // Skip the "." and ".." directory entries.
            match entry.file_name().to_bytes() {
                b"." | b".." => continue,
                _ => {}
            };
            // NOTE(review): to_string_lossy() would mangle non-UTF-8 entry names;
            // /proc/<pid>/ns entries are ASCII in practice, but confirm.
            let path = self.root.join("ns").join(entry.file_name().to_string_lossy().as_ref());
            let ns_type = OsString::from(entry.file_name().to_string_lossy().as_ref());
            // The entry's inode and device are recorded as the namespace's identity.
            let stat = rustix::fs::statat(&dir_ns, entry.file_name(), AtFlags::empty())
                .map_err(|_| build_internal_error!(format!("Unable to stat {:?}", path)))?;
            // Each namespace type is expected to appear at most once; a duplicate
            // key indicates something unexpected in /proc and is reported as an error.
            if let Some(n) = namespaces.insert(
                ns_type.clone(),
                Namespace {
                    ns_type,
                    path,
                    identifier: stat.st_ino,
                    device_id: stat.st_dev,
                },
            ) {
                return Err(build_internal_error!(format!(
                    "NsType appears more than once {:?}",
                    n.ns_type
                )));
            }
        }
        Ok(namespaces)
    }
}
/// Information about a namespace
///
/// See also the [Process::namespaces()] method
#[derive(Debug, Clone)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
pub struct Namespace {
    /// Namespace type (e.g. "net", "user")
    pub ns_type: OsString,
    /// Handle to the namespace
    pub path: PathBuf,
    /// Namespace identifier (inode number)
    pub identifier: u64,
    /// Device id of the namespace
    pub device_id: u64,
}

impl PartialEq for Namespace {
    fn eq(&self, other: &Self) -> bool {
        // Equality deliberately ignores `ns_type` and `path`: two handles refer to the
        // same namespace iff their (device, inode) pair matches.
        // see https://lore.kernel.org/lkml/87poky5ca9.fsf@xmission.com/
        self.identifier == other.identifier && self.device_id == other.device_id
    }
}

impl Eq for Namespace {}
#[cfg(test)]
mod tests {
    use crate::process::Process;

    #[test]
    fn test_namespaces() {
        // Smoke test against the live /proc of the test process: no assertions,
        // just checking that the ns directory parses without error.
        let myself = Process::myself().unwrap();
        let namespaces = myself.namespaces().unwrap();
        print!("{:?}", namespaces);
    }
}

195
vendor/procfs/src/process/pagemap.rs vendored Normal file
View file

@ -0,0 +1,195 @@
use crate::{FileWrapper, ProcResult};
use bitflags::bitflags;
use std::{
io::{BufReader, Read, Seek, SeekFrom},
mem::size_of,
ops::{Bound, RangeBounds},
};
#[cfg(feature = "serde1")]
use serde::{Deserialize, Serialize};
/// Builds a `u64` mask with bits `low..=high` set (both ends inclusive),
/// analogous to the kernel's `GENMASK` macro.
const fn genmask(high: usize, low: usize) -> u64 {
    let bits = u64::BITS as usize;
    // All ones at bit `low` and above.
    let from_low = !0u64 << low;
    // All ones at bit `high` and below (written as a right-shift so that
    // `high == 63` does not overflow the shift amount).
    let up_to_high = !0u64 >> (bits - 1 - high);
    from_low & up_to_high
}

// source: include/linux/swap.h
const MAX_SWAPFILES_SHIFT: usize = 5;
// source: fs/proc/task_mmu.c
// source: fs/proc/task_mmu.c
bitflags! {
    /// Represents the fields and flags in a page table entry for a swapped page.
    #[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
    pub struct SwapPageFlags: u64 {
        /// Swap type if swapped (bits 0-4)
        #[doc(hidden)]
        const SWAP_TYPE = genmask(MAX_SWAPFILES_SHIFT - 1, 0);
        /// Swap offset if swapped (bits 5-54)
        #[doc(hidden)]
        const SWAP_OFFSET = genmask(54, MAX_SWAPFILES_SHIFT);
        /// PTE is soft-dirty
        const SOFT_DIRTY = 1 << 55;
        /// Page is exclusively mapped
        const MMAP_EXCLUSIVE = 1 << 56;
        /// Page is file-page or shared-anon
        const FILE = 1 << 61;
        /// Page is swapped (always set for this entry kind)
        #[doc(hidden)]
        const SWAP = 1 << 62;
        /// Page is present
        const PRESENT = 1 << 63;
    }
}
impl SwapPageFlags {
    /// Returns the swap type recorded in this entry (bits 0-4).
    pub fn get_swap_type(&self) -> u64 {
        (*self & Self::SWAP_TYPE).bits()
    }

    /// Returns the swap offset recorded in this entry (bits 5-54, shifted down to a plain value).
    pub fn get_swap_offset(&self) -> u64 {
        (*self & Self::SWAP_OFFSET).bits() >> MAX_SWAPFILES_SHIFT
    }
}
bitflags! {
    /// Represents the fields and flags in a page table entry for a memory page.
    #[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
    pub struct MemoryPageFlags: u64 {
        /// Page frame number if present (bits 0-54)
        #[doc(hidden)]
        const PFN = genmask(54, 0);
        /// PTE is soft-dirty
        const SOFT_DIRTY = 1 << 55;
        /// Page is exclusively mapped
        const MMAP_EXCLUSIVE = 1 << 56;
        /// Page is file-page or shared-anon
        const FILE = 1 << 61;
        /// Page is swapped (never set for this entry kind)
        #[doc(hidden)]
        const SWAP = 1 << 62;
        /// Page is present
        const PRESENT = 1 << 63;
    }
}
impl MemoryPageFlags {
    /// Returns the page frame number recorded in this entry (bits 0-54).
    pub fn get_page_frame_number(&self) -> u64 {
        (*self & Self::PFN).bits()
    }
}
/// Represents a page table entry in `/proc/<pid>/pagemap`.
///
/// The variant is selected by the SWAP bit (bit 62) of the raw entry.
#[derive(Debug)]
pub enum PageInfo {
    /// Entry referring to a memory page
    MemoryPage(MemoryPageFlags),
    /// Entry referring to a swapped page
    SwapPage(SwapPageFlags),
}
impl PageInfo {
    /// Decodes a raw 64-bit pagemap entry into the matching variant.
    pub(crate) fn parse_info(info: u64) -> Self {
        // Bit 62 (SWAP) selects which interpretation of the low bits applies.
        if MemoryPageFlags::from_bits_truncate(info).contains(MemoryPageFlags::SWAP) {
            Self::SwapPage(SwapPageFlags::from_bits_truncate(info))
        } else {
            Self::MemoryPage(MemoryPageFlags::from_bits_truncate(info))
        }
    }
}
/// Parses page table entries accessing `/proc/<pid>/pagemap`.
pub struct PageMap {
    // Buffered handle to the open pagemap file; entries are read via seek + read_exact.
    reader: BufReader<FileWrapper>,
}
impl PageMap {
    /// Wraps an already-open `pagemap` file in a buffered reader.
    pub(crate) fn from_file_wrapper(file: FileWrapper) -> Self {
        Self {
            reader: BufReader::new(file),
        }
    }

    /// Retrieves information in the page table entry for the page at index `page_index`.
    pub fn get_info(&mut self, page_index: usize) -> ProcResult<PageInfo> {
        self.get_range_info(page_index..page_index + 1)
            .map(|mut vec| vec.pop().unwrap())
    }

    /// Retrieves information in the page table entry for the pages with index in range `page_range`.
    ///
    /// Each page's entry is a `u64` stored at file offset `index * 8`.
    pub fn get_range_info(&mut self, page_range: impl RangeBounds<usize>) -> ProcResult<Vec<PageInfo>> {
        // `start` is always included
        let start = match page_range.start_bound() {
            Bound::Included(v) => *v,
            Bound::Excluded(v) => *v + 1,
            Bound::Unbounded => 0,
        };
        // `end` is always excluded
        let end = match page_range.end_bound() {
            Bound::Included(v) => *v + 1,
            Bound::Excluded(v) => *v,
            // Unbounded ranges are capped at usize::MAX / page_size, i.e. the number of
            // pages addressable in a usize address space.
            // NOTE(review): `page_size().unwrap()` panics if the page size can't be
            // determined — confirm this is acceptable here.
            Bound::Unbounded => std::usize::MAX / crate::page_size().unwrap() as usize,
        };
        // 8 bytes per entry.
        let start_position = (start * size_of::<u64>()) as u64;
        self.reader.seek(SeekFrom::Start(start_position))?;
        let mut page_infos = Vec::with_capacity((end - start) as usize);
        for _ in start..end {
            let mut info_bytes = [0; size_of::<u64>()];
            self.reader.read_exact(&mut info_bytes)?;
            // Entries are stored in native byte order.
            page_infos.push(PageInfo::parse_info(u64::from_ne_bytes(info_bytes)));
        }
        Ok(page_infos)
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_genmask() {
        let mask = genmask(3, 1);
        assert_eq!(mask, 0b1110);

        let mask = genmask(3, 0);
        assert_eq!(mask, 0b1111);

        let mask = genmask(63, 62);
        assert_eq!(mask, 0b11 << 62);
    }

    #[test]
    fn test_page_info() {
        // Bits 63 (PRESENT), 56 (MMAP_EXCLUSIVE), 55 (SOFT_DIRTY) set; PFN = 0b11.
        let pagemap_entry: u64 = 0b1000000110000000000000000000000000000000000000000000000000000011;
        let info = PageInfo::parse_info(pagemap_entry);
        if let PageInfo::MemoryPage(memory_flags) = info {
            assert!(memory_flags
                .contains(MemoryPageFlags::PRESENT | MemoryPageFlags::MMAP_EXCLUSIVE | MemoryPageFlags::SOFT_DIRTY));
            assert_eq!(memory_flags.get_page_frame_number(), 0b11);
        } else {
            panic!("Wrong SWAP decoding");
        }

        // Bit 62 (SWAP) set: decoded as a swap entry, type = 0b10 (bits 0-4),
        // offset = 0b11 (bits 5-54).
        let pagemap_entry: u64 = 0b1100000110000000000000000000000000000000000000000000000001100010;
        let info = PageInfo::parse_info(pagemap_entry);
        if let PageInfo::SwapPage(swap_flags) = info {
            assert!(
                swap_flags.contains(SwapPageFlags::PRESENT | SwapPageFlags::MMAP_EXCLUSIVE | SwapPageFlags::SOFT_DIRTY)
            );
            assert_eq!(swap_flags.get_swap_type(), 0b10);
            assert_eq!(swap_flags.get_swap_offset(), 0b11);
        } else {
            panic!("Wrong SWAP decoding");
        }
    }
}

42
vendor/procfs/src/process/schedstat.rs vendored Normal file
View file

@ -0,0 +1,42 @@
use crate::from_iter;
use crate::ProcResult;
use std::io::Read;
#[cfg(feature = "serde1")]
use serde::{Deserialize, Serialize};
/// Provides scheduler statistics of the process, based on the `/proc/<pid>/schedstat` file.
///
/// The file contains three whitespace-separated counters, parsed in order.
#[derive(Debug, Clone)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
pub struct Schedstat {
    /// Time spent on the cpu.
    ///
    /// Measured in nanoseconds.
    pub sum_exec_runtime: u64,
    /// Time spent waiting on a runqueue.
    ///
    /// Measured in nanoseconds.
    pub run_delay: u64,
    /// \# of timeslices run on this cpu.
    pub pcount: u64,
}
impl Schedstat {
    /// Parses the three whitespace-separated counters of a `schedstat` file.
    pub fn from_reader<R: Read>(mut r: R) -> ProcResult<Schedstat> {
        let mut contents = String::new();
        r.read_to_string(&mut contents)?;

        let mut fields = contents.split_whitespace();
        let schedstat = Schedstat {
            sum_exec_runtime: expect!(from_iter(&mut fields)),
            run_delay: expect!(from_iter(&mut fields)),
            pcount: expect!(from_iter(&mut fields)),
        };

        // When built for tests, verify the file carries no unexpected trailing data.
        if cfg!(test) {
            assert!(fields.next().is_none());
        }

        Ok(schedstat)
    }
}

View file

@ -0,0 +1,58 @@
use super::{MemoryMap, MemoryMapData};
use crate::{ProcError, ProcResult};
use std::io::{BufRead, BufReader, Read};
/// Aggregated memory-map data for a process, parsed from its `smaps_rollup` file.
#[derive(Debug)]
pub struct SmapsRollup {
    /// Parsed from the first line of the file (the address-range header).
    pub memory_map: MemoryMap,
    /// The `Key: Value` entries from the remaining lines; values carrying a size
    /// suffix are converted to bytes (multiplied by 1024).
    pub memory_map_data: MemoryMapData,
}
impl SmapsRollup {
    // this implementation is similar but not identical to Process::smaps()
    pub fn from_reader<R: Read>(r: R) -> ProcResult<SmapsRollup> {
        let mut memory_map = MemoryMap::new();
        let mut memory_map_data: MemoryMapData = Default::default();

        let mut seen_header = false;
        for line in BufReader::new(r).lines() {
            let line = line.map_err(|_| ProcError::Incomplete(None))?;

            // The first line is the single address-range header for the whole rollup.
            if !seen_header {
                memory_map = MemoryMap::from_line(&line)?;
                seen_header = true;
                continue;
            }

            let mut tokens = line.split_ascii_whitespace();
            if let (Some(key), Some(raw_value)) = (tokens.next(), tokens.next()) {
                // While most entries do have one, not all of them do.
                let has_size_suffix = tokens.next().is_some();
                // Limited poking at /proc/<pid>/smaps and then checking if "MB", "GB", and "TB" appear in the C file that is
                // supposedly responsible for creating smaps, has lead me to believe that the only size suffixes we'll ever encounter
                // "kB", which is most likely kibibytes. Actually checking if the size suffix is any of the above is a way to
                // future-proof the code, but I am not sure it is worth doing so.
                let multiplier = if has_size_suffix { 1024 } else { 1 };
                let value = raw_value
                    .parse::<u64>()
                    .map_err(|_| ProcError::Other("Value in `Key: Value` pair was not actually a number".into()))?;
                // This ignores the case when our Key: Value pairs are really Key Value pairs. Is this a good idea?
                let key = key.trim_end_matches(':');
                memory_map_data.map.insert(key.into(), value * multiplier);
            }
        }

        Ok(SmapsRollup {
            memory_map,
            memory_map_data,
        })
    }
}

426
vendor/procfs/src/process/stat.rs vendored Normal file
View file

@ -0,0 +1,426 @@
use super::ProcState;
use super::StatFlags;
#[cfg(feature = "chrono")]
use crate::TICKS_PER_SECOND;
use crate::{from_iter, KernelVersion, ProcResult};
use crate::{ProcError, KERNEL, PAGESIZE};
#[cfg(feature = "serde1")]
use serde::{Deserialize, Serialize};
use std::io::Read;
use std::str::FromStr;
// Evaluates to `Some($e)` when the running kernel version (the `KERNEL` global)
// is at least `$a.$b.$c`; evaluates to `None` when the kernel is older or its
// version could not be determined.
macro_rules! since_kernel {
    ($a:tt, $b:tt, $c:tt, $e:expr) => {
        if let Ok(kernel) = *KERNEL {
            if kernel >= KernelVersion::new($a, $b, $c) {
                Some($e)
            } else {
                None
            }
        } else {
            None
        }
    };
}
/// Status information about the process, based on the `/proc/<pid>/stat` file.
///
/// To construct one of these structures, you have to first create a [Process](crate::process::Process).
///
/// Not all fields are available in every kernel. These fields have `Option<T>` types.
///
/// New fields to this struct may be added at any time (even without a major or minor semver bump).
#[derive(Debug, Clone)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
#[non_exhaustive]
pub struct Stat {
/// The process ID.
pub pid: i32,
/// The filename of the executable, in parentheses.
///
/// This is visible whether or not the executable is swapped out.
///
/// Note that if the actual comm field contains invalid UTF-8 characters, they will be replaced
/// here by the U+FFFD replacement character.
pub comm: String,
/// Process State.
///
/// See [state()](#method.state) to get the process state as an enum.
pub state: char,
/// The PID of the parent of this process.
pub ppid: i32,
/// The process group ID of the process.
pub pgrp: i32,
/// The session ID of the process.
pub session: i32,
/// The controlling terminal of the process.
///
/// The minor device number is contained in the combination of bits 31 to 20 and 7 to 0;
/// the major device number is in bits 15 to 8.
///
/// See [tty_nr()](#method.tty_nr) to get this value decoded into a (major, minor) tuple
pub tty_nr: i32,
/// The ID of the foreground process group of the controlling terminal of the process.
pub tpgid: i32,
/// The kernel flags word of the process.
///
/// For bit meanings, see the PF_* defines in the Linux kernel source file
/// [`include/linux/sched.h`](https://github.com/torvalds/linux/blob/master/include/linux/sched.h).
///
/// See [flags()](#method.flags) to get a [`StatFlags`](struct.StatFlags.html) bitfield object.
pub flags: u32,
/// The number of minor faults the process has made which have not required loading a memory
/// page from disk.
pub minflt: u64,
/// The number of minor faults that the process's waited-for children have made.
pub cminflt: u64,
/// The number of major faults the process has made which have required loading a memory page
/// from disk.
pub majflt: u64,
/// The number of major faults that the process's waited-for children have made.
pub cmajflt: u64,
/// Amount of time that this process has been scheduled in user mode, measured in clock ticks
/// (divide by [`ticks_per_second()`](crate::ticks_per_second).
///
/// This includes guest time, guest_time (time spent running a virtual CPU, see below), so that
/// applications that are not aware of the guest time field do not lose that time from their
/// calculations.
pub utime: u64,
/// Amount of time that this process has been scheduled in kernel mode, measured in clock ticks
/// (divide by [`ticks_per_second()`](crate::ticks_per_second)).
pub stime: u64,
/// Amount of time that this process's waited-for children have been scheduled in
/// user mode, measured in clock ticks (divide by [`ticks_per_second()`](crate::ticks_per_second)).
///
/// This includes guest time, cguest_time (time spent running a virtual CPU, see below).
pub cutime: i64,
/// Amount of time that this process's waited-for children have been scheduled in kernel
/// mode, measured in clock ticks (divide by [`ticks_per_second()`](crate::ticks_per_second)).
pub cstime: i64,
/// For processes running a real-time scheduling policy (policy below; see sched_setscheduler(2)),
/// this is the negated scheduling priority, minus one;
///
/// That is, a number in the range -2 to -100,
/// corresponding to real-time priority 1 to 99. For processes running under a non-real-time
/// scheduling policy, this is the raw nice value (setpriority(2)) as represented in the kernel.
/// The kernel stores nice values as numbers in the range 0 (high) to 39 (low), corresponding
/// to the user-visible nice range of -20 to 19.
/// (This explanation is for Linux 2.6)
///
/// Before Linux 2.6, this was a scaled value based on the scheduler weighting given to this process.
pub priority: i64,
/// The nice value (see `setpriority(2)`), a value in the range 19 (low priority) to -20 (high priority).
pub nice: i64,
/// Number of threads in this process (since Linux 2.6). Before kernel 2.6, this field was
/// hard coded to 0 as a placeholder for an earlier removed field.
pub num_threads: i64,
/// The time in jiffies before the next SIGALRM is sent to the process due to an interval
/// timer.
///
/// Since kernel 2.6.17, this field is no longer maintained, and is hard coded as 0.
pub itrealvalue: i64,
/// The time the process started after system boot.
///
/// In kernels before Linux 2.6, this value was expressed in jiffies. Since Linux 2.6, the
/// value is expressed in clock ticks (divide by `sysconf(_SC_CLK_TCK)`).
///
#[cfg_attr(
feature = "chrono",
doc = "See also the [Stat::starttime()] method to get the starttime as a `DateTime` object"
)]
#[cfg_attr(
not(feature = "chrono"),
doc = "If you compile with the optional `chrono` feature, you can use the `starttime()` method to get the starttime as a `DateTime` object"
)]
pub starttime: u64,
/// Virtual memory size in bytes.
pub vsize: u64,
/// Resident Set Size: number of pages the process has in real memory.
///
/// This is just the pages which count toward text, data, or stack space.
/// This does not include pages which have not been demand-loaded in, or which are swapped out.
pub rss: u64,
/// Current soft limit in bytes on the rss of the process; see the description of RLIMIT_RSS in
/// getrlimit(2).
pub rsslim: u64,
/// The address above which program text can run.
pub startcode: u64,
/// The address below which program text can run.
pub endcode: u64,
/// The address of the start (i.e., bottom) of the stack.
pub startstack: u64,
/// The current value of ESP (stack pointer), as found in the kernel stack page for the
/// process.
pub kstkesp: u64,
/// The current EIP (instruction pointer).
pub kstkeip: u64,
/// The bitmap of pending signals, displayed as a decimal number. Obsolete, because it does
/// not provide information on real-time signals; use `/proc/<pid>/status` instead.
pub signal: u64,
/// The bitmap of blocked signals, displayed as a decimal number. Obsolete, because it does
/// not provide information on real-time signals; use `/proc/<pid>/status` instead.
pub blocked: u64,
/// The bitmap of ignored signals, displayed as a decimal number. Obsolete, because it does
/// not provide information on real-time signals; use `/proc/<pid>/status` instead.
pub sigignore: u64,
/// The bitmap of caught signals, displayed as a decimal number. Obsolete, because it does not
/// provide information on real-time signals; use `/proc/<pid>/status` instead.
pub sigcatch: u64,
/// This is the "channel" in which the process is waiting. It is the address of a location
/// in the kernel where the process is sleeping. The corresponding symbolic name can be found in
/// `/proc/<pid>/wchan`.
pub wchan: u64,
/// Number of pages swapped **(not maintained)**.
pub nswap: u64,
/// Cumulative nswap for child processes **(not maintained)**.
pub cnswap: u64,
/// Signal to be sent to parent when we die.
///
/// (since Linux 2.1.22)
pub exit_signal: Option<i32>,
/// CPU number last executed on.
///
/// (since Linux 2.2.8)
pub processor: Option<i32>,
/// Real-time scheduling priority
///
/// Real-time scheduling priority, a number in the range 1 to 99 for processes scheduled under a real-time policy, or 0, for non-real-time processes
///
/// (since Linux 2.5.19)
pub rt_priority: Option<u32>,
/// Scheduling policy (see sched_setscheduler(2)).
///
/// Decode using the `SCHED_*` constants in `linux/sched.h`.
///
/// (since Linux 2.5.19)
pub policy: Option<u32>,
/// Aggregated block I/O delays, measured in clock ticks (centiseconds).
///
/// (since Linux 2.6.18)
pub delayacct_blkio_ticks: Option<u64>,
/// Guest time of the process (time spent running a virtual CPU for a guest operating system),
/// measured in clock ticks (divide by [`ticks_per_second()`](crate::ticks_per_second))
///
/// (since Linux 2.6.24)
pub guest_time: Option<u64>,
/// Guest time of the process's children, measured in clock ticks (divide by
/// [`ticks_per_second()`](crate::ticks_per_second)).
///
/// (since Linux 2.6.24)
pub cguest_time: Option<i64>,
/// Address above which program initialized and uninitialized (BSS) data are placed.
///
/// (since Linux 3.3)
pub start_data: Option<u64>,
/// Address below which program initialized and uninitialized (BSS) data are placed.
///
/// (since Linux 3.3)
pub end_data: Option<u64>,
/// Address above which program heap can be expanded with brk(2).
///
/// (since Linux 3.3)
pub start_brk: Option<u64>,
/// Address above which program command-line arguments (argv) are placed.
///
/// (since Linux 3.5)
pub arg_start: Option<u64>,
/// Address below which program command-line arguments (argv) are placed.
///
/// (since Linux 3.5)
pub arg_end: Option<u64>,
/// Address above which program environment is placed.
///
/// (since Linux 3.5)
pub env_start: Option<u64>,
/// Address below which program environment is placed.
///
/// (since Linux 3.5)
pub env_end: Option<u64>,
/// The thread's exit status in the form reported by waitpid(2).
///
/// (since Linux 3.5)
pub exit_code: Option<i32>,
}
impl Stat {
    /// Parse a `Stat` from the single-line contents of a `stat` file
    /// (`/proc/<pid>/stat` or `/proc/<pid>/task/<tid>/stat`).
    ///
    /// The line has the form `pid (comm) state ppid ...`.  Because `comm` can
    /// itself contain spaces and parentheses, the line is split around the
    /// *first* `(` and the *last* `)` rather than purely on whitespace;
    /// everything after the closing paren is a space-separated sequence of
    /// numeric fields consumed strictly in order.
    ///
    /// Fields introduced in later kernels are wrapped in `since_kernel!`,
    /// which yields `None` when the running kernel predates the field (these
    /// map to the `Option<_>` struct fields).
    #[allow(clippy::cognitive_complexity)]
    pub fn from_reader<R: Read>(mut r: R) -> ProcResult<Stat> {
        // read in entire thing, this is only going to be 1 line
        let mut buf = Vec::with_capacity(512);
        r.read_to_end(&mut buf)?;
        let line = String::from_utf8_lossy(&buf);
        let buf = line.trim();

        // Find the first opening paren and the LAST closing paren: `comm` is
        // everything between them, the pid precedes them, and the numeric
        // fields follow.  Using rfind(')') is what makes parens inside the
        // command name safe.
        let start_paren = expect!(buf.find('('));
        let end_paren = expect!(buf.rfind(')'));
        let pid_s = &buf[..start_paren - 1];
        let comm = buf[start_paren + 1..end_paren].to_string();
        let rest = &buf[end_paren + 2..];
        let pid = expect!(FromStr::from_str(pid_s));

        // Each `from_iter` call consumes exactly one field, so the order of
        // the statements below must match the field order in proc(5).
        let mut rest = rest.split(' ');
        let state = expect!(expect!(rest.next()).chars().next());
        let ppid = expect!(from_iter(&mut rest));
        let pgrp = expect!(from_iter(&mut rest));
        let session = expect!(from_iter(&mut rest));
        let tty_nr = expect!(from_iter(&mut rest));
        let tpgid = expect!(from_iter(&mut rest));
        let flags = expect!(from_iter(&mut rest));
        let minflt = expect!(from_iter(&mut rest));
        let cminflt = expect!(from_iter(&mut rest));
        let majflt = expect!(from_iter(&mut rest));
        let cmajflt = expect!(from_iter(&mut rest));
        let utime = expect!(from_iter(&mut rest));
        let stime = expect!(from_iter(&mut rest));
        let cutime = expect!(from_iter(&mut rest));
        let cstime = expect!(from_iter(&mut rest));
        let priority = expect!(from_iter(&mut rest));
        let nice = expect!(from_iter(&mut rest));
        let num_threads = expect!(from_iter(&mut rest));
        let itrealvalue = expect!(from_iter(&mut rest));
        let starttime = expect!(from_iter(&mut rest));
        let vsize = expect!(from_iter(&mut rest));
        let rss = expect!(from_iter(&mut rest));
        let rsslim = expect!(from_iter(&mut rest));
        let startcode = expect!(from_iter(&mut rest));
        let endcode = expect!(from_iter(&mut rest));
        let startstack = expect!(from_iter(&mut rest));
        let kstkesp = expect!(from_iter(&mut rest));
        let kstkeip = expect!(from_iter(&mut rest));
        let signal = expect!(from_iter(&mut rest));
        let blocked = expect!(from_iter(&mut rest));
        let sigignore = expect!(from_iter(&mut rest));
        let sigcatch = expect!(from_iter(&mut rest));
        let wchan = expect!(from_iter(&mut rest));
        let nswap = expect!(from_iter(&mut rest));
        let cnswap = expect!(from_iter(&mut rest));

        // Everything from here on is version-gated: on older kernels the
        // field simply isn't present and `since_kernel!` produces None.
        let exit_signal = since_kernel!(2, 1, 22, expect!(from_iter(&mut rest)));
        let processor = since_kernel!(2, 2, 8, expect!(from_iter(&mut rest)));
        let rt_priority = since_kernel!(2, 5, 19, expect!(from_iter(&mut rest)));
        let policy = since_kernel!(2, 5, 19, expect!(from_iter(&mut rest)));
        let delayacct_blkio_ticks = since_kernel!(2, 6, 18, expect!(from_iter(&mut rest)));
        let guest_time = since_kernel!(2, 6, 24, expect!(from_iter(&mut rest)));
        let cguest_time = since_kernel!(2, 6, 24, expect!(from_iter(&mut rest)));
        let start_data = since_kernel!(3, 3, 0, expect!(from_iter(&mut rest)));
        let end_data = since_kernel!(3, 3, 0, expect!(from_iter(&mut rest)));
        let start_brk = since_kernel!(3, 3, 0, expect!(from_iter(&mut rest)));
        let arg_start = since_kernel!(3, 5, 0, expect!(from_iter(&mut rest)));
        let arg_end = since_kernel!(3, 5, 0, expect!(from_iter(&mut rest)));
        let env_start = since_kernel!(3, 5, 0, expect!(from_iter(&mut rest)));
        let env_end = since_kernel!(3, 5, 0, expect!(from_iter(&mut rest)));
        let exit_code = since_kernel!(3, 5, 0, expect!(from_iter(&mut rest)));

        Ok(Stat {
            pid,
            comm,
            state,
            ppid,
            pgrp,
            session,
            tty_nr,
            tpgid,
            flags,
            minflt,
            cminflt,
            majflt,
            cmajflt,
            utime,
            stime,
            cutime,
            cstime,
            priority,
            nice,
            num_threads,
            itrealvalue,
            starttime,
            vsize,
            rss,
            rsslim,
            startcode,
            endcode,
            startstack,
            kstkesp,
            kstkeip,
            signal,
            blocked,
            sigignore,
            sigcatch,
            wchan,
            nswap,
            cnswap,
            exit_signal,
            processor,
            rt_priority,
            policy,
            delayacct_blkio_ticks,
            guest_time,
            cguest_time,
            start_data,
            end_data,
            start_brk,
            arg_start,
            arg_end,
            env_start,
            env_end,
            exit_code,
        })
    }

    /// The process state, decoded from the raw single-character `state` field.
    ///
    /// Returns an internal error if the character is not one of the states
    /// known to [`ProcState::from_char`].
    pub fn state(&self) -> ProcResult<ProcState> {
        ProcState::from_char(self.state)
            .ok_or_else(|| build_internal_error!(format!("{:?} is not a recognized process state", self.state)))
    }

    /// Decode the raw `tty_nr` field into a `(major, minor)` device-number pair.
    pub fn tty_nr(&self) -> (i32, i32) {
        // Bit layout of tty_nr:
        //   minor: bits 31..20 and 7..0
        //   major: bits extracted by the 0xfff00 mask (bits 8..19)
        // mmmmmmmmmmmm____MMMMMMMMmmmmmmmm
        // NOTE(review): the 0xfff00 mask takes a 12-bit major, matching the
        // kernel's new_decode_dev(); the struct-field doc above says the major
        // is in bits 15..8 -- confirm which description is intended.
        let major = (self.tty_nr & 0xfff00) >> 8;
        let minor = (self.tty_nr & 0x000ff) | ((self.tty_nr >> 12) & 0xfff00);
        (major, minor)
    }

    /// The kernel flags word of the process, as a bitfield
    ///
    /// See also the [Stat::flags](struct.Stat.html#structfield.flags) field.
    ///
    /// Returns an internal error if the raw value contains bits not covered
    /// by [`StatFlags`].
    pub fn flags(&self) -> ProcResult<StatFlags> {
        StatFlags::from_bits(self.flags)
            .ok_or_else(|| build_internal_error!(format!("Can't construct flags bitfield from {:?}", self.flags)))
    }

    /// Get the starttime of the process as a `DateTime` object.
    ///
    /// See also the [`starttime`](struct.Stat.html#structfield.starttime) field.
    ///
    /// This function requires the "chrono" features to be enabled (which it is by default).
    #[cfg(feature = "chrono")]
    pub fn starttime(&self) -> ProcResult<chrono::DateTime<chrono::Local>> {
        let tts = TICKS_PER_SECOND
            .as_ref()
            .map_err(|e| ProcError::Other(format!("Failed to get ticks_per_second: {:?}", e)))?;
        // NOTE(review): f32 has a 24-bit mantissa, so this division loses
        // precision once the tick count grows large (a process started long
        // after boot); f64 would be more accurate -- confirm if that matters.
        let seconds_since_boot = self.starttime as f32 / *tts as f32;
        let boot_time = crate::boot_time()?;
        Ok(boot_time + chrono::Duration::milliseconds((seconds_since_boot * 1000.0) as i64))
    }

    /// Gets the Resident Set Size (in bytes)
    ///
    /// The `rss` field will return the same value in pages
    pub fn rss_bytes(&self) -> ProcResult<u64> {
        let pagesize = PAGESIZE
            .as_ref()
            .map_err(|e| ProcError::Other(format!("Failed to get pagesize: {:?}", e)))?;
        Ok(self.rss * *pagesize)
    }
}

391
vendor/procfs/src/process/status.rs vendored Normal file
View file

@ -0,0 +1,391 @@
use crate::{FromStrRadix, ProcResult};
use std::collections::HashMap;
use std::io::{BufRead, BufReader, Read};
#[cfg(feature = "serde1")]
use serde::{Deserialize, Serialize};
/// Status information about the process, based on the `/proc/<pid>/status` file.
///
/// To construct this structure, see [Process::status()](crate::process::Process::status).
///
/// Not all fields are available in every kernel. These fields have `Option<T>` types.
/// In general, the current kernel version will tell you what fields you can expect, but this
/// isn't totally reliable, since some kernels might backport certain fields, or fields might
/// only be present if certain kernel configuration options are enabled. Be prepared to
/// handle `None` values.
///
/// New fields to this struct may be added at any time (even without a major or minor semver bump).
#[derive(Debug, Clone)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
#[non_exhaustive]
pub struct Status {
    /// Command run by this process.
    pub name: String,
    /// Process umask, expressed in octal with a leading zero; see umask(2). (Since Linux 4.7.)
    pub umask: Option<u32>,
    /// Current state of the process.
    pub state: String,
    /// Thread group ID (i.e., Process ID).
    pub tgid: i32,
    /// NUMA group ID (0 if none; since Linux 3.13).
    pub ngid: Option<i32>,
    /// Thread ID (see gettid(2)).
    pub pid: i32,
    /// PID of parent process.
    pub ppid: i32,
    /// PID of process tracing this process (0 if not being traced).
    pub tracerpid: i32,
    /// Real UID.
    pub ruid: u32,
    /// Effective UID.
    pub euid: u32,
    /// Saved set UID.
    pub suid: u32,
    /// Filesystem UID.
    pub fuid: u32,
    /// Real GID.
    pub rgid: u32,
    /// Effective GID.
    pub egid: u32,
    /// Saved set GID.
    pub sgid: u32,
    /// Filesystem GID.
    pub fgid: u32,
    /// Number of file descriptor slots currently allocated.
    pub fdsize: u32,
    /// Supplementary group list.
    pub groups: Vec<i32>,
    /// Thread group ID (i.e., PID) in each of the PID
    /// namespaces of which [pid](struct.Status.html#structfield.pid) is a member. The leftmost entry
    /// shows the value with respect to the PID namespace of the
    /// reading process, followed by the value in successively
    /// nested inner namespaces. (Since Linux 4.1.)
    pub nstgid: Option<Vec<i32>>,
    /// Thread ID in each of the PID namespaces of which
    /// [pid](struct.Status.html#structfield.pid) is a member. The fields are ordered as for NStgid.
    /// (Since Linux 4.1.)
    pub nspid: Option<Vec<i32>>,
    /// Process group ID in each of the PID namespaces of
    /// which [pid](struct.Status.html#structfield.pid) is a member. The fields are ordered as for NStgid. (Since Linux 4.1.)
    pub nspgid: Option<Vec<i32>>,
    /// NSsid: descendant namespace session ID hierarchy Session ID
    /// in each of the PID namespaces of which [pid](struct.Status.html#structfield.pid) is a member.
    /// The fields are ordered as for NStgid. (Since Linux 4.1.)
    pub nssid: Option<Vec<i32>>,
    /// Peak virtual memory size, in kibibytes.
    pub vmpeak: Option<u64>,
    /// Virtual memory size, in kibibytes.
    pub vmsize: Option<u64>,
    /// Locked memory size, in kibibytes (see mlock(3)).
    pub vmlck: Option<u64>,
    /// Pinned memory size, in kibibytes (since Linux 3.2). These are
    /// pages that can't be moved because something needs to
    /// directly access physical memory.
    pub vmpin: Option<u64>,
    /// Peak resident set size, in kibibytes ("high water mark").
    pub vmhwm: Option<u64>,
    /// Resident set size, in kibibytes. Note that the value here is the
    /// sum of RssAnon, RssFile, and RssShmem.
    pub vmrss: Option<u64>,
    /// Size of resident anonymous memory, in kibibytes. (since Linux 4.5).
    pub rssanon: Option<u64>,
    /// Size of resident file mappings, in kibibytes. (since Linux 4.5).
    pub rssfile: Option<u64>,
    /// Size of resident shared memory, in kibibytes (includes System V
    /// shared memory, mappings from tmpfs(5), and shared anonymous
    /// mappings). (since Linux 4.5).
    pub rssshmem: Option<u64>,
    /// Size of data, in kibibytes.
    pub vmdata: Option<u64>,
    /// Size of stack, in kibibytes.
    pub vmstk: Option<u64>,
    /// Size of text segments, in kibibytes.
    pub vmexe: Option<u64>,
    /// Shared library code size, in kibibytes.
    pub vmlib: Option<u64>,
    /// Page table entries size, in kibibytes (since Linux 2.6.10).
    pub vmpte: Option<u64>,
    /// Swapped-out virtual memory size of anonymous private
    /// pages, in kibibytes; shmem swap usage is not included (since Linux 2.6.34).
    pub vmswap: Option<u64>,
    /// Size of hugetlb memory portions, in kB. (since Linux 4.4).
    pub hugetlbpages: Option<u64>,
    /// Number of threads in process containing this thread.
    pub threads: u64,
    /// This field contains two slash-separated numbers that
    /// relate to queued signals for the real user ID of this
    /// process. The first of these is the number of currently
    /// queued signals for this real user ID, and the second is the
    /// resource limit on the number of queued signals for this
    /// process (see the description of RLIMIT_SIGPENDING in
    /// getrlimit(2)).
    pub sigq: (u64, u64),
    /// Number of signals pending for thread (see pthreads(7) and signal(7)).
    pub sigpnd: u64,
    /// Number of signals pending for process as a whole (see pthreads(7) and signal(7)).
    pub shdpnd: u64,
    /// Masks indicating signals being blocked (see signal(7)).
    pub sigblk: u64,
    /// Masks indicating signals being ignored (see signal(7)).
    pub sigign: u64,
    /// Masks indicating signals being caught (see signal(7)).
    pub sigcgt: u64,
    /// Masks of capabilities enabled in inheritable sets (see capabilities(7)).
    pub capinh: u64,
    /// Masks of capabilities enabled in permitted sets (see capabilities(7)).
    pub capprm: u64,
    /// Masks of capabilities enabled in effective sets (see capabilities(7)).
    pub capeff: u64,
    /// Capability Bounding set (since Linux 2.6.26, see capabilities(7)).
    pub capbnd: Option<u64>,
    /// Ambient capability set (since Linux 4.3, see capabilities(7)).
    pub capamb: Option<u64>,
    /// Value of the no_new_privs bit (since Linux 4.10, see prctl(2)).
    pub nonewprivs: Option<u64>,
    /// Seccomp mode of the process (since Linux 3.8, see
    /// seccomp(2)). 0 means SECCOMP_MODE_DISABLED; 1 means
    /// SECCOMP_MODE_STRICT; 2 means SECCOMP_MODE_FILTER. This field
    /// is provided only if the kernel was built with the
    /// CONFIG_SECCOMP kernel configuration option enabled.
    pub seccomp: Option<u32>,
    /// Speculative store bypass mitigation status.
    pub speculation_store_bypass: Option<String>,
    /// Mask of CPUs on which this process may run (since Linux 2.6.24, see cpuset(7)).
    pub cpus_allowed: Option<Vec<u32>>,
    /// Same as previous, but in "list format" (since Linux 2.6.26, see cpuset(7)).
    pub cpus_allowed_list: Option<Vec<(u32, u32)>>,
    /// Mask of memory nodes allowed to this process (since Linux 2.6.24, see cpuset(7)).
    pub mems_allowed: Option<Vec<u32>>,
    /// Same as previous, but in "list format" (since Linux 2.6.26, see cpuset(7)).
    pub mems_allowed_list: Option<Vec<(u32, u32)>>,
    /// Number of voluntary context switches (since Linux 2.6.23).
    pub voluntary_ctxt_switches: Option<u64>,
    /// Number of involuntary context switches (since Linux 2.6.23).
    pub nonvoluntary_ctxt_switches: Option<u64>,
    /// Contains true if the process is currently dumping core.
    ///
    /// This information can be used by a monitoring process to avoid killing a processing that is
    /// currently dumping core, which could result in a corrupted core dump file.
    ///
    /// (Since Linux 4.15)
    pub core_dumping: Option<bool>,
    /// Contains true if the process is allowed to use THP
    ///
    /// (Since Linux 5.0)
    pub thp_enabled: Option<bool>,
}
impl Status {
    /// Parse a `Status` from the contents of `/proc/<pid>/status`.
    ///
    /// Each line is a `Field: value` pair.  The lines are first collected
    /// into a map, and each known field is then removed from it.  Unknown
    /// fields are deliberately ignored (kernels and distro backports add
    /// fields over time); under `cfg(test)` any leftovers are printed to
    /// stderr so new fields get noticed.
    pub fn from_reader<R: Read>(r: R) -> ProcResult<Status> {
        let mut map = HashMap::new();
        let reader = BufReader::new(r);
        for line in reader.lines() {
            let line = line?;
            if line.is_empty() {
                continue;
            }
            // Split on the first ':'; the remainder (whitespace-trimmed) is
            // the value.  NOTE(review): a value containing ':' would be
            // truncated here -- presumably no /proc status value does; verify.
            let mut s = line.split(':');
            let field = expect!(s.next());
            let value = expect!(s.next()).trim();
            map.insert(field.to_string(), value.to_string());
        }
        let status = Status {
            name: expect!(map.remove("Name")),
            // umask is octal (leading zero), hence radix 8.
            umask: map.remove("Umask").map(|x| Ok(from_str!(u32, &x, 8))).transpose()?,
            state: expect!(map.remove("State")),
            tgid: from_str!(i32, &expect!(map.remove("Tgid"))),
            ngid: map.remove("Ngid").map(|x| Ok(from_str!(i32, &x))).transpose()?,
            pid: from_str!(i32, &expect!(map.remove("Pid"))),
            ppid: from_str!(i32, &expect!(map.remove("PPid"))),
            tracerpid: from_str!(i32, &expect!(map.remove("TracerPid"))),
            // The Uid/Gid lines each hold four whitespace-separated values
            // (real, effective, saved, filesystem).  The first three reads
            // borrow the entry via `get`; only the final read `remove`s it.
            ruid: expect!(Status::parse_uid_gid(expect!(map.get("Uid")), 0)),
            euid: expect!(Status::parse_uid_gid(expect!(map.get("Uid")), 1)),
            suid: expect!(Status::parse_uid_gid(expect!(map.get("Uid")), 2)),
            fuid: expect!(Status::parse_uid_gid(&expect!(map.remove("Uid")), 3)),
            rgid: expect!(Status::parse_uid_gid(expect!(map.get("Gid")), 0)),
            egid: expect!(Status::parse_uid_gid(expect!(map.get("Gid")), 1)),
            sgid: expect!(Status::parse_uid_gid(expect!(map.get("Gid")), 2)),
            fgid: expect!(Status::parse_uid_gid(&expect!(map.remove("Gid")), 3)),
            fdsize: from_str!(u32, &expect!(map.remove("FDSize"))),
            groups: Status::parse_list(&expect!(map.remove("Groups")))?,
            nstgid: map.remove("NStgid").map(|x| Status::parse_list(&x)).transpose()?,
            nspid: map.remove("NSpid").map(|x| Status::parse_list(&x)).transpose()?,
            nspgid: map.remove("NSpgid").map(|x| Status::parse_list(&x)).transpose()?,
            nssid: map.remove("NSsid").map(|x| Status::parse_list(&x)).transpose()?,
            // Memory fields are of the form "1234 kB"; absent for kernel threads.
            vmpeak: Status::parse_with_kb(map.remove("VmPeak"))?,
            vmsize: Status::parse_with_kb(map.remove("VmSize"))?,
            vmlck: Status::parse_with_kb(map.remove("VmLck"))?,
            vmpin: Status::parse_with_kb(map.remove("VmPin"))?,
            vmhwm: Status::parse_with_kb(map.remove("VmHWM"))?,
            vmrss: Status::parse_with_kb(map.remove("VmRSS"))?,
            rssanon: Status::parse_with_kb(map.remove("RssAnon"))?,
            rssfile: Status::parse_with_kb(map.remove("RssFile"))?,
            rssshmem: Status::parse_with_kb(map.remove("RssShmem"))?,
            vmdata: Status::parse_with_kb(map.remove("VmData"))?,
            vmstk: Status::parse_with_kb(map.remove("VmStk"))?,
            vmexe: Status::parse_with_kb(map.remove("VmExe"))?,
            vmlib: Status::parse_with_kb(map.remove("VmLib"))?,
            vmpte: Status::parse_with_kb(map.remove("VmPTE"))?,
            vmswap: Status::parse_with_kb(map.remove("VmSwap"))?,
            hugetlbpages: Status::parse_with_kb(map.remove("HugetlbPages"))?,
            threads: from_str!(u64, &expect!(map.remove("Threads"))),
            sigq: expect!(Status::parse_sigq(&expect!(map.remove("SigQ")))),
            // Signal masks and capability sets are hexadecimal (radix 16).
            sigpnd: from_str!(u64, &expect!(map.remove("SigPnd")), 16),
            shdpnd: from_str!(u64, &expect!(map.remove("ShdPnd")), 16),
            sigblk: from_str!(u64, &expect!(map.remove("SigBlk")), 16),
            sigign: from_str!(u64, &expect!(map.remove("SigIgn")), 16),
            sigcgt: from_str!(u64, &expect!(map.remove("SigCgt")), 16),
            capinh: from_str!(u64, &expect!(map.remove("CapInh")), 16),
            capprm: from_str!(u64, &expect!(map.remove("CapPrm")), 16),
            capeff: from_str!(u64, &expect!(map.remove("CapEff")), 16),
            capbnd: map.remove("CapBnd").map(|x| Ok(from_str!(u64, &x, 16))).transpose()?,
            capamb: map.remove("CapAmb").map(|x| Ok(from_str!(u64, &x, 16))).transpose()?,
            nonewprivs: map.remove("NoNewPrivs").map(|x| Ok(from_str!(u64, &x))).transpose()?,
            seccomp: map.remove("Seccomp").map(|x| Ok(from_str!(u32, &x))).transpose()?,
            speculation_store_bypass: map.remove("Speculation_Store_Bypass"),
            cpus_allowed: map
                .remove("Cpus_allowed")
                .map(|x| Status::parse_allowed(&x))
                .transpose()?,
            // NOTE(review): unlike the mask form above, parse errors in the
            // list form are silently discarded (`.ok()`) -- confirm intended.
            cpus_allowed_list: map
                .remove("Cpus_allowed_list")
                .and_then(|x| Status::parse_allowed_list(&x).ok()),
            mems_allowed: map
                .remove("Mems_allowed")
                .map(|x| Status::parse_allowed(&x))
                .transpose()?,
            mems_allowed_list: map
                .remove("Mems_allowed_list")
                .and_then(|x| Status::parse_allowed_list(&x).ok()),
            voluntary_ctxt_switches: map
                .remove("voluntary_ctxt_switches")
                .map(|x| Ok(from_str!(u64, &x)))
                .transpose()?,
            nonvoluntary_ctxt_switches: map
                .remove("nonvoluntary_ctxt_switches")
                .map(|x| Ok(from_str!(u64, &x)))
                .transpose()?,
            core_dumping: map.remove("CoreDumping").map(|x| x == "1"),
            thp_enabled: map.remove("THP_enabled").map(|x| x == "1"),
        };
        if cfg!(test) && !map.is_empty() {
            // This isn't an error because different kernels may put different data here, and distros
            // may backport these changes into older kernels. Too hard to keep track of
            eprintln!("Warning: status map is not empty: {:#?}", map);
        }
        Ok(status)
    }

    /// Parse an optional value of the form `"1234 kB"`, stripping the unit.
    fn parse_with_kb<T: FromStrRadix>(s: Option<String>) -> ProcResult<Option<T>> {
        if let Some(s) = s {
            Ok(Some(from_str!(T, &s.replace(" kB", ""))))
        } else {
            Ok(None)
        }
    }

    /// Extract the `i`-th whitespace-separated component of a `Uid:`/`Gid:`
    /// line (real, effective, saved set, filesystem -- in that order).
    pub(crate) fn parse_uid_gid(s: &str, i: usize) -> ProcResult<u32> {
        Ok(from_str!(u32, expect!(s.split_whitespace().nth(i))))
    }

    /// Parse the `SigQ` field, formatted as `<queued>/<limit>`.
    fn parse_sigq(s: &str) -> ProcResult<(u64, u64)> {
        let mut iter = s.split('/');
        let first = from_str!(u64, expect!(iter.next()));
        let second = from_str!(u64, expect!(iter.next()));
        Ok((first, second))
    }

    /// Parse a whitespace-separated list of numbers (e.g. `Groups`, `NSpid`).
    fn parse_list<T: FromStrRadix>(s: &str) -> ProcResult<Vec<T>> {
        let mut ret = Vec::new();
        for i in s.split_whitespace() {
            ret.push(from_str!(T, i));
        }
        Ok(ret)
    }

    /// Parse a comma-separated list of hexadecimal masks
    /// (`Cpus_allowed` / `Mems_allowed`).
    fn parse_allowed(s: &str) -> ProcResult<Vec<u32>> {
        let mut ret = Vec::new();
        for i in s.split(',') {
            ret.push(from_str!(u32, i, 16));
        }
        Ok(ret)
    }

    /// Parse a cpuset "list format" string like `0-3,5` into inclusive
    /// `(start, end)` ranges; a bare number `n` becomes `(n, n)`.
    fn parse_allowed_list(s: &str) -> ProcResult<Vec<(u32, u32)>> {
        let mut ret = Vec::new();
        for s in s.split(',') {
            if s.contains('-') {
                let mut s = s.split('-');
                let beg = from_str!(u32, expect!(s.next()));
                if let Some(x) = s.next() {
                    let end = from_str!(u32, x);
                    ret.push((beg, end));
                }
            } else {
                // A single CPU/node is represented as a degenerate range.
                let beg = from_str!(u32, s);
                let end = from_str!(u32, s);
                ret.push((beg, end));
            }
        }
        Ok(ret)
    }
}
#[cfg(test)]
mod tests {
    use crate::process::*;

    #[test]
    fn test_proc_status() {
        // /proc/self/status and /proc/self/stat should agree on the basics.
        let me = Process::myself().unwrap();
        let my_stat = me.stat().unwrap();
        let my_status = me.status().unwrap();
        println!("{:?}", my_status);

        assert_eq!(my_status.name, my_stat.comm);
        assert_eq!(my_status.pid, my_stat.pid);
        assert_eq!(my_status.ppid, my_stat.ppid);
    }

    #[test]
    fn test_proc_status_for_kthreadd() {
        // On a real system PID 2 is kthreadd, but inside a container it may
        // simply not exist -- in that case there is nothing to test.
        let kthreadd = match process::Process::new(2) {
            Err(ProcError::NotFound(_)) => return,
            Err(other) => panic!("{}", other),
            Ok(proc2) => proc2,
        };
        let status = kthreadd.status().unwrap();
        println!("{:?}", status);

        assert_eq!(status.pid, 2);

        // kthreadd is a kernel thread and has no userspace address space, so
        // every memory-related field should be absent.
        for absent in &[
            status.vmpeak,
            status.vmsize,
            status.vmlck,
            status.vmpin,
            status.vmhwm,
            status.vmrss,
            status.rssanon,
            status.rssfile,
            status.rssshmem,
            status.vmdata,
            status.vmstk,
            status.vmexe,
            status.vmlib,
            status.vmpte,
            status.vmswap,
            status.hugetlbpages,
        ] {
            assert_eq!(*absent, None);
        }
    }
}

237
vendor/procfs/src/process/task.rs vendored Normal file
View file

@ -0,0 +1,237 @@
use std::io::Read;
use std::path::{Path, PathBuf};
use super::{FileWrapper, Io, Schedstat, Stat, Status};
use crate::{ProcError, ProcResult};
use rustix::fd::{OwnedFd, BorrowedFd};
/// A task (aka Thread) inside of a [`Process`](crate::process::Process)
///
/// Created by [`Process::tasks`](crate::process::Process::tasks), tasks in
/// general are similar to Processes and should have mostly the same fields.
#[derive(Debug)]
pub struct Task {
    // Directory fd for `/proc/<pid>/task/<tid>`, opened with O_PATH in
    // `from_process_at`; the per-task files (stat, status, io, schedstat,
    // children) are opened relative to it.
    fd: OwnedFd,
    /// The ID of the process that this task belongs to
    pub pid: i32,
    /// The task ID
    pub tid: i32,
    /// Task root: `/proc/<pid>/task/<tid>`
    pub(crate) root: PathBuf,
}
impl Task {
    /// Create a new `Task` inside of the process
    ///
    /// Opens `path` (the `<tid>` directory) relative to `dirfd` and keeps the
    /// resulting directory fd so later reads are anchored to this task.
    ///
    /// This API is designed to be ergonomic from inside of [`TasksIter`](super::TasksIter)
    pub(crate) fn from_process_at<P: AsRef<Path>, Q: AsRef<Path>>(
        base: P,
        dirfd: BorrowedFd,
        path: Q,
        pid: i32,
        tid: i32,
    ) -> ProcResult<Task> {
        use rustix::fs::{Mode, OFlags};
        let p = path.as_ref();
        // `root` is only used for path-based error reporting and as a base
        // for FileWrapper::open_at; the actual I/O goes through `fd`.
        let root = base.as_ref().join(p);
        let fd = wrap_io_error!(
            root,
            rustix::fs::openat(
                dirfd,
                p,
                // O_PATH: we only need a directory handle to openat() from,
                // not read access to the directory itself.
                OFlags::PATH | OFlags::DIRECTORY | OFlags::CLOEXEC,
                Mode::empty()
            )
        )?;
        Ok(Task { fd, pid, tid, root })
    }

    /// Thread info from `/proc/<pid>/task/<tid>/stat`
    ///
    /// Many of the returned fields will be the same as the parent process, but some fields like `utime` and `stime` will be per-task
    pub fn stat(&self) -> ProcResult<Stat> {
        Stat::from_reader(FileWrapper::open_at(&self.root, &self.fd, "stat")?)
    }

    /// Thread info from `/proc/<pid>/task/<tid>/status`
    ///
    /// Many of the returned fields will be the same as the parent process
    pub fn status(&self) -> ProcResult<Status> {
        Status::from_reader(FileWrapper::open_at(&self.root, &self.fd, "status")?)
    }

    /// Thread IO info from `/proc/<pid>/task/<tid>/io`
    ///
    /// This data will be unique per task.
    pub fn io(&self) -> ProcResult<Io> {
        Io::from_reader(FileWrapper::open_at(&self.root, &self.fd, "io")?)
    }

    /// Thread scheduler info from `/proc/<pid>/task/<tid>/schedstat`
    ///
    /// This data will be unique per task.
    pub fn schedstat(&self) -> ProcResult<Schedstat> {
        Schedstat::from_reader(FileWrapper::open_at(&self.root, &self.fd, "schedstat")?)
    }

    /// Thread children from `/proc/<pid>/task/<tid>/children`
    ///
    /// WARNING:
    /// This interface is not reliable unless all the child processes are stopped or frozen.
    /// If a child task exits while the file is being read, non-exiting children may be omitted.
    /// See the procfs(5) man page for more information.
    ///
    /// This data will be unique per task.
    pub fn children(&self) -> ProcResult<Vec<u32>> {
        let mut buf = String::new();
        let mut file = FileWrapper::open_at(&self.root, &self.fd, "children")?;
        file.read_to_string(&mut buf)?;
        // The file is a whitespace-separated list of child PIDs.
        buf.split_whitespace()
            .map(|child| {
                child
                    .parse()
                    .map_err(|_| ProcError::Other("Failed to parse task's child PIDs".to_string()))
            })
            .collect()
    }
}
#[cfg(test)]
mod tests {
use crate::process::Io;
use rustix;
use std::process;
use std::sync::{Arc, Barrier};
#[test]
#[cfg(not(tarpaulin))] // this test is unstable under tarpaulin, and i'm yet sure why
// When this test runs in CI, run it single-threaded
fn test_task_runsinglethread() {
use std::io::Read;
let me = crate::process::Process::myself().unwrap();
let (work_barrier, w_a, w_b) = {
let b = Arc::new(Barrier::new(3));
(b.clone(), b.clone(), b)
};
let (done_barrier, d_a, d_b) = {
let b = Arc::new(Barrier::new(3));
(b.clone(), b.clone(), b)
};
let bytes_to_read = 2_000_000;
// create a new task to do some work
let join_a = std::thread::Builder::new()
.name("one".to_owned())
.spawn(move || {
let mut vec = Vec::new();
let zero = std::fs::File::open("/dev/zero").unwrap();
zero.take(bytes_to_read).read_to_end(&mut vec).unwrap();
assert_eq!(vec.len(), bytes_to_read as usize);
// spin for about 52 ticks (utime accounting isn't perfectly accurate)
let dur = std::time::Duration::from_millis(52 * (1000 / crate::ticks_per_second().unwrap()) as u64);
let start = std::time::Instant::now();
while start.elapsed() <= dur {
// spin
}
w_a.wait();
d_a.wait()
})
.unwrap();
// create a new task that does nothing
let join_b = std::thread::Builder::new()
.name("two".to_owned())
.spawn(move || {
w_b.wait();
d_b.wait();
})
.unwrap();
work_barrier.wait();
let mut found_one = false;
let mut found_two = false;
let mut summed_io = Io {
rchar: 0,
wchar: 0,
syscr: 0,
syscw: 0,
read_bytes: 0,
write_bytes: 0,
cancelled_write_bytes: 0,
};
for task in me.tasks().unwrap() {
let task = task.unwrap();
let stat = task.stat().unwrap();
let status = task.status().unwrap();
let io = task.io().unwrap();
summed_io.rchar += io.rchar;
summed_io.wchar += io.wchar;
summed_io.syscr += io.syscr;
summed_io.syscw += io.syscw;
summed_io.read_bytes += io.read_bytes;
summed_io.write_bytes += io.write_bytes;
summed_io.cancelled_write_bytes += io.cancelled_write_bytes;
if stat.comm == "one" && status.name == "one" {
found_one = true;
assert!(io.rchar >= bytes_to_read);
assert!(stat.utime >= 50, "utime({}) too small", stat.utime);
}
if stat.comm == "two" && status.name == "two" {
found_two = true;
// The process might read miscellaneous things from procfs or
// things like /sys/devices/system/cpu/online; allow some small
// reads.
assert!(io.rchar < bytes_to_read);
assert_eq!(io.wchar, 0);
assert_eq!(stat.utime, 0);
}
}
let proc_io = me.io().unwrap();
// these should be mostly the same (though creating the IO struct in the above line will cause some IO to occur)
println!("{:?}", summed_io);
println!("{:?}", proc_io);
// signal the threads to exit
done_barrier.wait();
join_a.join().unwrap();
join_b.join().unwrap();
assert!(found_one);
assert!(found_two);
}
#[test]
fn test_task_children() {
    // Use `tail -f /dev/null` to create two long-lived child processes
    let mut command = process::Command::new("tail");
    command.arg("-f").arg("/dev/null");
    let (mut child1, mut child2) = (command.spawn().unwrap(), command.spawn().unwrap());
    let tid = rustix::thread::gettid();
    let children = crate::process::Process::myself()
        .unwrap()
        .task_from_tid(tid.as_raw_nonzero().get() as i32)
        .unwrap()
        .children()
        .unwrap();
    // Record the results *before* asserting so the children are always killed
    // and reaped, even when an assertion fails. Previously a failed assertion
    // leaked two running `tail -f` processes.
    let found1 = children.contains(&child1.id());
    let found2 = children.contains(&child2.id());
    child1.kill().unwrap();
    child2.kill().unwrap();
    // Reap the children so they don't linger as zombies for the rest of the run.
    let _ = child1.wait();
    let _ = child2.wait();
    assert!(found1);
    assert!(found2);
}
}

501
vendor/procfs/src/process/tests.rs vendored Normal file
View file

@ -0,0 +1,501 @@
use super::*;
/// Unwrap a `ProcResult`, tolerating failures that are expected in an
/// unprivileged test environment or when the target process has exited.
fn check_unwrap<T>(prc: &Process, val: ProcResult<T>) -> Option<T> {
    match val {
        Ok(t) => return Some(t),
        // Without root, permission-denied errors are expected and fine.
        Err(ProcError::PermissionDenied(_)) if !rustix::process::geteuid().is_root() => {}
        Err(ProcError::NotFound(path)) => {
            // A common reason for this error is that the process isn't running
            // anymore, so only treat it as a failure for a live process.
            if prc.is_alive() {
                panic!("{:?} not found", path)
            }
        }
        Err(err) => panic!("check_unwrap error for {} {:?}", prc.pid, err),
    }
    None
}
/// Task-flavored variant of `check_unwrap`: NotFound is always tolerated.
fn check_unwrap_task<T>(prc: &Process, val: ProcResult<T>) -> Option<T> {
    match val {
        Ok(t) => Some(t),
        Err(ProcError::PermissionDenied(_)) if !rustix::process::geteuid().is_root() => {
            // we are not root, and so a permission denied error is OK
            None
        }
        Err(ProcError::NotFound(_path)) => {
            // tasks can be more short-lived than processes, and it seems that accessing
            // the /status and /stat files for tasks is quite unreliable
            None
        }
        Err(err) => panic!("check_unwrap error for {} {:?}", prc.pid, err),
    }
}
#[test]
fn test_main_thread_task() {
    // The main thread's task should always exist and expose a stat file.
    let me = Process::myself().unwrap();
    let main_thread = me.task_main_thread().unwrap();
    check_unwrap(&me, main_thread.stat());
}
#[allow(clippy::cognitive_complexity)]
#[test]
fn test_self_proc() {
    // Sanity-check /proc/self/stat parsing, and verify that version-gated stat
    // fields are Some exactly when the running kernel is new enough to emit them.
    let myself = Process::myself().unwrap().stat().unwrap();
    println!("{:#?}", myself);
    println!("state: {:?}", myself.state());
    println!("tty: {:?}", myself.tty_nr());
    println!("flags: {:?}", myself.flags());
    #[cfg(feature = "chrono")]
    println!("starttime: {:#?}", myself.starttime());
    let kernel = KernelVersion::current().unwrap();
    if kernel >= KernelVersion::new(2, 1, 22) {
        assert!(myself.exit_signal.is_some());
    } else {
        assert!(myself.exit_signal.is_none());
    }
    if kernel >= KernelVersion::new(2, 2, 8) {
        assert!(myself.processor.is_some());
    } else {
        assert!(myself.processor.is_none());
    }
    // rt_priority and policy both appeared in 2.5.19; a second redundant
    // rt_priority-only check for the same version was removed here.
    if kernel >= KernelVersion::new(2, 5, 19) {
        assert!(myself.rt_priority.is_some());
        assert!(myself.policy.is_some());
    } else {
        assert!(myself.rt_priority.is_none());
        assert!(myself.policy.is_none());
    }
    if kernel >= KernelVersion::new(2, 6, 18) {
        assert!(myself.delayacct_blkio_ticks.is_some());
    } else {
        assert!(myself.delayacct_blkio_ticks.is_none());
    }
    if kernel >= KernelVersion::new(2, 6, 24) {
        assert!(myself.guest_time.is_some());
        assert!(myself.cguest_time.is_some());
    } else {
        assert!(myself.guest_time.is_none());
        assert!(myself.cguest_time.is_none());
    }
    if kernel >= KernelVersion::new(3, 3, 0) {
        assert!(myself.start_data.is_some());
        assert!(myself.end_data.is_some());
        assert!(myself.start_brk.is_some());
    } else {
        assert!(myself.start_data.is_none());
        assert!(myself.end_data.is_none());
        assert!(myself.start_brk.is_none());
    }
    if kernel >= KernelVersion::new(3, 5, 0) {
        assert!(myself.arg_start.is_some());
        assert!(myself.arg_end.is_some());
        assert!(myself.env_start.is_some());
        assert!(myself.env_end.is_some());
        assert!(myself.exit_code.is_some());
    } else {
        assert!(myself.arg_start.is_none());
        assert!(myself.arg_end.is_none());
        assert!(myself.env_start.is_none());
        assert!(myself.env_end.is_none());
        assert!(myself.exit_code.is_none());
    }
}
#[test]
fn test_all() {
    // Detect WSL2 via CONFIG_LOCALVERSION in the kernel config; its kernel is
    // missing the autogroup file that mainline kernels have (see below).
    let is_wsl2 = kernel_config()
        .ok()
        .and_then(|cfg| {
            cfg.get("CONFIG_LOCALVERSION").and_then(|ver| {
                if let ConfigSetting::Value(s) = ver {
                    Some(s == "\"-microsoft-standard\"")
                } else {
                    None
                }
            })
        })
        .unwrap_or(false);
    // Walk every process on the system and try to read everything we can.
    for p in all_processes().unwrap() {
        // note: this test doesn't unwrap, since some of this data requires root to access
        // so permission denied errors are common. The check_unwrap helper function handles
        // this.
        let prc = p.unwrap();
        let stat = prc.stat().unwrap();
        println!("{} {}", prc.pid(), stat.comm);
        stat.flags().unwrap();
        stat.state().unwrap();
        #[cfg(feature = "chrono")]
        stat.starttime().unwrap();
        // if this process is defunct/zombie, don't try to read any of the below data
        // (some might be successful, but not all)
        if stat.state().unwrap() == ProcState::Zombie {
            continue;
        }
        check_unwrap(&prc, prc.cmdline());
        check_unwrap(&prc, prc.environ());
        check_unwrap(&prc, prc.fd());
        check_unwrap(&prc, prc.io());
        check_unwrap(&prc, prc.maps());
        check_unwrap(&prc, prc.coredump_filter());
        // The WSL2 kernel doesn't have autogroup, even though this should be present since linux
        // 2.6.36
        if is_wsl2 {
            assert!(prc.autogroup().is_err());
        } else {
            check_unwrap(&prc, prc.autogroup());
        }
        check_unwrap(&prc, prc.auxv());
        check_unwrap(&prc, prc.cgroups());
        check_unwrap(&prc, prc.wchan());
        check_unwrap(&prc, prc.status());
        check_unwrap(&prc, prc.mountinfo());
        check_unwrap(&prc, prc.mountstats());
        check_unwrap(&prc, prc.oom_score());
        // Tasks are even more short-lived than processes, so use the more
        // forgiving check_unwrap_task for per-task files.
        if let Some(tasks) = check_unwrap(&prc, prc.tasks()) {
            for task in tasks {
                let task = task.unwrap();
                check_unwrap_task(&prc, task.stat());
                check_unwrap_task(&prc, task.status());
                check_unwrap_task(&prc, task.io());
                check_unwrap_task(&prc, task.schedstat());
            }
        }
    }
}
#[test]
fn test_smaps() {
    let me = Process::myself().unwrap();
    match me.smaps() {
        Ok(smaps) => println!("{:#?}", smaps),
        // Not all kernels have smaps; quietly skip in that case.
        Err(ProcError::NotFound(_)) => {}
        Err(e) => panic!("{}", e),
    }
}
#[test]
fn test_smaps_rollup() {
    let me = Process::myself().unwrap();
    match me.smaps_rollup() {
        Ok(rollup) => println!("{:#?}", rollup),
        // Not all kernels have smaps_rollup; quietly skip in that case.
        Err(ProcError::NotFound(_)) => {}
        Err(e) => panic!("{}", e),
    }
}
#[test]
fn test_proc_alive() {
    let myself = Process::myself().unwrap();
    assert!(myself.is_alive());
    // zombies should not be considered alive
    let mut command = std::process::Command::new("sleep");
    command
        .arg("0")
        .stdout(std::process::Stdio::null())
        .stderr(std::process::Stdio::null());
    let mut child = command.spawn().unwrap();
    let child_pid = child.id() as i32;
    // sleep very briefly to allow the child to start and then exit
    // NOTE(review): this is a race; on a heavily loaded machine 30ms may not be
    // enough for `sleep 0` to have exited yet.
    std::thread::sleep(std::time::Duration::from_millis(30));
    let child_proc = Process::new(child_pid).unwrap();
    // The child has exited but hasn't been reaped, so it should be a zombie:
    // not "alive", but still present in /proc.
    assert!(!child_proc.is_alive(), "Child state is: {:?}", child_proc.stat());
    assert!(child_proc.stat().unwrap().state().unwrap() == ProcState::Zombie);
    // After wait() reaps the child, its /proc entry disappears...
    child.wait().unwrap();
    assert!(Process::new(child_pid).is_err());
    // ...and the previously created handle also reports it as not alive.
    assert!(!child_proc.is_alive(), "Child state is: {:?}", child_proc.stat());
}
#[test]
fn test_proc_environ() {
    // /proc/self/environ should agree with the environment std sees.
    let me = Process::myself().unwrap();
    let from_proc = me.environ().unwrap();
    let from_std: HashMap<_, _> = std::env::vars_os().collect();
    assert_eq!(from_proc, from_std);
}
#[test]
fn test_error_handling() {
    // getting the proc struct should be OK
    let init = Process::new(1).unwrap();
    // but reading init's data should fail unless our euid matches init's
    // (i.e. we are running as root)
    if rustix::process::geteuid().as_raw() != init.uid().unwrap() {
        assert!(init.cwd().is_err());
        assert!(init.environ().is_err());
    }
}
#[test]
fn test_proc_exe() {
    // /proc/self/exe should match what std reports as the current executable.
    let from_proc = Process::myself().unwrap().exe().unwrap();
    let from_std = std::env::current_exe().unwrap();
    assert_eq!(from_proc, from_std);
}
#[test]
fn test_proc_io() {
    // If /proc/<pid>/io is readable, the kernel must be at least 2.6.20.
    let me = Process::myself().unwrap();
    let kernel = KernelVersion::current().unwrap();
    let io = me.io();
    println!("{:?}", io);
    if io.is_ok() {
        assert!(kernel >= KernelVersion::new(2, 6, 20));
    }
}
#[test]
fn test_proc_maps() {
    // Every entry in /proc/self/maps should parse; print them all.
    let me = Process::myself().unwrap();
    for entry in me.maps().unwrap() {
        println!("{:?}", entry);
    }
}
#[test]
fn test_proc_pagemap() {
    let myself = Process::myself().unwrap();
    let maps = myself.maps().unwrap();
    // Locate our own stack mapping and convert its address range to page numbers.
    let stack_map = maps.iter().find(|m| matches!(m.pathname, MMapPath::Stack)).unwrap();
    let page_size = crate::page_size().unwrap() as usize;
    let start_page = stack_map.address.0 as usize / page_size;
    let end_page = stack_map.address.1 as usize / page_size;
    // Query /proc/self/pagemap for that page range.
    let mut pagemap = myself.pagemap().unwrap();
    let page_infos = pagemap.get_range_info(start_page..end_page).unwrap();
    // Print only pages flagged PRESENT (resident in physical memory).
    let present_pages = page_infos.iter().filter(|info| {
        if let PageInfo::MemoryPage(flags) = info {
            flags.contains(MemoryPageFlags::PRESENT)
        } else {
            false
        }
    });
    for present_page in present_pages {
        println!("{:?}", present_page);
    }
}
#[test]
fn test_mmap_path() {
    // Exercise each MMapPath parse case.
    assert_eq!(MMapPath::from("").unwrap(), MMapPath::Anonymous);
    assert_eq!(MMapPath::from("[stack]").unwrap(), MMapPath::Stack);
    assert_eq!(MMapPath::from("[stack:154]").unwrap(), MMapPath::TStack(154));
    assert_eq!(MMapPath::from("[foo]").unwrap(), MMapPath::Other("foo".to_owned()));
    assert_eq!(
        MMapPath::from("/lib/libfoo.so").unwrap(),
        MMapPath::Path(PathBuf::from("/lib/libfoo.so"))
    );
}
#[test]
fn test_proc_fds() {
    // Every fd entry for our own process should parse.
    for entry in Process::myself().unwrap().fd().unwrap() {
        let fd = entry.unwrap();
        println!("{:?} {:?}", fd, fd.mode());
    }
}
#[test]
fn test_proc_fd() {
    // Look up a single fd by raw number via FDInfo::from_raw_fd.
    let me = Process::myself().unwrap();
    let first = me.fd().unwrap().next().unwrap().unwrap();
    let fd = FDInfo::from_raw_fd(me.pid, first.fd as i32).unwrap();
    println!("{:?} {:?}", fd, fd.mode());
}
#[test]
fn test_proc_coredump() {
    // Exercise coredump_filter and print whatever it returns.
    let me = Process::myself().unwrap();
    println!("{:?}", me.coredump_filter());
}
#[test]
fn test_proc_auxv() {
    // Walk our own auxiliary vector; key meanings come from bits/auxv.h.
    let myself = Process::myself().unwrap();
    let auxv = myself.auxv().unwrap();
    println!("{:?}", auxv);
    for (k, v) in auxv {
        // See bits/auxv.h
        match k {
            2 => println!("File descriptor of program: {}", v),
            3 => println!("Address of the program headers of the executable: 0x{:x}", v),
            4 => println!("Size of program header entry: {}", v),
            5 => println!("Number of program headers: {}", v),
            6 => {
                println!("System page size: {}", v);
                assert!(v > 0);
            }
            7 => {
                println!("Base address: 0x{:x}", v);
                assert!(v > 0);
            }
            8 => println!("Flags: 0x{:x}", v),
            9 => {
                println!("Entry address of the executable: 0x{:x}", v);
                assert!(v > 0);
            }
            11 => {
                println!("Real UID: {}", v);
                assert_eq!(v as u32, rustix::process::getuid().as_raw());
            }
            12 => {
                // NOTE(review): this asserts euid > 0, which fails when the
                // tests run as root (euid 0) — confirm tests never run as root.
                println!("Effective UID: {}", v);
                assert!(v > 0);
            }
            13 => {
                // NOTE(review): same caveat as above for GID 0.
                println!("Real GID: {}", v);
                assert!(v > 0);
            }
            14 => {
                println!("Effective GID: {}", v);
                assert!(v > 0);
            }
            15 => {
                println!("Platform string address: 0x{:x}", v);
                // This auxv belongs to our own process, so the pointer should be
                // valid in this address space — assumed, not verified.
                let platform = unsafe { std::ffi::CStr::from_ptr(v as *const _) };
                println!("Platform string: {:?}", platform);
            }
            16 => println!("HW Cap: 0x{:x}", v),
            17 => {
                println!("Clock ticks per second: {}", v);
                assert_eq!(v, crate::ticks_per_second().unwrap());
            }
            19 => println!("Data cache block size: {}", v),
            23 => println!("Run as setuid?: {}", v),
            25 => println!("Address of 16 random bytes: 0x{:x}", v),
            26 => println!("HW Cap2: 0x{:x}", v),
            31 => {
                println!("argv[0] address: 0x{:x}", v);
                // Same assumption as key 15: pointer into our own address space.
                let argv0 = unsafe { std::ffi::CStr::from_ptr(v as *const _) };
                println!("argv[0]: {:?}", argv0);
            }
            k => println!("Unknown key {}: {:x}", k, v),
        }
    }
}
#[test]
fn test_proc_wchan() {
    // /proc/self/wchan should be readable for our own process.
    let wchan = Process::myself().unwrap().wchan().unwrap();
    println!("{:?}", wchan);
}
#[test]
fn test_proc_loginuid() {
    // loginuid is not available on all systems; skip when the file is absent.
    if Path::new("/proc/self/loginuid").exists() {
        let loginuid = Process::myself().unwrap().loginuid().unwrap();
        println!("{:?}", loginuid);
    }
}
#[test]
fn test_nopanic() {
    // from_iter on non-numeric input must return an error, never panic.
    fn parse_garbage() -> ProcResult<u8> {
        let data = vec!["xyz"];
        from_iter(data)
    }
    assert!(parse_garbage().is_err());
}
#[test]
fn test_procinfo() {
    // test to see that this crate and procinfo give mostly the same results
    // Memory numbers fluctuate, so allow a generous tolerance.
    fn diff_mem(a: f32, b: f32) {
        let diff = (a - b).abs();
        assert!(diff < 20000.0, "diff:{}", diff);
    }
    // take a pause to let things "settle" before getting data. By default, cargo will run
    // tests in parallel, which can cause disturbances
    std::thread::sleep(std::time::Duration::from_secs(1));
    let procinfo_stat = procinfo::pid::stat_self().unwrap();
    let me = Process::myself().unwrap();
    let me_stat = me.stat().unwrap();
    diff_mem(procinfo_stat.vsize as f32, me_stat.vsize as f32);
    assert_eq!(me_stat.priority, procinfo_stat.priority as i64);
    assert_eq!(me_stat.nice, procinfo_stat.nice as i64);
    // flags seem to change during runtime, with PF_FREEZER_SKIP coming and going...
    //assert_eq!(me_stat.flags, procinfo_stat.flags, "procfs:{:?} procinfo:{:?}", crate::StatFlags::from_bits(me_stat.flags), crate::StatFlags::from_bits(procinfo_stat.flags));
    assert_eq!(me_stat.pid, procinfo_stat.pid);
    assert_eq!(me_stat.ppid, procinfo_stat.ppid);
}
#[test]
fn test_statm() {
    // /proc/self/statm should always parse for our own process.
    let statm = Process::myself().unwrap().statm().unwrap();
    println!("{:#?}", statm);
}
#[test]
fn test_schedstat() {
    // /proc/self/schedstat should always parse for our own process.
    let schedstat = Process::myself().unwrap().schedstat().unwrap();
    println!("{:#?}", schedstat);
}
#[test]
fn test_fdtarget() {
    // none of these values are valid, but were found by a fuzzer to crash procfs. this
    // test ensures that the crashes have been fixed
    let fuzz_inputs = [":", "n:ǟF", "pipe:"];
    for input in fuzz_inputs.iter() {
        let _ = FDTarget::from_str(input);
    }
}
#[test]
fn test_fdtarget_memfd() {
    // A memfd link target parses into FDTarget::MemFD with the name preserved.
    match FDTarget::from_str("/memfd:test").unwrap() {
        FDTarget::MemFD(name) => assert_eq!(name, "test"),
        other => panic!("expected MemFD, got {:?}", other),
    }
}

280
vendor/procfs/src/sys/fs/binfmt_misc.rs vendored Normal file
View file

@ -0,0 +1,280 @@
use bitflags::bitflags;
use std::path::Path;
use crate::{read_value, ProcResult};
/// Returns true if the miscellaneous Binary Formats system is enabled.
pub fn enabled() -> ProcResult<bool> {
    let status: String = read_value("/proc/sys/fs/binfmt_misc/status")?;
    Ok(status == "enabled")
}
/// Decode a hex string (e.g. "7f45") into raw bytes.
///
/// Errors on odd-length input or non-hex digits.
fn hex_to_vec(hex: &str) -> ProcResult<Vec<u8>> {
    if hex.len() % 2 != 0 {
        return Err(build_internal_error!(format!(
            "Hex string {:?} has non-even length",
            hex
        )));
    }
    let mut data = Vec::with_capacity(hex.len() / 2);
    // Consume two hex digits per output byte.
    for idx in (0..hex.len()).step_by(2) {
        data.push(from_str!(u8, &hex[idx..idx + 2], 16));
    }
    Ok(data)
}
/// How a binfmt entry decides whether it applies to a given binary.
#[derive(Debug, Clone)]
pub enum BinFmtData {
    /// A BinFmt entry based on a file extension (does not include the period)
    Extension(String),
    /// A BinFmt entry based on magic string matching
    ///
    /// `mask` is paired byte-for-byte with `magic` (it defaults to all 0xff
    /// when the kernel reports no mask), and `offset` is where in the file
    /// the magic starts.
    Magic { offset: u8, magic: Vec<u8>, mask: Vec<u8> },
}
/// A registered binary format entry
///
/// For more info, see the kernel doc Documentation/admin-guide/binfmt-misc.rst
#[derive(Debug, Clone)]
pub struct BinFmtEntry {
    /// The name of the entry
    ///
    /// Corresponds to a file in /proc/sys/fs/binfmt_misc/
    pub name: String,
    /// Is the entry enabled or not
    pub enabled: bool,
    /// Full path to the interpreter to run this entry
    pub interpreter: String,
    /// Flags controlling how the interpreter is invoked (see [`BinFmtFlags`])
    pub flags: BinFmtFlags,
    /// How binaries are matched: by extension or by magic bytes
    pub data: BinFmtData,
}
impl BinFmtEntry {
    /// Parse the body of a /proc/sys/fs/binfmt_misc/<name> file into an entry.
    ///
    /// Lines are matched by exact prefix; unrecognized lines are ignored.
    pub(crate) fn from_string(name: String, data: &str) -> ProcResult<Self> {
        let mut enabled = false;
        let mut interpreter = String::new();
        let mut ext = None;
        let mut offset = 0;
        let mut magic = Vec::new();
        let mut mask = Vec::new();
        let mut flags = BinFmtFlags::empty();
        for line in data.lines() {
            if line == "enabled" {
                enabled = true;
            } else if let Some(stripped) = line.strip_prefix("interpreter ") {
                interpreter = stripped.to_string();
            } else if let Some(stripped) = line.strip_prefix("flags:") {
                flags = BinFmtFlags::from_str(stripped);
            } else if let Some(stripped) = line.strip_prefix("extension .") {
                ext = Some(stripped.to_string());
            } else if let Some(stripped) = line.strip_prefix("offset ") {
                offset = from_str!(u8, stripped);
            } else if let Some(stripped) = line.strip_prefix("magic ") {
                // fixed: a leftover `dbg!()` around this value used to spam
                // stderr on every parse
                magic = hex_to_vec(stripped)?;
            } else if let Some(stripped) = line.strip_prefix("mask ") {
                mask = hex_to_vec(stripped)?;
            }
        }
        // When no mask is given, every magic byte must match exactly.
        if !magic.is_empty() && mask.is_empty() {
            mask.resize(magic.len(), 0xff);
        }
        Ok(BinFmtEntry {
            name,
            enabled,
            interpreter,
            flags,
            data: if let Some(ext) = ext {
                BinFmtData::Extension(ext)
            } else {
                BinFmtData::Magic { magic, mask, offset }
            },
        })
    }
}
bitflags! {
    /// Various key flags
    pub struct BinFmtFlags: u8 {
        /// Preserve Argv[0]
        ///
        /// Legacy behavior of binfmt_misc is to overwrite the original argv[0] with the full path to the binary. When
        /// this flag is included, binfmt_misc will add an argument to the argument vector for this purpose, thus
        /// preserving the original `argv[0]`.
        ///
        /// For example, If your interp is set to `/bin/foo` and you run `blah` (which is in `/usr/local/bin`),
        /// then the kernel will execute `/bin/foo` with `argv[]` set to `["/bin/foo", "/usr/local/bin/blah", "blah"]`.
        ///
        /// The interp has to be aware of this so it can execute `/usr/local/bin/blah` with `argv[]` set to `["blah"]`.
        const P = 0x01;
        /// Open Binary
        ///
        /// Legacy behavior of binfmt_misc is to pass the full path
        /// of the binary to the interpreter as an argument. When this flag is
        /// included, binfmt_misc will open the file for reading and pass its
        /// descriptor as an argument, instead of the full path, thus allowing
        /// the interpreter to execute non-readable binaries. This feature
        /// should be used with care - the interpreter has to be trusted not to
        /// emit the contents of the non-readable binary.
        const O = 0x02;
        /// Credentials
        ///
        /// Currently, the behavior of binfmt_misc is to calculate
        /// the credentials and security token of the new process according to
        /// the interpreter. When this flag is included, these attributes are
        /// calculated according to the binary. It also implies the `O` flag.
        /// This feature should be used with care as the interpreter
        /// will run with root permissions when a setuid binary owned by root
        /// is run with binfmt_misc.
        const C = 0x04;
        /// Fix binary
        ///
        /// The usual behaviour of binfmt_misc is to spawn the
        /// binary lazily when the misc format file is invoked. However,
        /// this doesn't work very well in the face of mount namespaces and
        /// changeroots, so the `F` mode opens the binary as soon as the
        /// emulation is installed and uses the opened image to spawn the
        /// emulator, meaning it is always available once installed,
        /// regardless of how the environment changes.
        const F = 0x08;
    }
}
impl BinFmtFlags {
    /// Parse a flags string such as "POC"; unrecognized characters are ignored.
    pub(crate) fn from_str(s: &str) -> Self {
        let mut flags = BinFmtFlags::empty();
        for c in s.chars() {
            match c {
                'P' => flags |= BinFmtFlags::P,
                'O' => flags |= BinFmtFlags::O,
                'C' => flags |= BinFmtFlags::C,
                'F' => flags |= BinFmtFlags::F,
                _ => {}
            }
        }
        flags
    }
}
/// Enumerate all registered binfmt entries from /proc/sys/fs/binfmt_misc/.
pub fn list() -> ProcResult<Vec<BinFmtEntry>> {
    let dir = Path::new("/proc/sys/fs/binfmt_misc/");
    let mut entries = Vec::new();
    for dirent in wrap_io_error!(dir, dir.read_dir())? {
        let dirent = dirent?;
        let fname = dirent.file_name();
        // "status" and "register" are control files, not real entries
        if fname == "status" || fname == "register" {
            continue;
        }
        let name = fname.to_string_lossy().to_string();
        let contents = std::fs::read_to_string(dirent.path())?;
        entries.push(BinFmtEntry::from_string(name, &contents)?);
    }
    Ok(entries)
}
#[cfg(test)]
mod tests {
    use super::*;
    #[test]
    fn test_enabled() {
        // NotFound is acceptable: binfmt_misc may not be mounted on this system.
        match enabled() {
            Ok(_) => {}
            Err(crate::ProcError::NotFound(_)) => {}
            Err(e) => panic!("{}", e),
        }
    }
    #[test]
    fn parse_magic() {
        // A 20-byte magic value, encoded as 40 hex digits.
        let mask = "7f454c460201010000000000000000000200f300";
        let data = hex_to_vec(mask).unwrap();
        println!("{:?}", data);
        assert_eq!(data.len(), 20);
        assert_eq!(data[0], 0x7f);
        assert_eq!(data[1], 0x45);
        // Odd lengths and non-hex digits must be rejected.
        assert!(hex_to_vec("a").is_err());
        assert!(hex_to_vec("zz").is_err());
    }
    #[test]
    fn flags_parsing() {
        // Empty input yields no flags; order of characters doesn't matter.
        assert!(BinFmtFlags::from_str("").is_empty());
        let flags = BinFmtFlags::from_str("F");
        assert_eq!(flags, BinFmtFlags::F);
        let flags = BinFmtFlags::from_str("OCF");
        assert_eq!(flags, BinFmtFlags::F | BinFmtFlags::C | BinFmtFlags::O);
    }
    #[test]
    fn binfmt() {
        // A magic-based entry. The fixture lines are deliberately unindented:
        // from_string matches exact line prefixes.
        let data = r#"enabled
interpreter /usr/bin/qemu-riscv64-static
flags: OCF
offset 12
magic 7f454c460201010000000000000000000200f300
mask ffffffffffffff00fffffffffffffffffeffffff"#;
        let entry = BinFmtEntry::from_string("test".to_owned(), data).unwrap();
        println!("{:#?}", entry);
        assert_eq!(entry.flags, BinFmtFlags::F | BinFmtFlags::C | BinFmtFlags::O);
        assert!(entry.enabled);
        assert_eq!(entry.interpreter, "/usr/bin/qemu-riscv64-static");
        if let BinFmtData::Magic { offset, magic, mask } = entry.data {
            assert_eq!(offset, 12);
            assert_eq!(magic.len(), mask.len());
            assert_eq!(
                magic,
                vec![
                    0x7f, 0x45, 0x4c, 0x46, 0x02, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                    0x02, 0x00, 0xf3, 0x00
                ]
            );
        } else {
            panic!("Unexpected data");
        }
        // An extension-based entry with no flags.
        let data = r#"enabled
interpreter /bin/hello
flags:
extension .hello"#;
        let entry = BinFmtEntry::from_string("test".to_owned(), data).unwrap();
        println!("{:#?}", entry);
        assert_eq!(entry.flags, BinFmtFlags::empty());
        assert!(entry.enabled);
        assert_eq!(entry.interpreter, "/bin/hello");
        if let BinFmtData::Extension(ext) = entry.data {
            assert_eq!(ext, "hello");
        } else {
            panic!("Unexpected data");
        }
    }
    #[test]
    fn live() {
        // NOTE(review): unlike test_enabled, this unwrap panics when
        // binfmt_misc isn't mounted — confirm that's intended.
        for entry in super::list().unwrap() {
            println!("{:?}", entry);
        }
    }
}

30
vendor/procfs/src/sys/fs/epoll.rs vendored Normal file
View file

@ -0,0 +1,30 @@
use crate::{read_value, write_value, ProcResult};
/// Get the limit on the total number of file descriptors that a user can register across all epoll instances.
///
/// The limit is per real user ID. Each registered file descriptor costs roughly 90 bytes on a 32-bit kernel,
/// and roughly 160 bytes on a 64-bit kernel. Currently, the default value for `max_user_watches` is 1/25 (4%)
/// of the available low memory, divided by the registration cost in bytes.
///
/// See also [`set_max_user_watches`].
///
/// (Since Linux 2.6.28)
pub fn max_user_watches() -> ProcResult<u64> {
    read_value("/proc/sys/fs/epoll/max_user_watches")
}
/// Sets the limit on the total number of file descriptors that a user can register across all epoll instances.
///
/// Writes to `/proc/sys/fs/epoll/max_user_watches`. See [`max_user_watches`].
pub fn set_max_user_watches(val: u64) -> ProcResult<()> {
    write_value("/proc/sys/fs/epoll/max_user_watches", val)
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::KernelVersion;
    #[test]
    fn test_max_user_watches() {
        // The epoll sysctl file only exists on 2.6.28+ kernels.
        let new_enough = KernelVersion::current().unwrap() >= KernelVersion::new(2, 6, 28);
        if new_enough {
            println!("{}", max_user_watches().unwrap());
        }
    }
}

104
vendor/procfs/src/sys/fs/mod.rs vendored Normal file
View file

@ -0,0 +1,104 @@
//! This module contains functions for kernel variables related to filesystems
use crate::{read_file, read_value, write_value, ProcResult};
use std::time::Duration;
pub mod binfmt_misc;
pub mod epoll;
/// Information about the status of the directory cache (dcache)
#[derive(Debug, Clone)]
pub struct DEntryState {
    /// The number of allocated dentries (dcache entries)
    ///
    /// Unused in Linux 2.2
    pub nr_dentry: u32,
    /// The number of unused dentries.
    pub nr_unused: u32,
    /// The age after which dcache entries can be reclaimed when memory is short
    pub age_limit: Duration,
    /// Is true when the kernel has called `shrink_dcache_pages()` and the dcache isn't pruned yet.
    pub want_pages: bool,
}
impl DEntryState {
    /// Parse the whitespace-separated fields of /proc/sys/fs/dentry-state.
    fn from_str(s: &str) -> ProcResult<DEntryState> {
        let mut fields = s.split_whitespace();
        let nr_dentry = from_str!(u32, expect!(fields.next()));
        let nr_unused = from_str!(u32, expect!(fields.next()));
        let age_limit_sec = from_str!(u32, expect!(fields.next()));
        let want_pages = from_str!(u32, expect!(fields.next()));
        Ok(DEntryState {
            nr_dentry,
            nr_unused,
            age_limit: Duration::from_secs(u64::from(age_limit_sec)),
            want_pages: want_pages != 0,
        })
    }
}
/// Get information about the status of the directory cache (dcache)
///
/// (Since Linux 2.2)
pub fn dentry_state() -> ProcResult<DEntryState> {
    let s: String = read_file("/proc/sys/fs/dentry-state")?;
    DEntryState::from_str(&s)
}
/// Get the system-wide limit on the number of open files for all processes.
///
/// System calls that fail when encountering this limit fail with the error `ENFILE`.
pub fn file_max() -> ProcResult<usize> {
    read_value("/proc/sys/fs/file-max")
}
/// Set the system-wide limit on the number of open files for all processes.
///
/// Writes to `/proc/sys/fs/file-max`. See [`file_max`].
pub fn set_file_max(max: usize) -> ProcResult<()> {
    write_value("/proc/sys/fs/file-max", max)
}
/// File handle statistics, as reported by `/proc/sys/fs/file-nr` (see [`file_nr`]).
#[derive(Debug, Clone)]
pub struct FileState {
    /// The number of allocated file handles.
    ///
    /// (i.e. the number of files presently opened)
    pub allocated: u64,
    /// The number of free file handles.
    pub free: u64,
    /// The maximum number of file handles.
    ///
    /// This may be u64::MAX
    pub max: u64,
}
/// Get allocated/free/max file handle counts from `/proc/sys/fs/file-nr`.
pub fn file_nr() -> ProcResult<FileState> {
    let s = read_file("/proc/sys/fs/file-nr")?;
    let mut s = s.split_whitespace();
    let allocated = from_str!(u64, expect!(s.next()));
    let free = from_str!(u64, expect!(s.next()));
    let max = from_str!(u64, expect!(s.next()));
    Ok(FileState { allocated, free, max })
}
#[cfg(test)]
mod tests {
    use super::*;
    #[test]
    fn dentry() {
        // dentry-state should always parse on a real system
        println!("{:?}", dentry_state().unwrap());
    }
    #[test]
    fn filenr() {
        // file-nr should always parse on a real system
        println!("{:?}", file_nr().unwrap());
    }
}

127
vendor/procfs/src/sys/kernel/keys.rs vendored Normal file
View file

@ -0,0 +1,127 @@
//! Functions related to the in-kernel key management and retention facility
//!
//! For more details on this facility, see the `keyrings(7)` man page.
//!
//! Additional functions can be found in the [keyring](crate::keyring) module.
use crate::{read_value, write_value, ProcResult};
/// GC Delay
///
/// The value in this file specifies the interval, in seconds,
/// after which revoked and expired keys will be garbage collected.
/// The purpose of having such an interval is so that
/// there is a window of time where user space can see an error
/// (respectively EKEYREVOKED and EKEYEXPIRED) that indicates what
/// happened to the key.
///
/// The default value in this file is 300 (i.e., 5 minutes).
///
/// Read from `/proc/sys/kernel/keys/gc_delay`.
///
/// (since Linux 2.6.32)
pub fn gc_delay() -> ProcResult<u32> {
    read_value("/proc/sys/kernel/keys/gc_delay")
}
/// Persistent Keyring Expiry
///
/// This file defines an interval, in seconds, to which the persistent
/// keyring's expiration timer is reset each time the
/// keyring is accessed (via keyctl_get_persistent(3) or the
/// keyctl(2) KEYCTL_GET_PERSISTENT operation.)
///
/// The default value in this file is 259200 (i.e., 3 days).
///
/// Read from `/proc/sys/kernel/keys/persistent_keyring_expiry`.
///
/// (Since Linux 3.13)
pub fn persistent_keyring_expiry() -> ProcResult<u32> {
    read_value("/proc/sys/kernel/keys/persistent_keyring_expiry")
}
/// Max bytes
///
/// This is the maximum number of bytes of data that a nonroot
/// user can hold in the payloads of the keys owned by the user.
///
/// The default value in this file is 20,000.
///
/// See also [`set_maxbytes`] and [`root_maxbytes`].
///
/// (since linux 2.6.26)
pub fn maxbytes() -> ProcResult<u32> {
    read_value("/proc/sys/kernel/keys/maxbytes")
}
/// Set max bytes
///
/// Writes to `/proc/sys/kernel/keys/maxbytes`. See [`maxbytes`].
pub fn set_maxbytes(bytes: u32) -> ProcResult<()> {
    write_value("/proc/sys/kernel/keys/maxbytes", bytes)
}
/// Max keys
///
/// This is the maximum number of keys that a nonroot user may own.
///
/// See also [`set_maxkeys`] and [`root_maxkeys`].
///
/// (since linux 2.6.26)
pub fn maxkeys() -> ProcResult<u32> {
    read_value("/proc/sys/kernel/keys/maxkeys")
}
/// Set max keys
///
/// Writes to `/proc/sys/kernel/keys/maxkeys`. See [`maxkeys`].
pub fn set_maxkeys(keys: u32) -> ProcResult<()> {
    write_value("/proc/sys/kernel/keys/maxkeys", keys)
}
/// Root maxbytes
///
/// This is the maximum number of bytes of data that the root user
/// (UID 0 in the root user namespace) can hold in the payloads of
/// the keys owned by root.
///
/// The default value in this file is 25,000,000 (20,000 before Linux 3.17).
///
/// See also [`set_root_maxbytes`] and [`maxbytes`].
///
/// (since Linux 2.6.26)
pub fn root_maxbytes() -> ProcResult<u32> {
    read_value("/proc/sys/kernel/keys/root_maxbytes")
}
/// Set root maxbytes
///
/// Writes to `/proc/sys/kernel/keys/root_maxbytes`. See [`root_maxbytes`].
pub fn set_root_maxbytes(bytes: u32) -> ProcResult<()> {
    write_value("/proc/sys/kernel/keys/root_maxbytes", bytes)
}
/// Root maxkeys
///
/// This is the maximum number of keys that the root user (UID 0 in the root user namespace) may own.
///
/// The default value in this file is 1,000,000 (200 before Linux 3.17).
///
/// See also [`set_root_maxkeys`] and [`maxkeys`].
///
/// (since Linux 2.6.26)
pub fn root_maxkeys() -> ProcResult<u32> {
    read_value("/proc/sys/kernel/keys/root_maxkeys")
}
/// Set root maxkeys
///
/// Writes to `/proc/sys/kernel/keys/root_maxkeys`. See [`root_maxkeys`].
pub fn set_root_maxkeys(keys: u32) -> ProcResult<()> {
    write_value("/proc/sys/kernel/keys/root_maxkeys", keys)
}
#[cfg(test)]
mod tests {
    use crate::{ProcError, ProcResult};
    /// Accept Ok or NotFound (the keys sysctls may be absent); panic otherwise.
    fn check_unwrap<T>(val: ProcResult<T>) {
        match val {
            Ok(_) | Err(ProcError::NotFound(_)) => {}
            Err(e) => panic!("Unexpected proc error: {:?}", e),
        }
    }
    #[test]
    fn test_keys() {
        check_unwrap(super::gc_delay());
        check_unwrap(super::persistent_keyring_expiry());
        check_unwrap(super::maxbytes());
        check_unwrap(super::maxkeys());
        check_unwrap(super::root_maxbytes());
        check_unwrap(super::root_maxkeys());
    }
}

589
vendor/procfs/src/sys/kernel/mod.rs vendored Normal file
View file

@ -0,0 +1,589 @@
//! Global kernel info / tuning miscellaneous stuff
//!
//! The files in this directory can be used to tune and monitor miscellaneous
//! and general things in the operation of the Linux kernel.
use std::cmp;
use std::collections::HashSet;
use std::str::FromStr;
use bitflags::bitflags;
use crate::{read_value, write_value, ProcError, ProcResult, KERNEL};
pub mod keys;
pub mod random;
/// Represents a kernel version, in major.minor.release version.
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub struct Version {
    /// Kernel major version number
    pub major: u8,
    /// Kernel minor version number
    pub minor: u8,
    /// Kernel patch/release number
    pub patch: u16,
}
impl Version {
    /// Build a `Version` from its three components.
    pub fn new(major: u8, minor: u8, patch: u16) -> Version {
        Version { major, minor, patch }
    }
    /// Returns the kernel version of the currently running kernel.
    ///
    /// This is taken from `/proc/sys/kernel/osrelease`;
    pub fn current() -> ProcResult<Self> {
        read_value("/proc/sys/kernel/osrelease")
    }
    /// Parses a kernel version string, in major.minor.release syntax.
    ///
    /// Note that any extra information (stuff after a dash) is ignored.
    ///
    /// # Example
    ///
    /// ```
    /// # use procfs::KernelVersion;
    /// let a = KernelVersion::from_str("3.16.0-6-amd64").unwrap();
    /// let b = KernelVersion::new(3, 16, 0);
    /// assert_eq!(a, b);
    ///
    /// ```
    #[allow(clippy::should_implement_trait)]
    pub fn from_str(s: &str) -> Result<Self, &'static str> {
        // Only the leading run of digits and dots matters; everything from the
        // first other character (e.g. "-6-amd64") onwards is ignored.
        let end = s
            .find(|c: char| c != '.' && !c.is_ascii_digit())
            .unwrap_or(s.len());
        let mut parts = s[..end].split('.');
        // Check presence of all three components first, then parse them,
        // so error messages match the historical ordering.
        let major = parts.next().ok_or("Missing major version component")?;
        let minor = parts.next().ok_or("Missing minor version component")?;
        let patch = parts.next().ok_or("Missing patch version component")?;
        Ok(Version {
            major: major.parse().map_err(|_| "Failed to parse major version")?,
            minor: minor.parse().map_err(|_| "Failed to parse minor version")?,
            patch: patch.parse().map_err(|_| "Failed to parse patch version")?,
        })
    }
}
impl FromStr for Version {
    type Err = &'static str;
    /// Parses a kernel version string, in major.minor.release syntax.
    ///
    /// Note that any extra information (stuff after a dash) is ignored.
    ///
    /// # Example
    ///
    /// ```
    /// # use procfs::KernelVersion;
    /// let a: KernelVersion = "3.16.0-6-amd64".parse().unwrap();
    /// let b = KernelVersion::new(3, 16, 0);
    /// assert_eq!(a, b);
    ///
    /// ```
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        // Delegate to the inherent parser.
        Version::from_str(s)
    }
}
impl cmp::Ord for Version {
    /// Versions order lexicographically by (major, minor, patch).
    fn cmp(&self, other: &Self) -> cmp::Ordering {
        (self.major, self.minor, self.patch).cmp(&(other.major, other.minor, other.patch))
    }
}
impl cmp::PartialOrd for Version {
    /// Versions are totally ordered, so defer to the `Ord` implementation.
    fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
        Some(self.cmp(other))
    }
}
/// Represents a kernel type
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct Type {
    /// The kernel sysname (UTS_SYSNAME; "Linux" by default)
    pub sysname: String,
}
impl Type {
    /// Construct a `Type` from an already-known sysname string.
    pub fn new(sysname: String) -> Type {
        Type { sysname }
    }
    /// Read the kernel type from current running kernel
    ///
    /// Defined in `include/linux/uts.h` as UTS_SYSNAME, default is "Linux".
    /// The file is located at `/proc/sys/kernel/ostype`.
    pub fn current() -> ProcResult<Self> {
        read_value("/proc/sys/kernel/ostype")
    }
}
impl FromStr for Type {
type Err = &'static str;
/// Parse a kernel type string
///
/// Notice that in Linux source code, it is defined as a single string.
fn from_str(s: &str) -> Result<Self, Self::Err> {
Ok(Type::new(s.to_string()))
}
}
/// Represents a kernel build information
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct BuildInfo {
    /// The build number: the text following `#` up to the first space (e.g. "1" for "#1 SMP ...").
    pub version: String,
    /// All-uppercase build flags such as "SMP" or "PREEMPT".
    pub flags: HashSet<String>,
    /// This field contains any extra data from the /proc/sys/kernel/version file. It generally contains the build date of the kernel, but the format of the date can vary.
    ///
    /// A method named `extra_date` is provided which would try to parse some date formats. When the date format is not supported, an error will be returned. It depends on chrono feature.
    pub extra: String,
}
impl BuildInfo {
    /// Construct a `BuildInfo` from its component parts.
    pub fn new(version: &str, flags: HashSet<String>, extra: String) -> BuildInfo {
        BuildInfo {
            version: String::from(version),
            flags,
            extra,
        }
    }
    /// Read the kernel build information from current running kernel
    ///
    /// Generated by `scripts/mkcompile_h` when building the kernel.
    /// The file is located at `/proc/sys/kernel/version`.
    pub fn current() -> ProcResult<Self> {
        read_value("/proc/sys/kernel/version")
    }
    /// Check if SMP is ON
    pub fn smp(&self) -> bool {
        self.flags.contains("SMP")
    }
    /// Check if PREEMPT is ON
    pub fn preempt(&self) -> bool {
        self.flags.contains("PREEMPT")
    }
    /// Check if PREEMPTRT is ON
    pub fn preemptrt(&self) -> bool {
        self.flags.contains("PREEMPTRT")
    }
    /// Return version number
    ///
    /// This would parse number from first digits of version string. For example, #21~1 to 21.
    pub fn version_number(&self) -> ProcResult<u32> {
        // Take the leading run of ASCII digits and parse it.
        let digits: String = self.version.chars().take_while(char::is_ascii_digit).collect();
        let parsed: u32 = digits.parse().map_err(|_| "Failed to parse version number")?;
        Ok(parsed)
    }
    /// Parse extra field to `DateTime` object
    ///
    /// This function may fail as TIMESTAMP can be various formats.
    #[cfg(feature = "chrono")]
    pub fn extra_date(&self) -> ProcResult<chrono::DateTime<chrono::Local>> {
        // First try the "UTC" banner format, appending an explicit offset so
        // chrono can produce a fixed-offset DateTime.
        let with_offset = format!("{} +0000", &self.extra);
        if let Ok(parsed) = chrono::DateTime::parse_from_str(&with_offset, "%a %b %d %H:%M:%S UTC %Y %z") {
            return Ok(parsed.with_timezone(&chrono::Local));
        }
        // Then try the RFC-2822-style format some distributions use.
        if let Ok(parsed) = chrono::DateTime::parse_from_str(&self.extra, "%a, %d %b %Y %H:%M:%S %z") {
            return Ok(parsed.with_timezone(&chrono::Local));
        }
        Err(ProcError::Other("Failed to parse extra field to date".to_string()))
    }
}
impl FromStr for BuildInfo {
    type Err = &'static str;
    /// Parse a kernel build information string
    ///
    /// The first word must start with `#` and holds the build number; the
    /// following all-uppercase words are flags; everything after the first
    /// non-uppercase word is kept verbatim in `extra`.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        let mut words = s.split(' ');
        let version = match words.next() {
            Some(first) => match first.strip_prefix('#') {
                Some(v) => v.to_string(),
                None => return Err("Failed to parse kernel build version"),
            },
            None => return Err("Failed to parse kernel build version"),
        };
        let mut flags: HashSet<String> = HashSet::new();
        let mut extra = String::new();
        for word in &mut words {
            if word.chars().all(char::is_uppercase) {
                flags.insert(word.to_string());
            } else {
                // First non-flag word starts the "extra" section.
                extra.push_str(word);
                extra.push(' ');
                break;
            }
        }
        let tail: Vec<&str> = words.collect();
        extra.push_str(&tail.join(" "));
        Ok(BuildInfo { version, flags, extra })
    }
}
/// Returns the maximum process ID number.
///
/// This is taken from `/proc/sys/kernel/pid_max`.
/// The file is re-read on every call; the value is not cached.
///
/// # Example
///
/// ```
/// let pid_max = procfs::sys::kernel::pid_max().unwrap();
///
/// let pid = 42; // e.g. from user input, CLI args, etc.
///
/// if pid > pid_max {
///     eprintln!("bad process ID: {}", pid)
/// } else {
///     println!("good process ID: {}", pid);
/// }
/// ```
pub fn pid_max() -> ProcResult<i32> {
    read_value("/proc/sys/kernel/pid_max")
}
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
/// Represents the data from `/proc/sys/kernel/sem`
///
/// The file contains four whitespace-separated values, in order:
/// SEMMSL, SEMMNS, SEMOPM and SEMMNI.
pub struct SemaphoreLimits {
    /// The maximum semaphores per semaphore set
    pub semmsl: u64,
    /// A system-wide limit on the number of semaphores in all semaphore sets
    pub semmns: u64,
    /// The maximum number of operations that may be specified in a semop(2) call
    pub semopm: u64,
    /// A system-wide limit on the maximum number of semaphore identifiers
    pub semmni: u64,
}
impl SemaphoreLimits {
    /// Read the current semaphore limits from `/proc/sys/kernel/sem`.
    pub fn new() -> ProcResult<Self> {
        read_value("/proc/sys/kernel/sem")
    }
    /// Parse the four whitespace-separated fields of `/proc/sys/kernel/sem`.
    fn from_str(s: &str) -> Result<Self, &'static str> {
        let mut fields = s.split_ascii_whitespace();
        // Fetch all four tokens before parsing any of them, so a short line
        // reports a "Missing ..." error rather than a parse failure.
        let raw_semmsl = fields.next().ok_or("Missing SEMMSL")?;
        let raw_semmns = fields.next().ok_or("Missing SEMMNS")?;
        let raw_semopm = fields.next().ok_or("Missing SEMOPM")?;
        let raw_semmni = fields.next().ok_or("Missing SEMMNI")?;
        Ok(SemaphoreLimits {
            semmsl: raw_semmsl.parse().map_err(|_| "Failed to parse SEMMSL")?,
            semmns: raw_semmns.parse().map_err(|_| "Failed to parse SEMMNS")?,
            semopm: raw_semopm.parse().map_err(|_| "Failed to parse SEMOPM")?,
            semmni: raw_semmni.parse().map_err(|_| "Failed to parse SEMMNI")?,
        })
    }
}
impl FromStr for SemaphoreLimits {
    type Err = &'static str;
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        // Delegate to the inherent parser.
        SemaphoreLimits::from_str(s)
    }
}
/// Returns the system-wide limit on the total number of pages of System V shared memory
///
/// This is taken from `/proc/sys/kernel/shmall`
pub fn shmall() -> ProcResult<u64> {
    read_value("/proc/sys/kernel/shmall")
}
/// Returns the limit on the maximum (System V IPC) shared memory segment size that can be created.
/// The value defaults to SHMMAX
///
/// See also [set_shmmax](crate::sys::kernel::set_shmmax)
///
/// This is taken from `/proc/sys/kernel/shmmax`
pub fn shmmax() -> ProcResult<u64> {
    read_value("/proc/sys/kernel/shmmax")
}
/// Sets the limit on the maximum (System V IPC) shared memory segment size.
///
/// This writes `new_value` to `/proc/sys/kernel/shmmax`.
///
/// See also [shmmax](crate::sys::kernel::shmmax)
pub fn set_shmmax(new_value: u64) -> ProcResult<()> {
    write_value("/proc/sys/kernel/shmmax", new_value)
}
/// Returns the system-wide maximum number of System V shared memory segments that can be created
///
/// This is taken from `/proc/sys/kernel/shmmni`
pub fn shmmni() -> ProcResult<u64> {
    read_value("/proc/sys/kernel/shmmni")
}
bitflags! {
    /// Flags representing allowed sysrq functions
    ///
    /// See [sysrq](crate::sys::kernel::sysrq) for reading the current value.
    pub struct AllowedFunctions : u16 {
        /// Enable control of console log level
        const ENABLE_CONTROL_LOG_LEVEL = 2;
        /// Enable control of keyboard (SAK, unraw)
        const ENABLE_CONTROL_KEYBOARD = 4;
        /// Enable debugging dumps of processes etc
        const ENABLE_DEBUGGING_DUMPS = 8;
        /// Enable sync command
        const ENABLE_SYNC_COMMAND = 16;
        /// Enable remount read-only
        const ENABLE_REMOUNT_READ_ONLY = 32;
        /// Enable signaling of processes (term, kill, oom-kill)
        const ENABLE_SIGNALING_PROCESSES = 64;
        /// Allow reboot/poweroff
        const ALLOW_REBOOT_POWEROFF = 128;
        /// Allow nicing of all real-time tasks
        const ALLOW_NICING_REAL_TIME_TASKS = 256;
    }
}
/// Values controlling functions allowed to be invoked by the SysRq key
///
/// To construct this enum, see [sysrq](crate::sys::kernel::sysrq)
///
/// A raw value of 0 maps to `Disable`, 1 to `Enable`, and any other value is
/// interpreted as a bitmask of [AllowedFunctions].
#[derive(Copy, Clone, Debug)]
pub enum SysRq {
    /// Disable sysrq completely
    Disable,
    /// Enable all functions of sysrq
    Enable,
    /// Bitmask of allowed sysrq functions
    AllowedFunctions(AllowedFunctions),
}
impl SysRq {
    /// Convert to the numeric form written to `/proc/sys/kernel/sysrq`.
    fn to_number(self) -> u16 {
        match self {
            SysRq::Disable => 0,
            SysRq::Enable => 1,
            SysRq::AllowedFunctions(functions) => functions.bits,
        }
    }
    /// Parse the numeric contents of `/proc/sys/kernel/sysrq`.
    fn from_str(s: &str) -> ProcResult<Self> {
        let value = s.parse::<u16>()?;
        match value {
            0 => Ok(SysRq::Disable),
            1 => Ok(SysRq::Enable),
            // Any other value must be a valid bitmask of allowed functions.
            other => AllowedFunctions::from_bits(other)
                .map(SysRq::AllowedFunctions)
                .ok_or_else(|| "Invalid value".into()),
        }
    }
}
impl FromStr for SysRq {
    type Err = ProcError;
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        // Delegate to the inherent parser.
        SysRq::from_str(s)
    }
}
/// Return functions allowed to be invoked by the SysRq key
///
/// This is taken from `/proc/sys/kernel/sysrq`
pub fn sysrq() -> ProcResult<SysRq> {
    read_value("/proc/sys/kernel/sysrq")
}
/// Set functions allowed to be invoked by the SysRq key
///
/// This writes the numeric form of `new` to `/proc/sys/kernel/sysrq`.
pub fn set_sysrq(new: SysRq) -> ProcResult<()> {
    write_value("/proc/sys/kernel/sysrq", new.to_number())
}
/// The minimum value that can be written to `/proc/sys/kernel/threads-max` on Linux 4.1 or later
pub const THREADS_MIN: u32 = 20;
/// The maximum value that can be written to `/proc/sys/kernel/threads-max` on Linux 4.1 or later
pub const THREADS_MAX: u32 = 0x3fff_ffff;
/// Returns the system-wide limit on the number of threads (tasks) that can be created on the system.
///
/// This is taken from `/proc/sys/kernel/threads-max`.
/// See [set_threads_max] to change this limit.
pub fn threads_max() -> ProcResult<u32> {
    read_value("/proc/sys/kernel/threads-max")
}
/// Sets the system-wide limit on the number of threads (tasks) that can be created on the system.
///
/// Since Linux 4.1, this value is bounded, and must be in the range [THREADS_MIN]..=[THREADS_MAX].
/// This function will return an error if that is not the case.
pub fn set_threads_max(new_limit: u32) -> ProcResult<()> {
if let Ok(kernel) = *KERNEL {
if kernel.major >= 4 && kernel.minor >= 1 && !(THREADS_MIN..=THREADS_MAX).contains(&new_limit) {
return Err(ProcError::Other(format!(
"{} is outside the THREADS_MIN..=THREADS_MAX range",
new_limit
)));
}
}
write_value("/proc/sys/kernel/threads-max", new_limit)
}
#[cfg(test)]
mod tests {
    use super::*;
    // NOTE: several of these tests read live files under /proc and therefore
    // assume the test host is running Linux with procfs mounted.
    #[test]
    fn test_version() {
        // Suffixes after the numeric part ("-6-amd64", "_1") must be ignored.
        let a = Version::from_str("3.16.0-6-amd64").unwrap();
        let b = Version::new(3, 16, 0);
        assert_eq!(a, b);
        let a = Version::from_str("3.16.0").unwrap();
        let b = Version::new(3, 16, 0);
        assert_eq!(a, b);
        let a = Version::from_str("3.16.0_1").unwrap();
        let b = Version::new(3, 16, 0);
        assert_eq!(a, b);
    }
    #[test]
    fn test_type() {
        let a = Type::from_str("Linux").unwrap();
        assert_eq!(a.sysname, "Linux");
    }
    #[test]
    fn test_build_info() {
        // For Ubuntu, Manjaro, CentOS and others:
        let a = BuildInfo::from_str("#1 SMP PREEMPT Thu Sep 30 15:29:01 UTC 2021").unwrap();
        let mut flags: HashSet<String> = HashSet::new();
        flags.insert("SMP".to_string());
        flags.insert("PREEMPT".to_string());
        assert_eq!(a.version, "1");
        assert_eq!(a.version_number().unwrap(), 1);
        assert_eq!(a.flags, flags);
        assert!(a.smp());
        assert!(a.preempt());
        assert!(!a.preemptrt());
        assert_eq!(a.extra, "Thu Sep 30 15:29:01 UTC 2021");
        #[cfg(feature = "chrono")]
        let _ = a.extra_date().unwrap();
        // For Arch and others:
        let b = BuildInfo::from_str("#1 SMP PREEMPT Fri, 12 Nov 2021 19:22:10 +0000").unwrap();
        assert_eq!(b.version, "1");
        assert_eq!(b.version_number().unwrap(), 1);
        assert_eq!(b.flags, flags);
        assert_eq!(b.extra, "Fri, 12 Nov 2021 19:22:10 +0000");
        assert!(b.smp());
        assert!(b.preempt());
        assert!(!b.preemptrt());
        #[cfg(feature = "chrono")]
        let _ = b.extra_date().unwrap();
        // For Debian and others:
        let c = BuildInfo::from_str("#1 SMP Debian 5.10.46-4 (2021-08-03)").unwrap();
        let mut flags: HashSet<String> = HashSet::new();
        flags.insert("SMP".to_string());
        assert_eq!(c.version, "1");
        assert_eq!(c.version_number().unwrap(), 1);
        assert_eq!(c.flags, flags);
        assert_eq!(c.extra, "Debian 5.10.46-4 (2021-08-03)");
        assert!(c.smp());
        assert!(!c.preempt());
        assert!(!c.preemptrt());
        // Skip the date parsing for now
    }
    #[test]
    fn test_current() {
        let _ = Version::current().unwrap();
        let _ = Type::current().unwrap();
        let _ = BuildInfo::current().unwrap();
    }
    #[test]
    fn test_pid_max() {
        assert!(pid_max().is_ok());
    }
    #[test]
    fn test_semaphore_limits() {
        // Note that the below string has tab characters in it. Make sure to not remove them.
        let a = SemaphoreLimits::from_str("32000	1024000000	500	32000").unwrap();
        let b = SemaphoreLimits {
            semmsl: 32_000,
            semmns: 1_024_000_000,
            semopm: 500,
            semmni: 32_000,
        };
        assert_eq!(a, b);
        let a = SemaphoreLimits::from_str("1");
        assert!(a.is_err() && a.err().unwrap() == "Missing SEMMNS");
        let a = SemaphoreLimits::from_str("1 string 500 3200");
        assert!(a.is_err() && a.err().unwrap() == "Failed to parse SEMMNS");
    }
    #[test]
    fn test_sem() {
        let _ = SemaphoreLimits::new().unwrap();
    }
    #[test]
    fn test_shmall() {
        let _ = shmall().unwrap();
    }
    #[test]
    fn test_shmmax() {
        let _ = shmmax().unwrap();
    }
    #[test]
    fn test_shmmni() {
        let _ = shmmni().unwrap();
    }
    #[test]
    fn test_sysrq() {
        let sys_rq = sysrq().unwrap();
        println!("{:?}", sys_rq)
    }
    #[test]
    fn test_threads_max() {
        let _ = threads_max().unwrap();
    }
}

112
vendor/procfs/src/sys/kernel/random.rs vendored Normal file
View file

@ -0,0 +1,112 @@
//! These files provide additional information about the /dev/random device
//!
//! Note that some of these entries are only documented in random(4), while some are also documented under proc(5)
use crate::{read_value, write_value, ProcError, ProcResult};
use lazy_static::lazy_static;
lazy_static! {
    // Base path for every file handled by this module.
    static ref RANDOM_ROOT: std::path::PathBuf = std::path::PathBuf::from("/proc/sys/kernel/random");
}
/// This read-only file gives the available entropy, in bits. This will be a number in the range
/// 0 to 4096
///
/// This is read from `/proc/sys/kernel/random/entropy_avail`.
pub fn entropy_avail() -> ProcResult<u16> {
    read_value(RANDOM_ROOT.join("entropy_avail"))
}
/// This file gives the size of the entropy pool
///
/// The semantics of this file are different on kernel versions older than 2.6, however, since
/// Linux 2.6 it is read-only, and gives the size of the entropy pool in bits, containing the value 4096.
///
/// This is read from `/proc/sys/kernel/random/poolsize`.
///
/// See `man random(4)` for more information
pub fn poolsize() -> ProcResult<u16> {
    read_value(RANDOM_ROOT.join("poolsize"))
}
/// This file contains the number of bits of entropy required for waking up processes that sleep waiting
/// for entropy from /dev/random
///
/// The default is 64.
///
/// This will first attempt to read from `/proc/sys/kernel/random/read_wakeup_threshold` but it
/// will fallback to `/proc/sys/kernel/random/write_wakeup_threshold` if the former file is not found.
pub fn read_wakeup_threshold() -> ProcResult<u32> {
    // Only a missing file triggers the fallback; any other error is propagated.
    match read_value(RANDOM_ROOT.join("read_wakeup_threshold")) {
        Err(ProcError::NotFound(_)) => read_value(RANDOM_ROOT.join("write_wakeup_threshold")),
        other => other,
    }
}
/// Set the number of bits of entropy below which we wake up processes that do a
/// select(2) or poll(2) for write access to /dev/random
///
/// This writes `new_value` to `/proc/sys/kernel/random/write_wakeup_threshold`.
pub fn write_wakeup_threshold(new_value: u32) -> ProcResult<()> {
    write_value(RANDOM_ROOT.join("write_wakeup_threshold"), new_value)
}
/// This read-only file randomly generates a fresh 128-bit UUID on each read
pub fn uuid() -> ProcResult<String> {
    read_value(RANDOM_ROOT.join("uuid"))
}
/// This is a read-only file containing a 128-bit UUID generated at boot
pub fn boot_id() -> ProcResult<String> {
    read_value(RANDOM_ROOT.join("boot_id"))
}
#[cfg(test)]
mod tests {
    use super::*;
    // These tests read (and one writes) live files under
    // /proc/sys/kernel/random, so they require a Linux host.
    #[test]
    fn test_entropy_avail() {
        let entropy = entropy_avail().unwrap();
        assert!(entropy <= 4096);
    }
    #[test]
    fn test_poolsize() {
        // The kernel support section in the root lib.rs file says that we only aim to support >= 2.6 kernels,
        // so only test that case
        let _poolsize = poolsize().unwrap();
    }
    #[test]
    fn test_read_wakeup_threshold() {
        let threshold = read_wakeup_threshold().unwrap();
        println!("{}", threshold);
    }
    #[test]
    fn test_write_wakeup_threshold() {
        let old_threshold = read_wakeup_threshold().unwrap();
        match write_wakeup_threshold(1024) {
            Ok(_) => (),
            Err(err) => match err {
                ProcError::PermissionDenied(_) => {
                    // This is ok, not everyone wants to run our tests as root
                    return;
                }
                err => panic!("test_write_wakeup_threshold error: {:?}", err),
            },
        }
        // If we got here, let's restore the old threshold
        let _ = write_wakeup_threshold(old_threshold);
    }
    #[test]
    fn test_uuid_fns() {
        let uuid = uuid().unwrap();
        let boot_id = boot_id().unwrap();
        println!("UUID: {}", uuid);
        println!("boot UUID: {}", boot_id);
    }
}

11
vendor/procfs/src/sys/mod.rs vendored Normal file
View file

@ -0,0 +1,11 @@
//! Sysctl is a means of configuring certain aspects of the kernel at run-time,
//! and the `/proc/sys/` directory is there so that you don't even need special tools to do it!
//!
//! This directory (present since 1.3.57) contains a number of files
//! and subdirectories corresponding to kernel variables.
//! These variables can be read and sometimes modified using the `/proc` filesystem,
//! and the (deprecated) sysctl(2) system call.
pub mod fs;
pub mod kernel;
pub mod vm;

141
vendor/procfs/src/sys/vm.rs vendored Normal file
View file

@ -0,0 +1,141 @@
//! Memory management tuning buffer and cache management
//!
//! The files in this directory can be used to tune
//! the operation of the virtual memory (VM) subsystem of the Linux kernel
//! and the write out of dirty data to disk.
use std::fmt;
use std::str;
use crate::{read_value, write_value, ProcResult};
/// The amount of free memory in the system that should be reserved for users with the capability cap_sys_admin.
///
/// # Example
///
/// ```
/// use procfs::sys::vm::admin_reserve_kbytes;
///
/// assert_ne!(admin_reserve_kbytes().unwrap(), 0);
/// ```
pub fn admin_reserve_kbytes() -> ProcResult<usize> {
    read_value("/proc/sys/vm/admin_reserve_kbytes")
}
/// Set the amount of free memory in the system that should be reserved for users with the capability cap_sys_admin.
///
/// This writes `kbytes` to `/proc/sys/vm/admin_reserve_kbytes`.
pub fn set_admin_reserve_kbytes(kbytes: usize) -> ProcResult<()> {
    write_value("/proc/sys/vm/admin_reserve_kbytes", kbytes)
}
/// Force all zones are compacted such that free memory is available in contiguous blocks where possible.
///
/// This can be important for example in the allocation of huge pages
/// although processes will also directly compact memory as required.
///
/// Present only if the kernel was configured with CONFIG_COMPACTION.
pub fn compact_memory() -> ProcResult<()> {
    // Writing 1 to /proc/sys/vm/compact_memory triggers the compaction.
    write_value("/proc/sys/vm/compact_memory", 1)
}
/// Drop clean caches, dentries, and inodes from memory, causing that memory to become free.
///
/// The numeric value of each variant is what gets written to `/proc/sys/vm/drop_caches`.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum DropCache {
    /// default
    Default = 0,
    /// free pagecache
    PageCache = 1,
    /// free dentries and inodes
    Inodes = 2,
    /// free pagecache, dentries and inodes
    All = 3,
    /// disable
    Disable = 4,
}
impl fmt::Display for DropCache {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(
f,
"{}",
match self {
DropCache::Default => 0,
DropCache::PageCache => 1,
DropCache::Inodes => 2,
DropCache::All => 3,
DropCache::Disable => 4,
}
)
}
}
impl str::FromStr for DropCache {
    type Err = &'static str;
    /// Parses the numeric sysctl value ("0" through "4") into a variant.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        // Explicit i32 mirrors the integer type inferred in the original chain.
        let value: i32 = s.parse().map_err(|_| "Fail to parse drop cache")?;
        match value {
            0 => Ok(DropCache::Default),
            1 => Ok(DropCache::PageCache),
            2 => Ok(DropCache::Inodes),
            3 => Ok(DropCache::All),
            4 => Ok(DropCache::Disable),
            _ => Err("Unknown drop cache value"),
        }
    }
}
/// Causes the kernel to drop clean caches, dentries, and inodes from memory,
/// causing that memory to become free.
///
/// This can be useful for memory management testing and performing reproducible filesystem benchmarks.
/// Because writing to this file causes the benefits of caching to be lost,
/// it can degrade overall system performance.
pub fn drop_caches(drop: DropCache) -> ProcResult<()> {
    // `DropCache` displays as its numeric value, which is what the file expects.
    write_value("/proc/sys/vm/drop_caches", drop)
}
/// The maximum number of memory map areas a process may have.
///
/// Memory map areas are used as a side-effect of calling malloc,
/// directly by mmap, mprotect, and madvise, and also when loading shared libraries.
///
/// # Example
///
/// ```
/// use procfs::sys::vm::max_map_count;
///
/// assert_ne!(max_map_count().unwrap(), 0);
/// ```
pub fn max_map_count() -> ProcResult<u64> {
    read_value("/proc/sys/vm/max_map_count")
}
/// Set the maximum number of memory map areas a process may have.
///
/// This writes `count` to `/proc/sys/vm/max_map_count`.
///
/// Memory map areas are used as a side-effect of calling malloc,
/// directly by mmap, mprotect, and madvise, and also when loading shared libraries.
pub fn set_max_map_count(count: u64) -> ProcResult<()> {
    write_value("/proc/sys/vm/max_map_count", count)
}
#[cfg(test)]
mod tests {
    use super::*;
    use std::str::FromStr;
    #[test]
    fn test() {
        use std::path::Path;
        // These sysctl files are optional, so only read them when present.
        if Path::new("/proc/sys/vm/admin_reserve_kbytes").exists() {
            admin_reserve_kbytes().unwrap();
        }
        if Path::new("/proc/sys/vm/max_map_count").exists() {
            max_map_count().unwrap();
        }
        // DropCache must round-trip through FromStr and Display.
        for v in 0..5 {
            let s = format!("{}", v);
            let dc = DropCache::from_str(&s).unwrap();
            assert_eq!(format!("{}", dc), s);
        }
    }
}

111
vendor/procfs/src/sysvipc_shm.rs vendored Normal file
View file

@ -0,0 +1,111 @@
use std::io;
use super::{FileWrapper, ProcResult};
use std::str::FromStr;
#[cfg(feature = "serde1")]
use serde::{Deserialize, Serialize};
/// A shared memory segment parsed from `/proc/sysvipc/shm`
///
/// Related to [`crate::process::process::MMapPath::Vsys`]
#[derive(Debug, Clone)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
#[allow(non_snake_case)]
pub struct Shm {
    /// Segment key
    pub key: i32,
    /// Segment ID, unique
    pub shmid: u64,
    /// Access permissions, as octal
    pub perms: u16,
    /// Size in bytes
    pub size: u32,
    /// Creator PID
    pub cpid: i32,
    /// Last operator PID
    pub lpid: i32,
    /// Number of attached processes
    pub nattch: u32,
    /// User ID
    pub uid: u16,
    /// Group ID
    pub gid: u16,
    /// Creator UID
    pub cuid: u16,
    /// Creator GID
    pub cgid: u16,
    /// Time of last `shmat` (attach), epoch
    pub atime: u64,
    /// Time of last `shmdt` (detach), epoch
    pub dtime: u64,
    /// Time of last permission change, epoch
    pub ctime: u64,
    /// Current part of the shared memory resident in memory
    pub rss: u64,
    /// Current part of the shared memory in SWAP
    pub swap: u64,
}
impl Shm {
    /// Reads and parses the `/proc/sysvipc/shm`, returning an error if there are problems.
    pub fn new() -> ProcResult<Vec<Shm>> {
        let f = FileWrapper::open("/proc/sysvipc/shm")?;
        Shm::from_reader(f)
    }
    /// Parse shared memory segments from a custom `Read` supplying the
    /// `/proc/sysvipc/shm` format, instead of the default file.
    pub fn from_reader<R: io::Read>(r: R) -> ProcResult<Vec<Shm>> {
        use std::io::{BufRead, BufReader};
        let reader = BufReader::new(r);
        let mut vec = Vec::new();
        // See printing code here:
        // https://elixir.bootlin.com/linux/latest/source/ipc/shm.c#L1737
        // The first line is the column header, so it is skipped; every other
        // line describes one segment, with fields parsed positionally in the
        // kernel's printing order.
        for line in reader.lines().skip(1) {
            let line = expect!(line);
            let mut s = line.split_whitespace();
            let key = expect!(i32::from_str(expect!(s.next())));
            let shmid = expect!(u64::from_str(expect!(s.next())));
            let perms = expect!(u16::from_str(expect!(s.next())));
            let size = expect!(u32::from_str(expect!(s.next())));
            let cpid = expect!(i32::from_str(expect!(s.next())));
            let lpid = expect!(i32::from_str(expect!(s.next())));
            let nattch = expect!(u32::from_str(expect!(s.next())));
            let uid = expect!(u16::from_str(expect!(s.next())));
            let gid = expect!(u16::from_str(expect!(s.next())));
            let cuid = expect!(u16::from_str(expect!(s.next())));
            let cgid = expect!(u16::from_str(expect!(s.next())));
            let atime = expect!(u64::from_str(expect!(s.next())));
            let dtime = expect!(u64::from_str(expect!(s.next())));
            let ctime = expect!(u64::from_str(expect!(s.next())));
            let rss = expect!(u64::from_str(expect!(s.next())));
            let swap = expect!(u64::from_str(expect!(s.next())));
            let shm = Shm {
                key,
                shmid,
                perms,
                size,
                cpid,
                lpid,
                nattch,
                uid,
                gid,
                cuid,
                cgid,
                atime,
                dtime,
                ctime,
                rss,
                swap,
            };
            vec.push(shm);
        }
        Ok(vec)
    }
}

70
vendor/procfs/src/uptime.rs vendored Normal file
View file

@ -0,0 +1,70 @@
use crate::{FileWrapper, ProcResult};
use std::io::Read;
use std::str::FromStr;
use std::time::Duration;
/// The uptime of the system, based on the `/proc/uptime` file.
#[derive(Debug, Clone)]
#[non_exhaustive]
pub struct Uptime {
    /// The uptime of the system (including time spent in suspend), in seconds.
    pub uptime: f64,
    /// The sum of how much time each core has spent idle, in seconds.
    pub idle: f64,
}
impl Uptime {
    /// Read and parse `/proc/uptime`.
    pub fn new() -> ProcResult<Uptime> {
        let file = FileWrapper::open("/proc/uptime")?;
        Uptime::from_reader(file)
    }
    /// Parse uptime data from any reader supplying the `/proc/uptime` format:
    /// two space-separated floating point numbers.
    pub fn from_reader<R: Read>(mut r: R) -> ProcResult<Uptime> {
        let mut raw = Vec::with_capacity(128);
        r.read_to_end(&mut raw)?;
        let text = String::from_utf8_lossy(&raw);
        let mut fields = text.trim().split(' ');
        let uptime = expect!(f64::from_str(expect!(fields.next())));
        let idle = expect!(f64::from_str(expect!(fields.next())));
        Ok(Uptime { uptime, idle })
    }
    /// The uptime of the system (including time spent in suspend).
    pub fn uptime_duration(&self) -> Duration {
        Self::seconds_to_duration(self.uptime)
    }
    /// The sum of how much time each core has spent idle.
    pub fn idle_duration(&self) -> Duration {
        Self::seconds_to_duration(self.idle)
    }
    /// Convert a fractional seconds value to a `Duration`, keeping the
    /// centisecond precision that `/proc/uptime` provides.
    fn seconds_to_duration(seconds: f64) -> Duration {
        let whole = seconds.trunc() as u64;
        let centis = (seconds.fract() * 100.0).round() as u32;
        Duration::new(whole, centis * 10_000_000)
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use std::io::Cursor;
    #[test]
    fn test_uptime() {
        // Sample /proc/uptime contents: total uptime and aggregate idle time, in seconds.
        let reader = Cursor::new(b"2578790.61 1999230.98\n");
        let uptime = Uptime::from_reader(reader).unwrap();
        assert_eq!(uptime.uptime_duration(), Duration::new(2578790, 610_000_000));
        assert_eq!(uptime.idle_duration(), Duration::new(1999230, 980_000_000));
    }
}

258
vendor/procfs/support.md vendored Normal file
View file

@ -0,0 +1,258 @@
# Supported features
This is an approximate list of all the files under the `/proc` mount, and an indication if that feature/file is supported by the `procfs` crate. Help is needed to keep this file up-to-date, so please open an issue or pull request if you spot something that's not right.
* [ ] `/proc/[pid]`
* [ ] `/proc/[pid]/attr`
* [ ] `/proc/[pid]/attr/current`
* [ ] `/proc/[pid]/attr/exec`
* [ ] `/proc/[pid]/attr/fscreate`
* [ ] `/proc/[pid]/attr/keycreate`
* [ ] `/proc/[pid]/attr/prev`
* [ ] `/proc/[pid]/attr/socketcreate`
* [x] `/proc/[pid]/autogroup`
* [x] `/proc/[pid]/auxv`
* [x] `/proc/[pid]/cgroup`
* [ ] `/proc/[pid]/clear_refs`
* [x] `/proc/[pid]/cmdline`
* [x] `/proc/[pid]/comm`
* [x] `/proc/[pid]/coredump_filter`
* [ ] `/proc/[pid]/cpuset`
* [x] `/proc/[pid]/cwd`
* [x] `/proc/[pid]/environ`
* [x] `/proc/[pid]/exe`
* [x] `/proc/[pid]/fd/`
* [ ] `/proc/[pid]/fdinfo/`
* [ ] `/proc/[pid]/gid_map`
* [x] `/proc/[pid]/io`
* [x] `/proc/[pid]/limits`
* [ ] `/proc/[pid]/map_files/`
* [x] `/proc/[pid]/maps`
* [x] `/proc/[pid]/mem`
* [x] `/proc/[pid]/mountinfo`
* [ ] `/proc/[pid]/mounts`
* [x] `/proc/[pid]/mountstats`
* [x] `/proc/[pid]/ns/`
* [ ] `/proc/[pid]/numa_maps`
* [ ] `/proc/[pid]/oom_adj`
* [x] `/proc/[pid]/oom_score`
* [ ] `/proc/[pid]/oom_score_adj`
* [ ] `/proc/[pid]/pagemap`
* [ ] `/proc/[pid]/personality`
* [x] `/proc/[pid]/root`
* [ ] `/proc/[pid]/seccomp`
* [ ] `/proc/[pid]/setgroups`
* [ ] `/proc/[pid]/sched_autogroup_enabled`
* [x] `/proc/[pid]/smaps`
* [x] `/proc/[pid]/smaps_rollup`
* [ ] `/proc/[pid]/stack`
* [x] `/proc/[pid]/stat`
* [x] `/proc/[pid]/statm`
* [x] `/proc/[pid]/status`
* [ ] `/proc/[pid]/syscall`
* [ ] `/proc/[pid]/task`
* [x] `/proc/[pid]/task/[tid]/stat`
* [x] `/proc/[pid]/task/[tid]/status`
* [x] `/proc/[pid]/task/[tid]/io`
* [x] `/proc/[pid]/task/[tid]/children`
* [ ] `/proc/[pid]/timers`
* [ ] `/proc/[pid]/timerslack_ns`
* [ ] `/proc/[pid]/uid_map`
* [ ] `/proc/[pid]/gid_map`
* [x] `/proc/[pid]/wchan`
* [ ] `/proc/apm`
* [ ] `/proc/buddyinfo`
* [ ] `/proc/bus`
* [ ] `/proc/bus/pccard`
* [ ] `/proc/bus/pccard/drivers`
* [ ] `/proc/bus/pci`
* [ ] `/proc/bus/pci/devices`
* [x] `/proc/cmdline`
* [ ] `/proc/config.gz`
* [ ] `/proc/crypto`
* [ ] `/proc/cpuinfo`
* [ ] `/proc/devices`
* [x] `/proc/diskstats`
* [ ] `/proc/dma`
* [ ] `/proc/driver`
* [ ] `/proc/execdomains`
* [ ] `/proc/fb`
* [ ] `/proc/filesystems`
* [ ] `/proc/fs`
* [ ] `/proc/ide`
* [ ] `/proc/interrupts`
* [ ] `/proc/iomem`
* [ ] `/proc/ioports`
* [ ] `/proc/kallsyms`
* [ ] `/proc/kcore`
* [x] `/proc/keys`
* [x] `/proc/key-users`
* [ ] `/proc/kmsg`
* [ ] `/proc/kpagecgroup`
* [ ] `/proc/kpagecount`
* [ ] `/proc/kpageflags`
* [ ] `/proc/ksyms`
* [x] `/proc/loadavg`
* [x] `/proc/locks`
* [ ] `/proc/malloc`
* [x] `/proc/meminfo`
* [x] `/proc/modules`
* [ ] `/proc/mounts`
* [ ] `/proc/mtrr`
* [ ] `/proc/net`
* [x] `/proc/net/arp`
* [x] `/proc/net/dev`
* [ ] `/proc/net/dev_mcast`
* [ ] `/proc/net/igmp`
* [ ] `/proc/net/ipv6_route`
* [ ] `/proc/net/rarp`
* [ ] `/proc/net/raw`
* [x] `/proc/net/route`
* [ ] `/proc/net/snmp`
* [x] `/proc/net/tcp`
* [x] `/proc/net/udp`
* [x] `/proc/net/unix`
* [ ] `/proc/net/netfilter/nfnetlink_queue`
* [ ] `/proc/partitions`
* [ ] `/proc/pci`
* [x] `/proc/pressure`
* [x] `/proc/pressure/cpu`
* [x] `/proc/pressure/io`
* [x] `/proc/pressure/memory`
* [ ] `/proc/profile`
* [ ] `/proc/scsi`
* [ ] `/proc/scsi/scsi`
* [ ] `/proc/scsi/[drivername]`
* [ ] `/proc/self`
* [ ] `/proc/slabinfo`
* [x] `/proc/stat`
* [ ] `/proc/swaps`
* [ ] `/proc/sys`
* [ ] `/proc/sys/abi`
* [ ] `/proc/sys/debug`
* [ ] `/proc/sys/dev`
* [ ] `/proc/sys/fs`
* [x] `/proc/sys/fs/binfmt_misc`
* [x] `/proc/sys/fs/dentry-state`
* [ ] `/proc/sys/fs/dir-notify-enable`
* [ ] `/proc/sys/fs/dquot-max`
* [ ] `/proc/sys/fs/dquot-nr`
* [x] `/proc/sys/fs/epoll`
* [x] `/proc/sys/fs/file-max`
* [x] `/proc/sys/fs/file-nr`
* [ ] `/proc/sys/fs/inode-max`
* [ ] `/proc/sys/fs/inode-nr`
* [ ] `/proc/sys/fs/inode-state`
* [ ] `/proc/sys/fs/inotify`
* [ ] `/proc/sys/fs/lease-break-time`
* [ ] `/proc/sys/fs/leases-enable`
* [ ] `/proc/sys/fs/mount-max`
* [ ] `/proc/sys/fs/mqueue`
* [ ] `/proc/sys/fs/nr_open`
* [ ] `/proc/sys/fs/overflowgid`
* [ ] `/proc/sys/fs/overflowuid`
* [ ] `/proc/sys/fs/pipe-max-size`
* [ ] `/proc/sys/fs/pipe-user-pages-hard`
* [ ] `/proc/sys/fs/pipe-user-pages-soft`
* [ ] `/proc/sys/fs/protected_hardlinks`
* [ ] `/proc/sys/fs/protected_symlinks`
* [ ] `/proc/sys/fs/suid_dumpable`
* [ ] `/proc/sys/fs/super-max`
* [ ] `/proc/sys/fs/super-nr`
* [ ] `/proc/sys/kernel`
* [ ] `/proc/sys/kernel/acct`
* [ ] `/proc/sys/kernel/auto_msgmni`
* [ ] `/proc/sys/kernel/cap_last_cap`
* [ ] `/proc/sys/kernel/cap-bound`
* [ ] `/proc/sys/kernel/core_pattern`
* [ ] `/proc/sys/kernel/core_pipe_limit`
* [ ] `/proc/sys/kernel/core_uses_pid`
* [ ] `/proc/sys/kernel/ctrl-alt-del`
* [ ] `/proc/sys/kernel/dmesg_restrict`
* [ ] `/proc/sys/kernel/domainname`
* [ ] `/proc/sys/kernel/hostname`
* [ ] `/proc/sys/kernel/hotplug`
* [ ] `/proc/sys/kernel/htab-reclaim`
* [x] `/proc/sys/kernel/keys/\*`
* [ ] `/proc/sys/kernel/kptr_restrict`
* [ ] `/proc/sys/kernel/l2cr`
* [ ] `/proc/sys/kernel/modprobe`
* [ ] `/proc/sys/kernel/modules_disabled`
* [ ] `/proc/sys/kernel/msgmax`
* [ ] `/proc/sys/kernel/msgmni`
* [ ] `/proc/sys/kernel/msgmnb`
* [ ] `/proc/sys/kernel/ngroups_max`
* [ ] `/proc/sys/kernel/ns_last_pid`
* [x] `/proc/sys/kernel/ostype`
* [x] `/proc/sys/kernel/osrelease`
* [ ] `/proc/sys/kernel/overflowgid`
* [ ] `/proc/sys/kernel/overflowuid`
* [ ] `/proc/sys/kernel/panic`
* [ ] `/proc/sys/kernel/panic_on_oops`
* [x] `/proc/sys/kernel/pid_max`
* [ ] `/proc/sys/kernel/powersave-nap`
* [ ] `/proc/sys/kernel/printk`
* [ ] `/proc/sys/kernel/pty`
* [ ] `/proc/sys/kernel/pty/max`
* [ ] `/proc/sys/kernel/pty/nr`
* [x] `/proc/sys/kernel/random`
* [x] `/proc/sys/kernel/random/entropy_avail`
* [x] `/proc/sys/kernel/random/poolsize`
* [x] `/proc/sys/kernel/random/read_wakeup_threshold`
* [x] `/proc/sys/kernel/random/write_wakeup_threshold`
* [x] `/proc/sys/kernel/random/uuid`
* [x] `/proc/sys/kernel/random/boot_id`
* [ ] `/proc/sys/kernel/randomize_va_space`
* [ ] `/proc/sys/kernel/real-root-dev`
* [ ] `/proc/sys/kernel/reboot-cmd`
* [ ] `/proc/sys/kernel/rtsig-max`
* [ ] `/proc/sys/kernel/rtsig-nr`
* [ ] `/proc/sys/kernel/sched_child_runs_first`
* [ ] `/proc/sys/kernel/sched_rr_timeslice_ms`
* [ ] `/proc/sys/kernel/sched_rt_period_us`
* [ ] `/proc/sys/kernel/sched_rt_runtime_us`
* [ ] `/proc/sys/kernel/seccomp`
* [x] `/proc/sys/kernel/sem`
* [ ] `/proc/sys/kernel/sg-big-buff`
* [ ] `/proc/sys/kernel/shm_rmid_forced`
* [x] `/proc/sys/kernel/shmall`
* [x] `/proc/sys/kernel/shmmax`
* [x] `/proc/sys/kernel/shmmni`
* [ ] `/proc/sys/kernel/sysctl_writes_strict`
* [x] `/proc/sys/kernel/sysrq`
* [x] `/proc/sys/kernel/version`
* [x] `/proc/sys/kernel/threads-max`
* [ ] `/proc/sys/kernel/yama/ptrace_scope`
* [ ] `/proc/sys/kernel/zero-paged`
* [ ] `/proc/sys/net`
* [ ] `/proc/sys/net/core/bpf_jit_enable`
* [ ] `/proc/sys/net/core/somaxconn`
* [ ] `/proc/sys/proc`
* [ ] `/proc/sys/sunrpc`
* [ ] `/proc/sys/user`
* [ ] `/proc/sys/vm`
* [x] `/proc/sys/vm/admin_reserve_kbytes`
* [ ] `/proc/sys/vm/compact_memory`
* [x] `/proc/sys/vm/drop_caches`
* [ ] `/proc/sys/vm/legacy_va_layout`
* [ ] `/proc/sys/vm/memory_failure_early_kill`
* [ ] `/proc/sys/vm/memory_failure_recovery`
* [ ] `/proc/sys/vm/oom_dump_tasks`
* [ ] `/proc/sys/vm/oom_kill_allocating_task`
* [ ] `/proc/sys/vm/overcommit_kbytes`
* [x] `/proc/sys/vm/overcommit_memory`
* [ ] `/proc/sys/vm/overcommit_ratio`
* [ ] `/proc/sys/vm/panic_on_oom`
* [ ] `/proc/sys/vm/swappiness`
* [ ] `/proc/sys/vm/user_reserve_kbytes`
* [ ] `/proc/sysrq-trigger`
* [ ] `/proc/sysvipc`
* [ ] `/proc/thread-self`
* [ ] `/proc/timer_list`
* [ ] `/proc/timer_stats`
* [ ] `/proc/tty`
* [x] `/proc/uptime`
* [ ] `/proc/version`
* [x] `/proc/vmstat`
* [ ] `/proc/zoneinfo`