Vendor dependencies
Let's see how I like this workflow.
This commit is contained in:
parent
34d1830413
commit
9c435dc440
7500 changed files with 1665121 additions and 99 deletions
144
vendor/procfs/examples/README.md
vendored
Normal file
144
vendor/procfs/examples/README.md
vendored
Normal file
|
|
@ -0,0 +1,144 @@
|
|||
# Examples
|
||||
|
||||
These examples can be run by running `cargo run --example example_name`
|
||||
|
||||
## dump.rs
|
||||
|
||||
Prints out details about the current process (the dumper itself), or a process specified by PID
|
||||
|
||||
## interface_stats.rs
|
||||
|
||||
Runs continually and prints out how many bytes/packets are sent/received. Press ctrl-c to exit the example:
|
||||
|
||||
```text
|
||||
Interface: bytes recv bytes sent
|
||||
================ ==================== ====================
|
||||
br-883c4c992deb: 823307769 0.2 kbps 1537694158 0.5 kbps
|
||||
br-d73af6e6d094: 9137600399 0.9 kbps 2334717319 0.4 kbps
|
||||
docker0: 2938964881 0.6 kbps 19291691656 11.4 kbps
|
||||
docker_gwbridge: 1172300 0.0 kbps 15649536 0.0 kbps
|
||||
enp5s0f0: 44643307888420 5599.8 kbps 1509415976135 99.0 kbps
|
||||
enp5s0f1: 0 0.0 kbps 0 0.0 kbps
|
||||
lo: 161143108162 0.4 kbps 161143108162 0.4 kbps
|
||||
veth3154ff3: 3809619534 1.0 kbps 867529906 0.4 kbps
|
||||
veth487bc9b: 2650532684 0.8 kbps 2992458899 0.9 kbps
|
||||
veth8cb8ca8: 3234030733 0.7 kbps 16921098378 11.4 kbps
|
||||
vethbadbe14: 12007615348 3.8 kbps 15583195644 5.0 kbps
|
||||
vethc152f93: 978828 0.0 kbps 3839134 0.0 kbps
|
||||
vethe481f30: 1637142 0.0 kbps 15805768 0.0 kbps
|
||||
vethfac2e83: 19445827683 6.2 kbps 16194181515 5.1 kbps
|
||||
|
||||
```
|
||||
|
||||
## netstat.rs
|
||||
|
||||
Prints out all open and listening TCP/UDP sockets, along with the owning process. The
|
||||
output format is very similar to the standard `netstat` linux utility:
|
||||
|
||||
```text
|
||||
Local address Remote address State Inode PID/Program name
|
||||
0.0.0.0:53 0.0.0.0:0 Listen 30883 1409/pdns_server
|
||||
0.0.0.0:51413 0.0.0.0:0 Listen 24263 927/transmission-da
|
||||
0.0.0.0:35445 0.0.0.0:0 Listen 21777 942/rpc.mountd
|
||||
0.0.0.0:22 0.0.0.0:0 Listen 27973 1149/sshd
|
||||
0.0.0.0:25 0.0.0.0:0 Listen 28295 1612/master
|
||||
```
|
||||
|
||||
## pressure.rs
|
||||
|
||||
Prints out CPU/IO/Memory pressure information
|
||||
|
||||
## ps.rs
|
||||
|
||||
Prints out all processes that share the same tty as the current terminal. This is very similar to the standard
|
||||
`ps` utility on linux when run with no arguments:
|
||||
|
||||
```text
|
||||
PID TTY TIME CMD
|
||||
8369 pty/13 4.05 bash
|
||||
23124 pty/13 0.23 basic-http-serv
|
||||
24206 pty/13 0.11 ps
|
||||
```
|
||||
|
||||
## self_memory.rs
|
||||
|
||||
Shows several ways to get the current memory usage of the current process
|
||||
|
||||
```text
|
||||
PID: 21867
|
||||
Memory page size: 4096
|
||||
== Data from /proc/self/stat:
|
||||
Total virtual memory used: 3436544 bytes
|
||||
Total resident set: 220 pages (901120 bytes)
|
||||
|
||||
== Data from /proc/self/statm:
|
||||
Total virtual memory used: 839 pages (3436544 bytes)
|
||||
Total resident set:        220 pages (901120 bytes)
|
||||
Total shared memory: 191 pages (782336 bytes)
|
||||
|
||||
== Data from /proc/self/status:
|
||||
Total virtual memory used: 3436544 bytes
|
||||
Total resident set: 901120 bytes
|
||||
Total shared memory: 782336 bytes
|
||||
```
|
||||
|
||||
## lsmod.rs
|
||||
|
||||
This lists all the loaded kernel modules, in a simple tree format.
|
||||
|
||||
## diskstat.rs
|
||||
|
||||
Lists IO information for local disks:
|
||||
|
||||
```text
|
||||
sda1 mounted on /:
|
||||
total reads: 7325390 (13640070 ms)
|
||||
total writes: 124191552 (119109541 ms)
|
||||
total flushes: 0 (0 ms)
|
||||
```
|
||||
|
||||
Note: only local disks will be shown (not NFS mounts,
|
||||
and disks used for ZFS will not be shown either).
|
||||
|
||||
## lslocks.rs
|
||||
|
||||
Shows current file locks in a format that is similar to the `lslocks` utility.
|
||||
|
||||
## mountinfo.rs
|
||||
|
||||
Lists all mountpoints, along with their type and options:
|
||||
|
||||
```text
|
||||
sysfs on /sys type sysfs (noexec,relatime,nodev,rw,nosuid)
|
||||
proc on /proc type proc (noexec,rw,nodev,relatime,nosuid)
|
||||
udev on /dev type devtmpfs (rw,nosuid,relatime)
|
||||
mode = 755
|
||||
nr_inodes = 4109298
|
||||
size = 16437192k
|
||||
devpts on /dev/pts type devpts (nosuid,rw,noexec,relatime)
|
||||
gid = 5
|
||||
ptmxmode = 000
|
||||
mode = 620
|
||||
tmpfs on /run type tmpfs (rw,nosuid,noexec,relatime)
|
||||
size = 3291852k
|
||||
mode = 755
|
||||
/dev/sda1 on / type ext4 (rw,relatime)
|
||||
errors = remount-ro
|
||||
```
|
||||
|
||||
## process_hierarchy.rs
|
||||
|
||||
Lists all processes as a tree. Sub-processes will be hierarchically ordered beneath their parents.
|
||||
|
||||
```text
|
||||
1 /usr/lib/systemd/systemd --system --deserialize 54
|
||||
366 /usr/lib/systemd/systemd-journald
|
||||
375 /usr/lib/systemd/systemd-udevd
|
||||
383 /usr/bin/lvmetad -f
|
||||
525 /usr/bin/dbus-daemon --system --address=systemd: --nofork --nopidfile --systemd-activation --syslog-only
|
||||
529 /usr/bin/syncthing -no-browser -no-restart -logflags=0
|
||||
608 /usr/bin/syncthing -no-browser -no-restart -logflags=0
|
||||
530 /usr/lib/systemd/systemd-logind
|
||||
...
|
||||
```
|
||||
|
||||
31
vendor/procfs/examples/diskstat.rs
vendored
Normal file
31
vendor/procfs/examples/diskstat.rs
vendored
Normal file
|
|
@ -0,0 +1,31 @@
|
|||
use procfs::{diskstats, process::Process, DiskStat};
|
||||
use std::collections::HashMap;
|
||||
use std::iter::FromIterator;
|
||||
|
||||
fn main() {
|
||||
let me = Process::myself().unwrap();
|
||||
let mounts = me.mountinfo().unwrap();
|
||||
|
||||
// Get a list of all disks that we have IO stat info on
|
||||
let disk_stats: HashMap<(i32, i32), DiskStat> =
|
||||
HashMap::from_iter(diskstats().unwrap().into_iter().map(|i| ((i.major, i.minor), i)));
|
||||
|
||||
for mount in mounts {
|
||||
// parse the majmin string (something like "0:3") into an (i32, i32) tuple
|
||||
let (maj, min): (i32, i32) = {
|
||||
let mut s = mount.majmin.split(':');
|
||||
(s.next().unwrap().parse().unwrap(), s.next().unwrap().parse().unwrap())
|
||||
};
|
||||
|
||||
if let Some(stat) = disk_stats.get(&(maj, min)) {
|
||||
println!("{} mounted on {}:", stat.name, mount.mount_point.display());
|
||||
println!(" total reads: {} ({} ms)", stat.reads, stat.time_reading);
|
||||
println!(" total writes: {} ({} ms)", stat.writes, stat.time_writing);
|
||||
println!(
|
||||
" total flushes: {} ({} ms)",
|
||||
stat.flushes.unwrap_or(0),
|
||||
stat.time_flushing.unwrap_or(0)
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
17
vendor/procfs/examples/dump.rs
vendored
Normal file
17
vendor/procfs/examples/dump.rs
vendored
Normal file
|
|
@ -0,0 +1,17 @@
|
|||
extern crate procfs;
|
||||
|
||||
fn main() {
|
||||
let pid = std::env::args().nth(1).and_then(|s| s.parse::<i32>().ok());
|
||||
|
||||
let prc = if let Some(pid) = pid {
|
||||
println!("Info for pid={}", pid);
|
||||
procfs::process::Process::new(pid).unwrap()
|
||||
} else {
|
||||
procfs::process::Process::myself().unwrap()
|
||||
};
|
||||
println!("{:#?}", prc);
|
||||
|
||||
let stat = prc.stat().unwrap();
|
||||
println!("State: {:?}", stat.state());
|
||||
println!("RSS: {} bytes", stat.rss_bytes().unwrap());
|
||||
}
|
||||
41
vendor/procfs/examples/interface_stats.rs
vendored
Normal file
41
vendor/procfs/examples/interface_stats.rs
vendored
Normal file
|
|
@ -0,0 +1,41 @@
|
|||
//! For each interface, display the number of bytes sent and received, along with a data rate
|
||||
|
||||
fn main() {
|
||||
let delay = std::time::Duration::from_secs(2);
|
||||
|
||||
let mut prev_stats = procfs::net::dev_status().unwrap();
|
||||
let mut prev_now = std::time::Instant::now();
|
||||
loop {
|
||||
std::thread::sleep(delay);
|
||||
let now = std::time::Instant::now();
|
||||
let dev_stats = procfs::net::dev_status().unwrap();
|
||||
|
||||
// calculate diffs from previous
|
||||
let dt = (now - prev_now).as_millis() as f32 / 1000.0;
|
||||
|
||||
let mut stats: Vec<_> = dev_stats.values().collect();
|
||||
stats.sort_by_key(|s| &s.name);
|
||||
println!();
|
||||
println!(
|
||||
"{:>16}: {:<20} {:<20} ",
|
||||
"Interface", "bytes recv", "bytes sent"
|
||||
);
|
||||
println!(
|
||||
"{:>16} {:<20} {:<20}",
|
||||
"================", "====================", "===================="
|
||||
);
|
||||
for stat in stats {
|
||||
println!(
|
||||
"{:>16}: {:<20} {:>6.1} kbps {:<20} {:>6.1} kbps ",
|
||||
stat.name,
|
||||
stat.recv_bytes,
|
||||
(stat.recv_bytes - prev_stats.get(&stat.name).unwrap().recv_bytes) as f32 / dt / 1000.0,
|
||||
stat.sent_bytes,
|
||||
(stat.sent_bytes - prev_stats.get(&stat.name).unwrap().sent_bytes) as f32 / dt / 1000.0
|
||||
);
|
||||
}
|
||||
|
||||
prev_stats = dev_stats;
|
||||
prev_now = now;
|
||||
}
|
||||
}
|
||||
67
vendor/procfs/examples/lslocks.rs
vendored
Normal file
67
vendor/procfs/examples/lslocks.rs
vendored
Normal file
|
|
@ -0,0 +1,67 @@
|
|||
use procfs::process::{FDTarget, Process};
|
||||
use rustix::fs::AtFlags;
|
||||
use std::path::Path;
|
||||
|
||||
fn main() {
|
||||
let myself = Process::myself().unwrap();
|
||||
let mountinfo = myself.mountinfo().unwrap();
|
||||
|
||||
for lock in procfs::locks().unwrap() {
|
||||
lock.pid
|
||||
.and_then(|pid| Process::new(pid).ok())
|
||||
.and_then(|proc| proc.cmdline().ok())
|
||||
.and_then(|mut cmd| cmd.drain(..).next())
|
||||
.map_or_else(
|
||||
|| {
|
||||
print!("{:18}", "(undefined)");
|
||||
},
|
||||
|s| {
|
||||
let p = Path::new(&s);
|
||||
print!("{:18}", p.file_name().unwrap_or(p.as_os_str()).to_string_lossy());
|
||||
},
|
||||
);
|
||||
|
||||
print!("{:<12} ", lock.pid.unwrap_or(-1));
|
||||
print!("{:12} ", lock.lock_type.as_str());
|
||||
print!("{:12} ", lock.mode.as_str());
|
||||
print!("{:12} ", lock.kind.as_str());
|
||||
|
||||
// try to find the path for this inode
|
||||
let mut found = false;
|
||||
if let Some(pid) = lock.pid {
|
||||
if let Ok(fds) = Process::new(pid).and_then(|p| p.fd()) {
|
||||
for f in fds {
|
||||
let fd = f.unwrap();
|
||||
if let FDTarget::Path(p) = fd.target {
|
||||
if let Ok(stat) = rustix::fs::statat(&rustix::fs::cwd(), &p, AtFlags::empty()) {
|
||||
if stat.st_ino as u64 == lock.inode {
|
||||
print!("{}", p.display());
|
||||
found = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if !found {
|
||||
// we don't have a PID or we don't have permission to inspect the processes files, but we still have the device and inode
|
||||
// There's no way to look up a path from an inode, so just bring the device mount point
|
||||
for mount in &mountinfo {
|
||||
if format!("{}:{}", lock.devmaj, lock.devmin) == mount.majmin {
|
||||
print!("{}...", mount.mount_point.display());
|
||||
found = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if !found {
|
||||
// still not found? print the device
|
||||
print!("{}:{}", lock.devmaj, lock.devmin);
|
||||
}
|
||||
|
||||
println!();
|
||||
}
|
||||
}
|
||||
30
vendor/procfs/examples/lsmod.rs
vendored
Normal file
30
vendor/procfs/examples/lsmod.rs
vendored
Normal file
|
|
@ -0,0 +1,30 @@
|
|||
use std::collections::HashMap;
|
||||
|
||||
fn print(name: &str, indent: usize, mods: &HashMap<&str, Vec<&str>>) {
|
||||
println!("{}{} {}", if indent == 0 { "-" } else { " " }, " ".repeat(indent), name);
|
||||
|
||||
if let Some(uses_list) = mods.get(name) {
|
||||
for name in uses_list {
|
||||
print(name, indent + 2, mods);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn main() {
|
||||
let modules = procfs::modules().unwrap();
|
||||
|
||||
// each module has a list of what other modules use it. Let's invert this and create a list of the modules used by each module.
|
||||
// This maps a module name to a list of modules that it uses
|
||||
let mut map: HashMap<&str, Vec<&str>> = HashMap::new();
|
||||
|
||||
for module in modules.values() {
|
||||
for name in &module.used_by {
|
||||
map.entry(name).or_default().push(&module.name);
|
||||
}
|
||||
}
|
||||
|
||||
// println!("{:?}", map["xt_policy"]);
|
||||
for modname in map.keys() {
|
||||
print(modname, 0, &map);
|
||||
}
|
||||
}
|
||||
28
vendor/procfs/examples/mountinfo.rs
vendored
Normal file
28
vendor/procfs/examples/mountinfo.rs
vendored
Normal file
|
|
@ -0,0 +1,28 @@
|
|||
use procfs::process::Process;
|
||||
use std::collections::HashSet;
|
||||
|
||||
fn main() {
|
||||
for mount in Process::myself().unwrap().mountinfo().unwrap() {
|
||||
let (a, b): (HashSet<_>, HashSet<_>) = mount
|
||||
.mount_options
|
||||
.into_iter()
|
||||
.chain(mount.super_options)
|
||||
.partition(|&(_, ref m)| m.is_none());
|
||||
|
||||
println!(
|
||||
"{} on {} type {} ({})",
|
||||
mount.mount_source.unwrap_or_else(|| "None".to_string()),
|
||||
mount.mount_point.display(),
|
||||
mount.fs_type,
|
||||
a.into_iter().map(|(k, _)| k).collect::<Vec<_>>().join(",")
|
||||
);
|
||||
|
||||
for (opt, val) in b {
|
||||
if let Some(val) = val {
|
||||
println!(" {} = {}", opt, val);
|
||||
} else {
|
||||
println!(" {}", opt);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
53
vendor/procfs/examples/netstat.rs
vendored
Normal file
53
vendor/procfs/examples/netstat.rs
vendored
Normal file
|
|
@ -0,0 +1,53 @@
|
|||
#![allow(clippy::print_literal)]
|
||||
|
||||
extern crate procfs;
|
||||
|
||||
use procfs::process::{FDTarget, Stat};
|
||||
|
||||
use std::collections::HashMap;
|
||||
|
||||
fn main() {
|
||||
// get all processes
|
||||
let all_procs = procfs::process::all_processes().unwrap();
|
||||
|
||||
// build up a map between socket inodes and processes:
|
||||
let mut map: HashMap<u64, Stat> = HashMap::new();
|
||||
for p in all_procs {
|
||||
let process = p.unwrap();
|
||||
if let (Ok(stat), Ok(fds)) = (process.stat(), process.fd()) {
|
||||
for fd in fds {
|
||||
if let FDTarget::Socket(inode) = fd.unwrap().target {
|
||||
map.insert(inode, stat.clone());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// get the tcp table
|
||||
let tcp = procfs::net::tcp().unwrap();
|
||||
let tcp6 = procfs::net::tcp6().unwrap();
|
||||
|
||||
println!(
|
||||
"{:<26} {:<26} {:<15} {:<8} {}",
|
||||
"Local address", "Remote address", "State", "Inode", "PID/Program name"
|
||||
);
|
||||
|
||||
for entry in tcp.into_iter().chain(tcp6) {
|
||||
// find the process (if any) that has an open FD to this entry's inode
|
||||
let local_address = format!("{}", entry.local_address);
|
||||
let remote_addr = format!("{}", entry.remote_address);
|
||||
let state = format!("{:?}", entry.state);
|
||||
if let Some(stat) = map.get(&entry.inode) {
|
||||
println!(
|
||||
"{:<26} {:<26} {:<15} {:<12} {}/{}",
|
||||
local_address, remote_addr, state, entry.inode, stat.pid, stat.comm
|
||||
);
|
||||
} else {
|
||||
// We might not always be able to find the process assocated with this socket
|
||||
println!(
|
||||
"{:<26} {:<26} {:<15} {:<12} -",
|
||||
local_address, remote_addr, state, entry.inode
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
7
vendor/procfs/examples/pressure.rs
vendored
Normal file
7
vendor/procfs/examples/pressure.rs
vendored
Normal file
|
|
@ -0,0 +1,7 @@
|
|||
/// A basic example of /proc/pressure/ usage.
|
||||
|
||||
fn main() {
|
||||
println!("memory pressure: {:#?}", procfs::MemoryPressure::new());
|
||||
println!("cpu pressure: {:#?}", procfs::CpuPressure::new());
|
||||
println!("io pressure: {:#?}", procfs::IoPressure::new());
|
||||
}
|
||||
74
vendor/procfs/examples/process_hierarchy.rs
vendored
Normal file
74
vendor/procfs/examples/process_hierarchy.rs
vendored
Normal file
|
|
@ -0,0 +1,74 @@
|
|||
use procfs::process::{all_processes, Stat};
|
||||
|
||||
struct ProcessEntry {
|
||||
stat: Stat,
|
||||
cmdline: Option<Vec<String>>,
|
||||
}
|
||||
|
||||
/// Print all processes as a tree.
|
||||
/// The tree reflects the hierarchical relationship between parent and child processes.
|
||||
fn main() {
|
||||
// Get all processes
|
||||
let processes: Vec<ProcessEntry> = match all_processes() {
|
||||
Err(err) => {
|
||||
println!("Failed to read all processes: {}", err);
|
||||
return;
|
||||
}
|
||||
Ok(processes) => processes,
|
||||
}
|
||||
.filter_map(|v| {
|
||||
v.and_then(|p| {
|
||||
let stat = p.stat()?;
|
||||
let cmdline = p.cmdline().ok();
|
||||
Ok(ProcessEntry { stat, cmdline })
|
||||
})
|
||||
.ok()
|
||||
})
|
||||
.collect();
|
||||
// Iterate through all processes and start with top-level processes.
|
||||
// Those can be identified by checking if their parent PID is zero.
|
||||
for process in &processes {
|
||||
if process.stat.ppid == 0 {
|
||||
print_process(process, &processes, 0);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Take a process, print its command and recursively list all child processes.
|
||||
/// This function will call itself until no further children can be found.
|
||||
/// It's a depth-first tree exploration.
|
||||
///
|
||||
/// depth: The hierarchical depth of the process
|
||||
fn print_process(process: &ProcessEntry, all_processes: &Vec<ProcessEntry>, depth: usize) {
|
||||
let cmdline = match &process.cmdline {
|
||||
Some(cmdline) => cmdline.join(" "),
|
||||
None => "zombie process".into(),
|
||||
};
|
||||
|
||||
// Some processes seem to have an empty cmdline.
|
||||
if cmdline.is_empty() {
|
||||
return;
|
||||
}
|
||||
|
||||
// 10 characters width for the pid
|
||||
let pid_length = 8;
|
||||
let mut pid = process.stat.pid.to_string();
|
||||
pid.push_str(&" ".repeat(pid_length - pid.len()));
|
||||
|
||||
let padding = " ".repeat(4 * depth);
|
||||
println!("{}{}{}", pid, padding, cmdline);
|
||||
|
||||
let children = get_children(process.stat.pid, all_processes);
|
||||
for child in &children {
|
||||
print_process(child, all_processes, depth + 1);
|
||||
}
|
||||
}
|
||||
|
||||
/// Get all children of a specific process, by iterating through all processes and
|
||||
/// checking their parent pid.
|
||||
fn get_children(pid: i32, all_processes: &[ProcessEntry]) -> Vec<&ProcessEntry> {
|
||||
all_processes
|
||||
.iter()
|
||||
.filter(|process| process.stat.ppid == pid)
|
||||
.collect()
|
||||
}
|
||||
25
vendor/procfs/examples/ps.rs
vendored
Normal file
25
vendor/procfs/examples/ps.rs
vendored
Normal file
|
|
@ -0,0 +1,25 @@
|
|||
#![allow(clippy::print_literal)]
|
||||
|
||||
extern crate procfs;
|
||||
|
||||
/// A very basic clone of `ps` on Linux, in the simple no-argument mode.
|
||||
/// It shows all the processes that share the same tty as our self
|
||||
|
||||
fn main() {
|
||||
let mestat = procfs::process::Process::myself().unwrap().stat().unwrap();
|
||||
let tps = procfs::ticks_per_second().unwrap();
|
||||
|
||||
println!("{: >10} {: <8} {: >8} {}", "PID", "TTY", "TIME", "CMD");
|
||||
|
||||
let tty = format!("pty/{}", mestat.tty_nr().1);
|
||||
for p in procfs::process::all_processes().unwrap() {
|
||||
let prc = p.unwrap();
|
||||
if let Ok(stat) = prc.stat() {
|
||||
if stat.tty_nr == mestat.tty_nr {
|
||||
// total_time is in seconds
|
||||
let total_time = (stat.utime + stat.stime) as f32 / (tps as f32);
|
||||
println!("{: >10} {: <8} {: >8} {}", stat.pid, tty, total_time, stat.comm);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
56
vendor/procfs/examples/self_memory.rs
vendored
Normal file
56
vendor/procfs/examples/self_memory.rs
vendored
Normal file
|
|
@ -0,0 +1,56 @@
|
|||
use procfs::process::Process;
|
||||
|
||||
fn main() {
|
||||
let me = Process::myself().expect("Unable to load myself!");
|
||||
println!("PID: {}", me.pid);
|
||||
|
||||
let page_size = procfs::page_size().expect("Unable to determinte page size!") as u64;
|
||||
println!("Memory page size: {}", page_size);
|
||||
|
||||
// Note: when comparing the below values to what "top" will display, note that "top" will use
|
||||
// base-2 units (kibibytes), not base-10 units (kilobytes).
|
||||
|
||||
if let Ok(stat) = me.stat() {
|
||||
println!("== Data from /proc/self/stat:");
|
||||
println!("Total virtual memory used: {} bytes", stat.vsize);
|
||||
println!(
|
||||
"Total resident set: {} pages ({} bytes)",
|
||||
stat.rss,
|
||||
stat.rss as u64 * page_size
|
||||
);
|
||||
println!();
|
||||
}
|
||||
|
||||
if let Ok(statm) = me.statm() {
|
||||
println!("== Data from /proc/self/statm:");
|
||||
println!(
|
||||
"Total virtual memory used: {} pages ({} bytes)",
|
||||
statm.size,
|
||||
statm.size * page_size
|
||||
);
|
||||
println!(
|
||||
"Total resident set: {} pages ({} byte)s",
|
||||
statm.resident,
|
||||
statm.resident * page_size
|
||||
);
|
||||
println!(
|
||||
"Total shared memory: {} pages ({} bytes)",
|
||||
statm.shared,
|
||||
statm.shared * page_size
|
||||
);
|
||||
println!();
|
||||
}
|
||||
|
||||
if let Ok(status) = me.status() {
|
||||
println!("== Data from /proc/self/status:");
|
||||
println!(
|
||||
"Total virtual memory used: {} bytes",
|
||||
status.vmsize.expect("vmsize") * 1024
|
||||
);
|
||||
println!("Total resident set: {} bytes", status.vmrss.expect("vmrss") * 1024);
|
||||
println!(
|
||||
"Total shared memory: {} bytes",
|
||||
status.rssfile.expect("rssfile") * 1024 + status.rssshmem.expect("rssshmem") * 1024
|
||||
);
|
||||
}
|
||||
}
|
||||
29
vendor/procfs/examples/shm.rs
vendored
Normal file
29
vendor/procfs/examples/shm.rs
vendored
Normal file
|
|
@ -0,0 +1,29 @@
|
|||
extern crate procfs;
|
||||
|
||||
/// List processes using posix shared memory segments
|
||||
|
||||
fn main() {
|
||||
let shared_memory_vec = procfs::Shm::new().unwrap();
|
||||
|
||||
for shared_memory in &shared_memory_vec {
|
||||
println!("key: {}, shmid: {}", shared_memory.key, shared_memory.shmid);
|
||||
println!("============");
|
||||
|
||||
for prc in procfs::process::all_processes().unwrap() {
|
||||
let prc = prc.unwrap();
|
||||
match prc.smaps() {
|
||||
Ok(memory_maps) => {
|
||||
for (memory_map, _memory_map_data) in &memory_maps {
|
||||
if let procfs::process::MMapPath::Vsys(key) = memory_map.pathname {
|
||||
if key == shared_memory.key && memory_map.inode == shared_memory.shmid {
|
||||
println!("{}: {:?}", prc.pid, prc.cmdline().unwrap());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
Err(_) => continue,
|
||||
}
|
||||
}
|
||||
println!();
|
||||
}
|
||||
}
|
||||
Loading…
Add table
Add a link
Reference in a new issue