# System
# User
# Add
useradd -m -s /bin/bash b.dauphin
useradd -M -r -s /bin/false -d /nonexistent prodigy
- -r : create a system account (UID in the system range, typically below 1000)
- -s : shell path
- -m : create the home dir
- -M : do not create the home dir
- -d : home directory
# Change password
echo 'root:toto' | chpasswd
Or get prompted to change your current user's password:
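For example (passwd with no argument changes your own password; as root you can pass a username, shown here with the name used elsewhere in these notes):
passwd
passwd b.dauphin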
# impersonate
switch to a user (default root)
su -
su - b.dauphin
# sudo
# Edit
In order to edit the sudoers file, use the proper tool: visudo. Because even for root, the file is read-only.
visudo -f /var/tmp/sudoers.new
visudo -f /etc/sudoers
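For reference, a drop-in file under /etc/sudoers.d/ might look like the sketch below (the user and command are illustrative; validate it with visudo -c afterwards):
# /etc/sudoers.d/dev (illustrative content)
b.dauphin ALL=(ALL) NOPASSWD: /usr/bin/systemctl restart nginx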
# Checkconfig
visudo -c
/etc/sudoers: parsed OK
/etc/sudoers.d/dev: parsed OK
visudo -f /etc/sudoers.d/qwbind-dev -c
/etc/sudoers.d/qwbind-dev: parsed OK
# Group
# Add
Add user baptiste to the sudo group
usermod -aG sudo baptiste
usermod -aG wireshark b.dauphin
# Performance
htop
nload
# Memory information
free -g
# Sort by memory
To sort by memory usage we can use either the %MEM or RSS columns.
- RSS : Resident Set Size, the total memory usage in kilobytes
- %MEM : the same information as a percentage of the total available memory
ps aux --sort=+rss
ps aux --sort=%mem
Empty swap
swapoff -a && swapon -a
# htop
How to read memory usage in htop?
htop
- Hide user threads: shift + H
- Hide kernel threads: shift + K
- Close the process tree view: F5
- Then you can sort out the process of your interest by PID and read the RES column
- Sort by MEM% by pressing shift + M (or F3 to search in the cmd line)
# Get memory physical size
# Kilobyte
grep MemTotal /proc/meminfo | awk '{print $2}'
# MegaByte
grep MemTotal /proc/meminfo | awk '{print $2}' | xargs -I {} echo "scale=4; {}/1024^1" | bc
# GigaByte
grep MemTotal /proc/meminfo | awk '{print $2}' | xargs -I {} echo "scale=4; {}/1024^2" | bc
# Get number processing units (CPU / cores)
available to the current process (may be less than all online)
nproc
all online
nproc --all
old-fashioned way
grep -c ^processor /proc/cpuinfo
# Graphic
- Graphic server (often X11, Xorg, or just X, it's the same software)
- Display Manager (SDDM, lightDM, gnome)
- Window Manager (i3-wm, gnome)
# Display Manager
# SDDM - lightweight
Simple Desktop Display Manager (SDDM) is a display manager for the X11 and Wayland windowing systems. SDDM was written from scratch in C++11 and supports theming via QML.
service sddm status
service sddm restart : restart sddm (to load new monitor)
# Gnome - Nice display for personal laptop
# Window Manager
# i3
update-alternatives --install /usr/bin/x-window-manager x-window-manager /usr/bin/i3 20
# Automatically starting applications on i3 startup
https://i3wm.org/docs/userguide.html#_automatically_starting_applications_on_i3_startup
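For reference, autostart entries go in your i3 config (~/.config/i3/config) as exec lines; the programs below are only illustrative:
# run once at i3 startup
exec --no-startup-id nm-applet
# run at every in-place restart of i3 too
exec_always --no-startup-id setxkbmap fr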
# Shell - stream
# Redirection
The > operator redirects the output, usually to a file, but it can be to a device. You can also use >> to append. If you don't specify a number then the standard output stream is assumed, but you can also redirect errors:
- > file : redirects stdout to file
- 1> file : redirects stdout to file
- 2> file : redirects stderr to file
- &> file : redirects stdout and stderr to file
/dev/null is the null device: it takes any input you want and throws it away. It can be used to suppress any output.
Is there a difference between > /dev/null 2>&1 and &> /dev/null ?
&> is new in Bash 4; the former is just the traditional way, I am just so used to it (easy to remember).
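In practice both of the lines below silence a command completely; the first works in any POSIX shell, the second is Bash-specific:
grep root /etc/shadow > /dev/null 2>&1
grep root /etc/shadow &> /dev/null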
The <<< word operator (here-string): the word is expanded and supplied to the command on its standard input.
Do not confuse it with < , which redirects from a file rather than expanding a word.
cat <<< hello
hello
# Shell - common commands
# Cut
Split stdin into a delimiter-separated list of fields
cat text.data | \
cut -f1 -d'/' | \
cut -c 4-
- -d'/' : field delimiter
- -f1 : keep the first field (before the first occurrence of the delimiter)
- -f2 : keep the second field
- cut -c -4 : keep only the first 4 characters
- cut -c 4 : keep only the 4th character
- cut -c 4- : keep everything from the 4th character onward
# tr
Stands for translate: map the characters of SET1 into SET2, or delete characters.
Remove the characters { and } if found (removed because they are replaced by nothing):
.. | tr -d '{}'
Replace { and } by ( and ):
.. | tr '{}' '()'
Remove all spaces
echo 'a b c d de d fre' | tr -d '[:blank:]'
# gunzip
By default gunzip replaces the .gz file with the decompressed file. -c : write on standard output (extract to STDOUT) and keep the original file unchanged
gunzip -c file.gz > file
# egrep
grep using regex.
Match lines matching the regex CN=.* (the backslash protects the * from shell interpretation):
egrep CN=.\* file.ca.server
Match lines matching the regex CN=.* AND keep only the matching characters:
egrep CN=.\* file.ca.server -o
Remove empty lines and commented ones.
egrep -v '^(#|[[:space:]]|$)' /var/opt/gitlab/gitlab-shell/config.yml
- # : a classic character, like a, b, c
- \t : tab
- [[:space:]] : match whitespace (spaces, tabs, carriage returns, etc.)
- $ : end of line (right after ^ it matches an empty line)
# awk
pattern scanning and processing language
echo 'test troll hello' | awk '{print $1}'
test
echo 'test troll hello' | awk '{print $2}'
troll
echo 'test troll hello' | awk '{print $3}'
hello
kubectl get pods -o wide | awk '($3 ~ "Running") && ($6 ~ "10.233.64.1.*") {print "\n Drop >> "$1;system("ping -c 1 "$7)}'
# less
Start at the end of a file
- + will run an initial command when the file is opened; G jumps to the end
less +G app.log
# sed
Stream editor
Cmd | meaning |
---|---|
sed -n | silent mode: by default print nothing; use with /p to print only the lines of interest |
sed -i | act not on the input stream but on the specified file (edit in place) |
sed -f script_file | take instructions from a script file |
Example
Replace pattern 1 by pattern 2:
sed -i 's/pattern 1/pattern 2/g' /etc/ssh/sshd_config
Slashes are escaped with \ . Replace ../src by ./src:
sed -i 's/..\/src/.\/src/g' README.md
Replace 'Not After :' (and the surrounding spaces) by nothing from the input stream, and print the result:
... | sed -n 's/ *Not After : *//p'
cmd | meaning |
---|---|
sed '342d' -i ~/.ssh/known_hosts | remove the 342nd line of the file |
sed '342,342d' -i ~/.ssh/known_hosts | remove lines 342 to 342, equivalent to the preceding cmd |
sed -i '1,42d' test.sql | remove the first 42 lines of test.sql |
# find
common usage
find . -maxdepth 1 -type l -ls
find /opt -type f -mmin -5 -exec ls -ltr {} +
find /var/log/nginx -type f -name "*access*" -mmin +5 -exec ls -ltr {} +
find . -type f -mmin -5 -print0 | xargs -0 /bin/ls -ltr
find /var/log -name "*.gz" -type f | xargs rm -f
Truncate 😉
echo "$(tail -1000000 /var/log/maillog-20201115)" > /var/log/maillog-20201115
cmd | meaning |
---|---|
find -mtime n | last DATA MODIFICATION time (day) |
find -atime n | last ACCESS time (day) |
find -ctime n | last STATUS MODIFICATION time (day) |
"Modify" is the timestamp of the last time the file's content has been modified. This is often called "mtime".
"Change" is the timestamp of the last time the file's inode has been changed, like by changing permissions, ownership, file name, number of hard links. It's often called "ctime".
List in the current directory all files last modified more than 10 days ago (+10), in historical order.
List in the current directory all files last modified less than 10 days ago (-10), in historical order.
find . -type f -mtime +10 -exec ls -ltr {} +
find . -type f -mtime -10 -exec ls -ltr {} +
List files with a last modified date of LESS than 5 minutes ago
find . -type f -mmin -5 -exec ls -ltr {} +
# xargs
xargs reads items from the standard input, delimited by blanks (which can be protected with double or single quotes or a backslash) or newlines, and executes the command (default is /bin/echo) one or more times with any initial-arguments followed by items read from standard input. Blank lines on the standard input are ignored.
You can define the name of the received arg (from stdin). In the following example the chosen name is %.
The following example takes all the .log files and moves them into a directory named 'working_sheet_of_the_day':
ls *.log | xargs -I % mv % ./working_sheet_of_the_day
# Lsof
list all system open files
lsof
list open files opened by a given command
lsof -c salt-master | wc -l
12320
List "dead" files: deleted on the file system but not yet released by the original process which opened them!
lsof | grep deleted
# Tar
Compress and extract files
tar zcvf myfiles.tar.gz /dir1 /dir2 /dir3
extract in a given directory
tar zxvf somefilename.tar.gz or .tgz
tar jxvf somefilename.tar.bz2
tar xf file.tar -C /path/to/directory
Create a full copy of / (root filesystem), but excluding some dirs (/proc, /sys, /dev/pts, tmp_root_fs):
tar \
-cvpf /tmp_root_fs/backups/fullbackup.tar \
--directory=/ \
--exclude=proc \
--exclude=sys \
--exclude=dev/pts \
--exclude=tmp_root_fs \
.
Command | meaning |
---|---|
-c | create (name your file .tar) |
-(c)z | archive type gzip (name your file .tar.gz) |
-(c)j | archive type bzip2 |
-x | extract |
-f | file |
-v | verbose |
-C | Set dir name to extract files |
--directory | same |
# Diff
diff --color -u -r \
./_sass \
../tuxador.github.io/_sass \
--exclude=_posts \
--exclude=README.md \
-x "index.md" \
-x "about.md" \
-x ".gitignore" \
--exclude=.git
Command | meaning |
---|---|
-u | output NUM (default 3) lines of unified context |
-r | recursively compare any subdirectories found |
-x, --exclude=PAT | exclude files that match PAT |
-X, --exclude-from=FILE | exclude files that match any pattern in FILE |
# Patch
You can apply the result (output) of the diff command to a file, in order to avoid manually editing or replacing it.
diff --color -u oldversion.py newversion.py
--- oldversion.py 2020-03-29 14:57:50.221912565 +0200
+++ newversion.py 2020-03-29 14:58:09.112328818 +0200
@@ -1 +1,2 @@
line1
+line 2
diff --color -u oldversion.py newversion.py > my.patch
Apply your patch to your file
patch --backup -u oldversion.py -i my.patch
# Test
The command to perform a comparison.
test 1 -eq 2 && echo "yes" || echo "no"
- 1 : The first element you are going to compare. In this example it's the number 1, but it could be any number, or a string within quotes.
- -eq : The method of comparison. In this case, you are testing whether one value equals another.
- 2 : The element you are comparing the first element against. In this example, it's the number 2.
- && : A Linux shortcut to chain commands together, in sequence. The output from the test chains to the commands that follow. A double-ampersand executes when the command that precedes it has an exit status of 0, which is a fancy way of saying that the command didn't fail.
- echo "yes" : The command to run if the comparison succeeds. In this case, all we're doing is asking the echo command to print the word "yes" to standard output, but you could run any command here that would execute if the test result proved true.
- || : The opposite, in a way, of &&; the double-pipe only executes if the command that precedes it fails (has an exit status that's not 0).
- echo "no" : The command to run if the comparison fails.
# Tee
cat <<EOF | sudo tee /etc/apt/preferences.d/pin-gitlab-ee.pref
Explanation: Prefer GitLab provided packages over the Debian native ones
Package: gitlab-ee
Pin: version 13.3.5-ee.0
Pin-Priority: 1001
EOF
# Bash
# Common commands
Command | meaning |
---|---|
file | get meta info about that file |
tail -n 15 -f | print the last 15 lines of a file and keep following new entries |
head -n 15 | print the first 15 lines of a file |
who | info about connected users |
w | same with more info |
wall | print on all TTY (for all connected user) |
sudo updatedb | update the local database of the files present in the filesystem |
locate file_name | Search in this database |
echo app.$(date +%Y_%m_%d) | print a string based on subshell return |
touch app.$(date +%Y_%m_%d) | create empty file named on string based on subshell return |
mkdir app.$(date +%Y_%m_%d) | create directory named on string based on subshell return |
echo $(date +%d-%m-%Y-%H:%M:%S) | print a formatted timestamp |
sh | run a 'sh' shell, very old shell |
bash | run a 'bash' shell, classic shell of debian 7,8,9 |
zsh | run a 'zsh' shell, new shell |
for i in google.com free.fr wikipedia.de ; do dig $i +short ; done | resolve several hostnames in a loop |
# Operator
Operator | Description |
---|---|
! EXPRESSION | The EXPRESSION is false. |
-n STRING | The length of STRING is greater than zero. |
-z STRING | The length of STRING is zero (ie it is empty). |
STRING1 = STRING2 | STRING1 is equal to STRING2 |
STRING1 != STRING2 | STRING1 is not equal to STRING2 |
INTEGER1 -eq INTEGER2 | INTEGER1 is numerically equal to INTEGER2 |
INTEGER1 -gt INTEGER2 | INTEGER1 is numerically greater than INTEGER2 |
INTEGER1 -lt INTEGER2 | INTEGER1 is numerically less than INTEGER2 |
-d FILE | FILE exists and is a directory. |
-e FILE | FILE exists. |
-f FILE | True if file exists AND is a regular file. |
-r FILE | FILE exists and the read permission is granted. |
-s FILE | FILE exists and its size is greater than zero (ie. it is not empty). |
-w FILE | FILE exists and the write permission is granted. |
-x FILE | FILE exists and the execute permission is granted. |
-eq 0 | COMMAND result equal to 0 |
$? | last exit code |
$# | Number of parameters |
$@ | expands to all the parameters |
# example
if [ -f /tmp/test.txt ];
then
echo "true";
else
echo "false";
fi
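A minimal sketch illustrating $?, $# and $@ from the table above (save it as a hypothetical args.sh and run ./args.sh foo bar):
#!/bin/bash
echo "number of parameters: $#"
for p in "$@"; do echo "param: $p"; done
false
echo "exit code of the previous command: $?"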
$ true && echo howdy!
howdy!
$ false || echo howdy!
howdy!
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
DIR="$(dirname "$0")"
# For
for i in `seq 1 6`
do
mysql -h 127.0.0.1 -u user -ppassword -e "show variables like 'server_id'; select user()"
done
# Backward compatibility
# Why is $(...) preferred over backticks (`...`)?
Backticks are the legacy syntax, required by only the very oldest of non-POSIX-compatible Bourne shells. There are several reasons to always prefer the $(...) syntax:
# Backslashes (\) inside backticks are handled in a non-obvious manner:
$ echo "`echo \\a`" "$(echo \\a)"
a \a
$ echo "`echo \\\\a`" "$(echo \\\\a)"
\a \\a
# Note that this is true for *single quotes* too!
$ foo=`echo '\\'`; bar=$(echo '\\'); echo "foo is $foo, bar is $bar"
foo is \, bar is \\
# Nested quoting inside $() is far more convenient.
echo "x is $(sed ... <<<"$y")"
In this example, the quotes around $y are treated as a pair, because they are inside $(). This is confusing at first glance, because most C programmers would expect the quote before x and the quote before $y to be treated as a pair; but that isn't correct in shells. On the other hand,
echo "x is `sed ... <<<\"$y\"`"
the inner double quotes must be escaped with backslashes, which is much harder to read and get right.
# It makes nesting command substitutions easier. Compare:
x=$(grep "$(dirname "$path")" file)
x=`grep "\`dirname \"$path\"\`" file`
# Environment variable
Be very careful about the context of their definition.
Set a variable for the current shell:
export http_proxy=http://10.10.10.10:9999
echo $http_proxy
should print the value
Set the variable only for the execution of a single command line:
http_proxy=http://10.10.10.10:9999 wget -O - https://repo.saltstack.com/apt/debian/9/amd64/latest/SALTSTACK-GPG-KEY.pub
echo $http_proxy
will return nothing because it doesn't exist anymore
Export multiple env var
export {http,https,ftp}_proxy="http://10.10.10.10:9999"
Useful common usage
export http_proxy=http://10.10.10.10:9999/
export https_proxy=$http_proxy
export ftp_proxy=$http_proxy
export rsync_proxy=$http_proxy
export no_proxy="localhost,127.0.0.1,localaddress,.localdomain.com"
Remove variable
unset http_proxy
unset http_proxy https_proxy HTTP_PROXY HTTPS_PROXY
# Process
# get processes info
debian style
ps -ef
ps -o pid,user,%mem,command ax
Get parent pid of a given pid
ps -o ppid= -p 750
ps -o ppid= -p $(pidof systemd)
RedHat style
ps aux
# Kill
kill sends SIGTERM by default
kill -l : list all signals
kill -l 15 : get the name of signal 15
kill -s TERM PID
kill -TERM PID
kill -15 PID
Shortcut
shortcut | meaning |
---|---|
ctrl + \ | SIGQUIT |
ctrl + C | SIGINT |
# signals list
Number | Name (short name) | Description | Notes |
---|---|---|---|
0 | SIGNULL (NULL) | Null | Check access to pid |
1 | SIGHUP (HUP) | Hangup / Terminate | can be trapped |
2 | SIGINT (INT) | Interrupt / Terminate | can be trapped |
3 | SIGQUIT (QUIT) | Quit / Terminate with core dump | can be trapped |
9 | SIGKILL (KILL) | Kill / Forced termination | cannot be trapped |
15 | SIGTERM (TERM) | Terminate | can be trapped. This is the default signal sent by the kill command. |
19 | SIGSTOP (STOP) | Stop / Pause the process | cannot be trapped |
20 | SIGTSTP (TSTP) | Stop/pause the process from the terminal | can be trapped |
18 | SIGCONT (CONT) | Continue | Resume a stopped process |
Signal numbers vary by architecture; the values above are the usual x86-64 Linux ones.
xeyes &
jobs -l
kill -s STOP 3405
jobs -l
kill -s CONT 3405
jobs -l
kill -s TERM 3405
List every running process and filter (here, print the PID of ssh-agent):
ps -ef | grep ssh-agent | awk '{print $2}'
ps -ef | grep ssh-agent | awk '$0=$2'
Print only the process IDs of syslogd:
ps -C syslogd -o pid=
Print only the name of PID 42:
ps -q 42 -o comm=
To see every process running as root (real & effective ID) in user format:
ps -U root -u root u
Get PID (process Identifier) of a running process
pidof iceweasel
pgrep ssh-agent
# process substitution
diff <(cat /etc/passwd) <(cut -f2 /etc/passwd)
<(...) is called process substitution. It converts the output of a command into a file-like object that diff can read from. While process substitution is not POSIX, it is supported by bash, ksh, and zsh.
# Inter-process communication
User's IPC shared memory, semaphores, and message queues
Type of IPC object. Possible values are:
q -- message queue
m -- shared memory
s -- semaphore
USERNAME=$1
TYPE=$2
ipcs -$TYPE | grep $USERNAME | awk ' { print $2 } ' | xargs -I {} ipcrm -$TYPE {}
ipcs -s | grep zabbix | awk ' { print $2 } ' | xargs -I {} ipcrm -s {}
# File system
Unix File types
Description | symbol |
---|---|
Regular file | - |
Directory | d |
Special files | (5 sub types in it) |
block file | b |
Character device file | c |
Named pipe file or just a pipe file | p |
Symbolic link file | l |
Socket file | s |
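The type symbol is the first character of each ls -l line; for example the following should show -, d, b and c respectively (assuming /dev/sda exists on your machine):
ls -ld /etc/passwd /etc /dev/sda /dev/null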
# Show size
df -h
du -sh --exclude=relative/path/to/uploads --exclude other/path/to/exclude
du -hsx --exclude=/{proc,sys,dev} /*
lsblk
ncdu
ncdu --exclude /backup --exclude /opt/zimbra /
# Mount
List physical disks and then mount them on your filesystem
lsblk
fdisk -l
sudo mount /dev/sdb1 /mnt/usb
List read only filesystem
awk '$4~/(^|,)ro($|,)/' /proc/mounts
# Unmount
umount /mnt
If the device is busy when you do so, you will get the “umount: /mnt: device is busy.” error as shown below.
umount /mnt
umount: /mnt: device is busy.
(In some cases useful info about processes that use
the device is found by lsof(8) or fuser(1))
Use fuser command to find out which process is accessing the device along with the user name.
fuser -mu /mnt/
/mnt/: 2677c(sathiya)
- fuser – command used to identify processes using the files / directories
- -m – specify the directory or block device along with this, which will list all the processes using it.
- -u – shows the owner of the process
You have two choices here.
- Ask the owner of the process to properly terminate it or
- You can kill the process with super user privileges and unmount the device.
# Forcefully umount a busy device
When you cannot wait to properly umount a busy device, use umount -f as shown below.
umount -f /mnt
If it still doesn’t work, lazy unmount should do the trick. Use umount -l as shown below.
umount -l /mnt
# How to 'root a system' after lost root password
When you have lost remote access to the machine:
Reboot the system and press e to edit the GRUB entry.
Then add init=/bin/bash at the end of the linux line.
grub config extract
menuentry 'Debian GNU/Linux, with Linux 4.9.0-8-amd64' {
load_video
insmod gzio
if [ x$grub_platform = xxen ]; then insmod xzio; insmod lzopio; fi
insmod part_gpt
insmod ext2
...
...
...
echo 'Loading Linux 4.9.0-8-amd64 ...'
linux /vmlinuz-4.9.0-8-amd64 root=/dev/mapper/debian--baptiste--vg-root ro quiet
echo 'Loading initial ramdisk ...'
initrd /initrd.img-4.9.0-8-amd64
}
Change this line
linux /vmlinuz-4.9.0-8-amd64 root=/dev/mapper/debian--baptiste--vg-root ro quiet
into this
linux /vmlinuz-4.9.0-8-amd64 root=/dev/mapper/debian--baptiste--vg-root rw quiet init=/bin/bash
F10 to boot with the current config
Make the root filesystem writable (not needed if you already switched 'ro' to 'rw')
mount -n -o remount,rw /
Make your modifications
passwd user_you_want_to_modify
# or
vim /etc/iptables/rules.v4
To exit the prompt and continue booting (or reboot the computer):
exec /sbin/init
# Check filesystem
fsck.ext4 /dev/mapper/vg_data-lv_data
e2fsck 1.43.4 (31-Jan-2017)
/dev/mapper/VgData-LvData contains a file system with errors, check forced.
Pass 1: Checking inodes, blocks, and sizes
Pass 2: Checking directory structure
Pass 3: Checking directory connectivity
Pass 4: Checking reference counts
Pass 5: Checking group summary information
# symbolic link
# Update an existing symlink
ln -sfTv /opt/app_$TAG /opt/app_current
# Open Files
List open file, filter by deleted
Very useful when you see a discrepancy between the results of df -h and du -sh /*
It may happen that you remove a file while another process still holds a file descriptor on it. So, from the filesystem's point of view, the space is not released/freed.
lsof -nP | grep '(deleted)'
List open files by a given command
lsof -c salt-minion
lsof -c salt-minion | grep deleted
# Init.d
Old system control, replaced by systemd since Debian 8. Also known as SysV init, the old-fashioned way, preferred by some people because of the full control provided by a directly modifiable bash script located under /etc/init.d/
usage
service rsyslog status
change process management
vim /etc/init.d/rsyslog
# Systemd
Introduced in Debian 8.
Based on internal and templated management. The only way to interact with systemd is by modifying instructions (not code directly) in unit files.
They can be located under different directories.
Where are Systemd Unit Files Found?
The files that define how systemd will handle a unit can be found in many different locations, each of which have different priorities and implications.
The system’s copy of unit files are generally kept in the /lib/systemd/system directory. When software installs unit files on the system, this is the location where they are placed by default.
Unit files stored here are able to be started and stopped on-demand during a session. This will be the generic, vanilla unit file, often written by the upstream project’s maintainers that should work on any system that deploys systemd in its standard implementation. You should not edit files in this directory. Instead you should override the file, if necessary, using another unit file location which will supersede the file in this location.
If you wish to modify the way that a unit functions, the best location to do so is within the /etc/systemd/system directory. Unit files found in this directory location take precedence over any of the other locations on the filesystem. If you need to modify the system’s copy of a unit file, putting a replacement in this directory is the safest and most flexible way to do this.
If you wish to override only specific directives from the system’s unit file, you can actually provide unit file snippets within a subdirectory. These will append or modify the directives of the system’s copy, allowing you to specify only the options you want to change.
The correct way to do this is to create a directory named after the unit file with .d appended on the end. So for a unit called example.service, a subdirectory called example.service.d could be created. Within this directory a file ending with .conf can be used to override or extend the attributes of the system’s unit file.
There is also a location for run-time unit definitions at /run/systemd/system. Unit files found in this directory have a priority landing between those in /etc/systemd/system and /lib/systemd/system. Files in this location are given less weight than the former location, but more weight than the latter.
The systemd process itself uses this location for dynamically created unit files created at runtime. This directory can be used to change the system’s unit behavior for the duration of the session. All changes made in this directory will be lost when the server is rebooted.
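As a sketch, for a hypothetical example.service you could override a single directive with a drop-in like the following, then run systemctl daemon-reload:
# /etc/systemd/system/example.service.d/override.conf (illustrative)
[Service]
# clear the previous ExecStart before redefining it
ExecStart=
ExecStart=/usr/local/bin/example --flag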
Summary
Location | override/supersede priority (higher takes precedence) | Meaning |
---|---|---|
/run/systemd/system | 1 | Run-time only, lost after systemd reboot |
/etc/systemd/system | 2 | SysAdmin maintained |
/lib/systemd/system | 3 | Packages vendor maintained (apt, rpm, pacman, ...) |
# systemctl
# show all installed unit files
systemctl list-unit-files --type=service
# loaded
systemctl list-units --type=service --state=loaded
# active
systemctl list-units --type=service --state=active
# running
systemctl list-units --type=service --state=running
# show a specific property (service var value)
systemctl show --property=Environment docker
# print all content
systemctl show docker --no-pager | grep proxy
systemctl show docker --no-pager | grep LimitMEMLOCK
# Tips
grep locked /proc/$(ps --no-headers -o pid -C dockerd | tr -d ' ')/limits
echo -e "[Service]\nLimitMEMLOCK=infinity" | SYSTEMD_EDITOR=tee systemctl edit docker.service
systemctl daemon-reload
systemctl restart docker
# Syslog-ng
syslog-ng is a syslog implementation which can take log messages from sources and forward them to destinations, based on powerful filter directives.
Note: With systemd's journal (journalctl), syslog-ng is not needed by most users.
If you wish to use both the journald and syslog-ng files, ensure the following settings are in effect. For systemd-journald, in the /etc/systemd/journald.conf file, Storage= is either set to auto or unset (which defaults to auto) and ForwardToSyslog= is set to no or unset (defaults to no). For /etc/syslog-ng/syslog-ng.conf, you need the following source stanza:
source src {
# syslog-ng
internal();
# systemd-journald
system();
};
# syslog-ng and systemd journal
Starting with syslog-ng version 3.6.1, the default system() source on Linux systems using systemd uses journald as its standard system() source.
Typically:
systemd-journald
- stores messages from the units that it manages, e.g. sshd.service
- unit.{service,slice,socket,scope,path,timer,mount,device,swap}
syslog-ng
- reads INPUT messages from systemd-journald
- writes OUTPUT to various files under /var/log/*
Examples from default config:
log { source(s_src); filter(f_auth); destination(d_auth); };
log { source(s_src); filter(f_cron); destination(d_cron); };
log { source(s_src); filter(f_daemon); destination(d_daemon); };
log { source(s_src); filter(f_kern); destination(d_kern); };
# Journal
# Definition
journalctl is a command for viewing logs collected by systemd. The systemd-journald service is responsible for systemd’s log collection, and it retrieves messages from the kernel, systemd services, and other sources.
These logs are gathered in a central location, which makes them easy to review. The log records in the journal are structured and indexed, and as a result journalctl is able to present your log information in a variety of useful formats.
# Run the journalctl command without any arguments to view all the logs in your journal:
journalctl
journalctl -r
Each line starts with the date (in the server’s local time), followed by the server’s hostname, the process name, and the message for the log
# journalctl
journalctl --priority=0..3 --since "12 hours ago"
- -u, --unit=UNIT
- --user-unit=UNIT
- --no-pager
- --list-boots
- -b, --boot[=ID]
- -e, --pager-end
- -f, --follow
- -p, --priority=RANGE
0: emerg
1: alert
2: crit
3: err
4: warning
5: notice
6: info
7: debug
# Paging through Your Logs
# journalctl pipes its output to the less command
Key command | Action |
---|---|
down arrow key, enter, e, or j | Move down one line. |
up arrow key, y, or k | Move up one line. |
space bar | Move down one page. |
b | Move up one page. |
right arrow key | Scroll horizontally to the right. |
left arrow key | Scroll horizontally to the left. |
g | Go to the first line. |
G | Go to the last line. |
10g | Go to the 10th line. Enter a different number to go to other lines. |
50p or 50% | Go to the line half-way through the output. Enter a different number to go to other percentage positions. |
/search term | Search forward from the current position for the search term string. |
?search term | Search backward from the current position for the search term string. |
n | When searching, go to the next occurrence. |
N | When searching, go to the previous occurrence. |
m"c" | Set a mark, which saves your current position. Enter a single character in place of "c" to label the mark with that character. |
'"c" | Return to a mark, where "c" is the single character label for the mark. Note that ' is the single-quote. |
q | Quit less |
# View journalctl without Paging
journalctl --no-pager
It’s not recommended that you do this without first filtering down the number of logs shown.
# Show Logs within a Time Range
journalctl --since "2018-08-30 14:10:10"
journalctl --until "2018-09-02 12:05:50"
# Show Logs for a Specific Boot
journalctl --list-boots
journalctl -b -2
journalctl -b
# Show Logs for a Specific Unit
journalctl -u ssh
# View Kernel Messages
journalctl -k
# Change the Log Output Format
Format Name | Description |
---|---|
short | The default option, displays logs in the traditional syslog format. |
verbose | Displays all information in the log record structure. |
json | Displays logs in JSON format, with one log per line. |
json-pretty | Displays logs in JSON format across multiple lines for better readability. |
cat | Displays only the message from each log without any other metadata. |
journalctl -o json-pretty
# Persist Your Logs
systemd-journald can be configured to persist your systemd logs on disk, and it also provides controls to manage the total size of your archived logs. These settings are defined in /etc/systemd/journald.conf. To start persisting your logs, uncomment the Storage line in /etc/systemd/journald.conf and set its value to persistent. Your archived logs will be held in /var/log/journal. If this directory does not already exist in your file system, systemd-journald will create it.
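For instance, the relevant part of /etc/systemd/journald.conf would read:
[Journal]
Storage=persistent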
# After updating your journald.conf, load the change:
systemctl restart systemd-journald
# Control the Size of Your Logs' Disk Usage
The following settings in journald.conf control how large your logs’ size can grow to when persisted on disk:
Setting | Description |
---|---|
SystemMaxUse | The total maximum disk space that can be used for your logs. |
SystemKeepFree | The minimum amount of disk space that should be kept free for uses outside of systemd-journald’s logging functions. |
SystemMaxFileSize | The maximum size of an individual journal file. |
SystemMaxFiles | The maximum number of journal files that can be kept on disk. |
systemd-journald will respect both SystemMaxUse and SystemKeepFree, and it will set your journals’ disk usage to meet whichever setting results in a smaller size.
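A hedged example keeping the persisted journal around 500 MiB while leaving at least 1 GiB free (the values are illustrative):
[Journal]
SystemMaxUse=500M
SystemKeepFree=1G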
# To view your default limits, run:
journalctl -u systemd-journald
journalctl --disk-usage
journalctl --verify
# Manually Clean Up Archived Logs
journalctl offers functions for immediately removing archived journals on disk. Run journalctl with the --vacuum-size option to remove archived journal files until the total size of your journals is less than the specified amount. For example, the following command will reduce the size of your journals to 2GiB:
journalctl --vacuum-size=2G
Run journalctl with the --vacuum-time option to remove archived journal files with dates older than the specified relative time. For example, the following command will remove journals older than one year:
journalctl --vacuum-time=1years
# Logger
To write into the journal:
logger -n syslog.baptiste-dauphin.com --rfc3164 --tcp -P 514 -t 'php95.8-fpm' -p local7.error 'php-fpm error test'
logger -n syslog.baptiste-dauphin.com --rfc3164 --udp -P 514 -t 'sshd' -p local7.info 'sshd error : test '
logger -n syslog.baptiste-dauphin.com --rfc3164 --udp -P 514 -t 'sshd' -p auth.info 'sshd error : test'
for ((i=0; i < 10; ++i)); do logger -n syslog.baptiste-dauphin.com --rfc3164 --tcp -P 514 -t 'php95.8-fpm' -p local7.error 'php-fpm error test' ; done
salt -C 'G@app:api and G@env:production and G@client:mattrunks' \
cmd.run "for ((i=0; i < 10; ++i)); do logger -n syslog.baptiste-dauphin.com --rfc3164 --tcp -P 514 -t 'php95.8-fpm' -p local7.error 'php-fpm error test' ; done" \
shell=/bin/bash
logger '@cim: {"name1":"value1", "name2":"value2"}'
# Cronjob
Stop giving yourself a nervous breakdown by computing cronjob timers yourself.
Some kind people have developed this for you:
https://crontab.guru/
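For reference, a crontab entry (edited with crontab -e) has five time fields followed by the command; the script and log paths below are illustrative:
# m  h  dom mon dow   command
*/5  *  *   *   *     /usr/local/bin/backup.sh >> /var/log/backup.log 2>&1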
# Log Rotate
Don't do anything, just check the config (dry run)
logrotate -d /etc/logrotate.conf
# run logrotate
logrotate /etc/logrotate.conf -v
# Example
/var/log/dpkg.* {
monthly
rotate 12
size 100M
compress
delaycompress
missingok
notifempty
create 644 root root
}
# Iptables
# Show current rules (but not saved)
iptables-save
# Rules
# Save rules
iptables-save > /etc/iptables/rules.v4
# Print rules
iptables -L
iptables -nvL
iptables -nvL INPUT
iptables -nvL OUTPUT
iptables -nvL PREROUTING
# Once a rule is added, it's immediately applied!!!
# The default Linux iptables chain policy is ACCEPT for the INPUT, FORWARD and OUTPUT chains. You can easily change this default policy to DROP with the commands listed below.
iptables -P INPUT DROP
iptables -P FORWARD DROP
iptables -P OUTPUT DROP
iptables --policy INPUT DROP
iptables -P chain target [options]
--policy -P chain target : set the policy for the chain to the given target
--append -A chain : append to chain
--check -C chain : check for the existence of a rule
--delete -D chain : delete matching rule from chain
iptables --list : print rules in human readable format
iptables --list-rules : print rules in iptables readable format
iptables -v -L -n
# Range multiport
iptables -A OUTPUT -d 10.10.10.10/32 -p tcp -m state --state NEW -m tcp --match multiport --dports 4506:10000 -j ACCEPT
# Forwarding
From a Jenkins tutorial:
iptables -A PREROUTING -t nat -i eth0 -p tcp --dport 80 -j REDIRECT --to-port 8080
iptables -A PREROUTING -t nat -i eth0 -p tcp --dport 443 -j REDIRECT --to-port 8443
Verify, by using the nat table:
iptables -L -t nat
Chain PREROUTING (policy ACCEPT)
target prot opt source destination
REDIRECT tcp -- anywhere anywhere tcp dpt:http redir ports 8080
REDIRECT tcp -- anywhere anywhere tcp dpt:https redir ports 8443
Chain INPUT (policy ACCEPT)
target prot opt source destination
Chain OUTPUT (policy ACCEPT)
target prot opt source destination
Chain POSTROUTING (policy ACCEPT)
target prot opt source destination
# NOTRACK
iptables -t raw -I PREROUTING -j NOTRACK
iptables -t raw -I OUTPUT -j NOTRACK
# LOG
# log the dropped packets
iptables -A INPUT -j LOG --log-prefix "INPUT:DROP:" --log-level 6
iptables -A INPUT -j DROP
iptables -P INPUT DROP
iptables -A OUTPUT -j LOG --log-prefix "OUTPUT:DROP:" --log-level 6
iptables -A OUTPUT -j DROP
iptables -P OUTPUT DROP
add new rules when NOTRACK is set
# INPUT new rule
# You have to temporarily REMOVE the final log and drop lines, otherwise your new line
# will never be reached!
iptables -D INPUT -j LOG --log-prefix "INPUT:DROP:" --log-level 6
iptables -D INPUT -j DROP
# add your new rule
iptables -A INPUT -p udp -m udp --sport 123 -j ACCEPT
# put back logging and dropping
iptables -A INPUT -j LOG --log-prefix "INPUT:DROP:" --log-level 6
iptables -A INPUT -j DROP
# Conntrack
# Debian 8 and under
Get info about connection tracking: current and max values.
cat /proc/sys/net/netfilter/nf_conntrack_count
cat /proc/sys/net/netfilter/nf_conntrack_max
# Debian 9
With a wrapper, easier to use!
conntrack -L [table] [options] [-z]
conntrack -G [table] parameters
conntrack -D [table] parameters
conntrack -I [table] parameters
conntrack -U [table] parameters
conntrack -E [table] [options]
conntrack -F [table]
conntrack -C [table]
conntrack -S
# Sysctl
Print all running system (kernel) variables.
Filter for the max conntrack allowed:
sysctl -a | grep net.netfilter.nf_conntrack_max
net.netfilter.nf_conntrack_max = 1610612736
# Ssh
# Client
# Askpass and tty
ssh 10.10.10.10 "sudo cat /var/cache/bind/zones/db.example.com.include"
sudo: no tty present and no askpass program specified
ssh -t 10.10.10.10 "sudo cat /var/cache/bind/zones/db.example.com.include"
[sudo] password for baptiste:
it works !!!
# Speed Up your SSH client
mkdir ~/.ssh/sockets/
head ~/.ssh/config
Host *
ControlMaster auto
ControlPath ~/.ssh/sockets/%r@%h-%p
ControlPersist 600
# Config
~/.ssh/config
Host *
ControlMaster auto
ControlPath ~/.ssh/sockets/%r@%h-%p
ControlPersist 600
Host 10.10.10.10
PubkeyAcceptedKeyTypes +ssh-rsa
Host 10.*.*.*
User b.dauphin
StrictHostKeyChecking no
IdentityFile /home/baptiste/.ssh/id_rsa/id_RSA_user
Host bastion
Hostname 10.10.10.10
User b.dauphin
IdentityFile /home/baptiste/.ssh/id_rsa/id_RSA_user
Host 10.10.*.*
User b.dauphin
IdentityFile /home/baptiste/.ssh/id_rsa/id_RSA_user
ProxyCommand ssh -W %h:%p bastion
Host * !10.100.*.* !10.99.*.* !8.8.8.8
User b.dauphin
IdentityFile /home/baptiste/.ssh/id_rsa/id_RSA_user
Host 10.99.99.*
User root
PreferredAuthentications=password
PubkeyAuthentication=no
Host gitlab-perso
HostName gitlab.com
User git
IdentityFile /home/baptiste/.ssh/id_rsa/id_RSA_user_PERSO
Host github
HostName github.com
User git
IdentityFile /home/baptiste/.ssh/id_rsa/id_RSA_user_PERSO
# Fingerprint & Key
ssh-keygen -l -f ~/.ssh/id_rsa/id_RSA_user_github.pub
4096 SHA256:w7bMJ3RsS6Rz6u64WD2tjuNGLn+7o21yBBDSttBUz3M github (RSA)
ssh-keygen -l -f ~/.ssh/id_rsa/id_RSA_user_github.pub -E md5
ssh-keygen -l -f ~/.ssh/id_rsa/id_RSA_user_github.pub -E sha1
ssh-keygen -l -f ~/.ssh/id_rsa/id_RSA_user_github.pub -E sha256
# Get Public SSH Key from Private
-y
: Read a private OpenSSH format file and print an OpenSSH public key to stdout
ssh-keygen -y -f ./ed_25519.perso > ed_25519.perso.pub
# How to save an SSH key passphrase in gnome-keyring?
- Use ssh-askpass to add your ssh keys to your keyring.
- BUT with Wayland (instead of the X11 / Xorg server) I didn't find how to do it. So you can also use keychain:
sudo dnf install keychain
Then add this to your .bashrc or .zshrc depending on your shell...
eval $(keychain --eval --quiet ~/.ssh/keys/id_RSA_user)
And then reload your shell.
exec $SHELL
# Inside-script issues
By default ssh reads stdin. When ssh is run in the background or in a script we need to redirect /dev/null into stdin.
Here is what we can do:
ssh shadows.cs.hut.fi "uname -a" < /dev/null
ssh -n shadows.cs.hut.fi "uname -a"
# Test multiple ssh connexion use case
Will generate an output file containing 1 IP / line
for minion in minion1 minion2 database_dev random_id debian minion3 \
; do ipam $minion | tail -n 1 | awk '{print $1}' \
>> minions.list \
; done
Run a parallelized exit after testing each ssh connection:
while read minion_ip; do
(ssh -n $minion_ip exit \
&& echo Success \
|| echo CONNECTION_ERROR) &
done <minions.list
Test the sshd config before reloading (avoid a failure on restart/reload that would cut off our own access)
sshd = ssh daemon
sshd -t
Test connection to multiple servers
for outscale_instance in 10.10.10.1 10.10.10.2 10.10.10.3 10.10.10.4 \
; do ssh $outscale_instance -q exit \
&& echo "$outscale_instance :" connection succeed \
|| echo "$outscale_instance :" connection failed \
; done
10.10.10.1 : connection succeed
10.10.10.2 : connection succeed
10.10.10.3 : connection failed
10.10.10.4 : connection succeed
quickly copy your ssh public key to a remote server
cat ~/.ssh/id_ed25519.pub | ssh pi@192.168.1.41 "mkdir -p ~/.ssh && chmod 700 ~/.ssh && cat >> ~/.ssh/authorized_keys"
# rsync using ssh
- -a : archive mode
- -u : update mode, not a full copy
rsync -au --progress -e "ssh -i path/to/private_key" user@10.10.10.10:~/remote_path /output/path
# Troubleshooting
# No mutual signature algorithm | Client <=> Server
Sometimes it may happen that everything is well set up, but you still get Permission denied (publickey).
It may be caused by a too big version difference between your client and the server.
Check whether you have an algorithm issue. Let's run a verbose connection.
ssh user@example.com -v
...
debug1: send_pubkey_test: no mutual signature algorithm
...
It happened to me between my Fedora 33 ssh client and a Debian 8 ssh server.
In that case you can edit your ssh client config ~/.ssh/config:
Host 10.10.10.10
PubkeyAcceptedKeyTypes +ssh-rsa
# Manual (man)
# Common command
(1) User Commands
(2) System Calls
(3) Library functions
(4) Devices
(5) File formats
(6) Games and Amusements
(7) Conventions and Miscellany
(8) System Administration and Privileged Commands
(L) Local. Some programs install their man pages into this section instead
(N) TCL commands
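To open a page from a specific section, pass the section number; for example the passwd command and the /etc/passwd file format live in different sections:
man 1 passwd
man 5 passwd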
# Parse manual
The default pager of man is less; otherwise, it falls back to cat.
man rpc.nfsd
MANPAGER=less man rpc.nfsd
MANPAGER=cat man rpc.nfsd
MANPAGER=cat man rpc.nfsd | grep -i version --color