# Basic substitution (first occurrence)
sed 's/old/new/' file
# Global substitution (all occurrences)
sed 's/old/new/g' file
# Case-insensitive substitution
sed 's/old/new/gi' file
# Substitute on specific line
sed '3 s/old/new/' file
# Substitute on range
sed '1,5 s/old/new/g' file
# Substitute from line to end
sed '10,$ s/old/new/g' file
# Print only changed lines
sed -n 's/old/new/gp' file
# Write changes to file
sed -i 's/old/new/g' file
# With backup
sed -i.bak 's/old/new/g' file
Delete
# Delete line 3
sed '3d' file
# Delete lines 1-5
sed '1,5d' file
# Delete last line
sed '$d' file
# Delete blank lines
sed '/^$/d' file
# Delete lines matching pattern
sed '/pattern/d' file
# Delete lines NOT matching pattern
sed '/pattern/!d' file
# Delete lines starting with #
sed '/^#/d' file
# Delete trailing whitespace
sed 's/[[:space:]]*$//' file
Print
# Print line 3
sed -n '3p' file
# Print lines 1-5
sed -n '1,5p' file
# Print lines matching pattern
sed -n '/pattern/p' file
# Print lines NOT matching pattern
sed -n '/pattern/!p' file
# Print and delete (move to end)
sed -n '/pattern/{p;d;}' file
# Print line number of matches
sed -n '/pattern/=' file
# Print with line numbers
sed = file | sed 'N;s/\n/\t/'
Insert & Append
# Insert before line 3
sed '3i\New line' file
# Append after line 3
sed '3a\New line' file
# Insert before matching pattern
sed '/pattern/i\New line' file
# Append after matching pattern
sed '/pattern/a\New line' file
# Insert at beginning
sed '1i\Header line' file
# Append at end
sed '$a\Footer line' file
# Lines 10 to 20
sed -n '10,20p' file
# Line 5 to end
sed -n '5,$p' file
# First occurrence of pattern to line 50
sed -n '/start/,50p' file
# Between two patterns
sed -n '/start/,/end/p' file
# From pattern to 5 lines after (GNU sed)
sed -n '/pattern/,+5p' file
# Every 2nd line (GNU sed)
sed -n '1~2p' file
# Every 5th line starting from line 10 (GNU sed)
sed -n '10~5p' file
Multiple Commands
# Multiple substitutions
sed -e 's/foo/bar/g' -e 's/hello/world/g' file
# Alternative syntax
sed 's/foo/bar/g; s/hello/world/g' file
# From script file
cat script.sed
s/foo/bar/g
s/hello/world/g
sed -f script.sed file
# Pipe commands
sed '1,10d' file | sed 's/old/new/g'
Backreferences
# Capture groups (swap name and domain of an email)
sed 's/\(.*\)@\(.*\)/\2@\1/' file
# Swap two words
sed 's/\(word1\) \(word2\)/\2 \1/' file
# Extract domain from email
sed 's/.*@\(.*\)/\1/' emails.txt
# Add quotes around words
sed 's/\([a-z]*\)/"\1"/g' file
# Duplicate each line
sed 'p' file
# Duplicate and modify
sed 'p; s/foo/bar/' file
Hold Space
# Reverse lines (tac alternative)
sed '1!G;h;$!d' file
# Remove duplicate consecutive lines
sed '$!N; /^\(.*\)\n\1$/!P; D' file
# Print every other line (odd lines)
sed -n '1~2p' file
# Print every other line (even lines)
sed -n '2~2p' file
# Join every two lines
sed 'N;s/\n/ /' file
Sed One-Liners
Text Manipulation
# Double space a file
sed G file
# Remove double spacing
sed 'n;d' file
# Number non-blank lines
sed '/./=' file | sed 'N;s/\n/ /'
# Center text (assuming 80 columns; pad with leading spaces, then truncate)
sed 's/^/ /;s/^\(.\{40\}\).*/\1/' file
# Right-align text (80 columns)
sed 's/^/ /;s/^\(.*\).\{80\}/\1/' file
# Convert DOS to Unix (remove CR)
sed 's/\r$//' file
# Convert Unix to DOS (add CR)
sed 's/$/\r/' file
# Remove HTML tags
sed 's/<[^>]*>//g' file
# Extract emails
sed -n '/[a-zA-Z0-9._%+-]*@[a-zA-Z0-9.-]*\.[a-zA-Z]\{2,\}/p' file
Common Tasks
# Replace multiple spaces with single space
sed 's/  */ /g' file
# Remove leading spaces
sed 's/^[ \t]*//' file
# Remove trailing spaces
sed 's/[ \t]*$//' file
# Remove leading and trailing spaces
sed 's/^[ \t]*//;s/[ \t]*$//' file
# Convert to lowercase (GNU sed)
sed 's/.*/\L&/' file
# Convert to uppercase (GNU sed)
sed 's/.*/\U&/' file
# Add line numbers
sed = file | sed 'N;s/\n/\t/'
# Remove line numbers
sed 's/^[ ]*[0-9]*[ ]*//' file
# Comment lines
sed 's/^/# /' file
# Uncomment lines
sed 's/^# //' file
Awk Basics
Syntax
awk 'pattern { action }' file
awk -F: '{ print $1 }' file            # Field separator
awk -v var=value '{ print var }' file  # Variables
Built-in Variables
Variable
Description
$0
Entire line
$1, $2, ...
Field 1, 2, ...
NF
Number of fields
NR
Number of records (line number)
FNR
Record number within the current file (resets for each input file)
FS
Field separator (default: whitespace)
OFS
Output field separator (default: space)
RS
Record separator (default: newline)
ORS
Output record separator (default: newline)
FILENAME
Current filename
Awk Commands
Print
# Print entire line
awk '{ print }' file
awk '{ print $0 }' file
# Print specific fields
awk '{ print $1 }' file
awk '{ print $1, $3 }' file
# Print with separator
awk '{ print $1 ":" $2 }' file
# Print last field
awk '{ print $NF }' file
# Print second to last field
awk '{ print $(NF-1) }' file
# Print field and line number
awk '{ print NR, $1 }' file
# Print with custom OFS
awk 'BEGIN { OFS="\t" } { print $1, $2 }' file
# For loop
awk '{ for (i=1; i<=NF; i++) print $i }' file
# While loop
awk '{ i=1; while (i<=NF) { print $i; i++ } }' file
# Do-while loop
awk '{ i=1; do { print $i; i++ } while (i<=NF) }' file
# Loop through fields (running sum of first field of each line)
awk '{ for (i=1; i<=NF; i++) sum+=$i; print sum }' file
Awk One-Liners
Statistics
# Sum column
awk '{ sum += $1 } END { print sum }' file
# Average
awk '{ sum += $1; n++ } END { print sum/n }' file
# Count lines
awk 'END { print NR }' file
# Count matching lines
awk '/pattern/ { count++ } END { print count }' file
# Min and max
awk 'NR==1 { min=$1; max=$1 } { if ($1<min) min=$1; if ($1>max) max=$1 } END { print min, max }' file
# Sum, average, min, max
awk '{ sum+=$1; if (NR==1) {min=max=$1}; if ($1<min) min=$1; if ($1>max) max=$1 } END { print "Sum:", sum, "Avg:", sum/NR, "Min:", min, "Max:", max }' file
Text Processing
# Print specific columns
awk '{ print $1, $3 }' file
# Swap columns
awk '{ print $2, $1 }' file
# Add column
awk '{ print $0, $1+$2 }' file
# Remove duplicates
awk '!seen[$0]++' file
# Count occurrences
awk '{ count[$1]++ } END { for (word in count) print word, count[word] }' file
# Reverse field order
awk '{ for (i=NF; i>0; i--) printf "%s%s", $i, (i>1 ? OFS : ORS) }' file
# Join lines
awk '{ printf "%s ", $0 } END { print "" }' file
# Split and print
awk '{ split($0, arr, ":"); print arr[1] }' file
Filtering
# Print lines longer than 80 characters
awk 'length > 80' file
# Print lines with more than 5 fields
awk 'NF > 5' file
# Print odd-numbered lines
awk 'NR % 2 == 1' file
# Print even-numbered lines
awk 'NR % 2 == 0' file
# Print lines 10-20
awk 'NR >= 10 && NR <= 20' file
# Print every 5th line
awk 'NR % 5 == 0' file
# Print unique lines (in order)
awk '!seen[$0]++' file
# Print duplicate lines
awk 'seen[$0]++' file
# Count HTTP status codes
awk '{ print $9 }' access.log | sort | uniq -c
# Sum bytes transferred
awk '{ sum += $10 } END { print sum }' access.log
# Top 10 IPs
awk '{ print $1 }' access.log | sort | uniq -c | sort -rn | head
# Requests per hour
awk '{ print substr($4, 2, 14) }' access.log | uniq -c
# Filter by status code
awk '$9 == 500' access.log
# Calculate response time percentiles
awk '{ print $NF }' access.log | sort -n | awk '{ a[NR]=$1 } END { print "50th:", a[int(NR*0.5)], "95th:", a[int(NR*0.95)], "99th:", a[int(NR*0.99)] }'
File Comparison
# Print lines in file1 not in file2
awk 'NR==FNR { a[$0]; next } !($0 in a)' file2 file1
# Print lines common to both files
awk 'NR==FNR { a[$0]; next } $0 in a' file1 file2
# Join two files on first field
awk 'NR==FNR { a[$1]=$2; next } { print $0, a[$1] }' file1 file2
# Merge columns from two files
paste file1 file2 | awk '{ print $1, $3 }'
Practical Examples
System Administration
# Parse /etc/passwd
awk -F: '{ print $1, $3, $6 }' /etc/passwd
# Users with UID >= 1000
awk -F: '$3 >= 1000 { print $1 }' /etc/passwd
# Find largest files
ls -lh | awk '$5 ~ /G$/ { print $9, $5 }'
# Process list summary
ps aux | awk '{ sum += $6 } END { print "Total memory:", sum/1024, "MB" }'
# Disk usage summary (partitions over 80% full)
df -h | awk '$5+0 > 80 { print $6, $5 }'
# Network connections by state
netstat -an | awk '/^tcp/ { state[$6]++ } END { for (s in state) print s, state[s] }'
Data Processing
# Convert JSON array to CSV (simple)
sed 's/[{}\[\]]//g' data.json | sed 's/"//g' | awk -F, '{ print $1 "," $2 }'
# Extract URLs from HTML
sed 's/<a href="/\n/g' page.html | awk -F'"' '/^http/ { print $1 }'
# Parse Apache access log
awk '{ print $1 }' access.log | sort | uniq -c | sort -rn | head
# Generate report from CSV
awk -F',' 'NR==1 { print; next } { sum+=$3; count++ } END { print "Average:", sum/count }' data.csv
# Transpose CSV
awk -F',' '{ for (i=1; i<=NF; i++) a[i,NR]=$i; nf=NF } END { for (i=1; i<=nf; i++) { for (j=1; j<=NR; j++) printf "%s%s", a[i,j], (j<NR ? "," : "\n") } }' data.csv
Configuration Files
# Remove comments and blank lines
sed '/^#/d; /^$/d' config.conf
# Extract values from key=value
awk -F= '/^[^#]/ { print $2 }' config.conf
# Change configuration value
sed -i 's/^DEBUG=.*/DEBUG=true/' config.conf
# Add line after pattern
sed '/\[section\]/a new_key=value' config.ini
# Validate JSON (basic check)
sed 's/[^{}[\],:]//g' file.json | awk '{ gsub(/[,:]/, ""); print } END { if (gsub(/{/, "&") != gsub(/}/, "&")) print "Unbalanced braces" }'
Tips & Best Practices
Performance
Use sed for simple substitutions
Use awk for field-based processing
Avoid unnecessary pipes: cat file | sed → sed file
Use -n with sed when you don't need all output
Process files in place with sed -i instead of temp files
Debugging
# Sed debugging: show special characters
sed -n 'l' file
# Awk debugging
awk '{ print "Debug:", $0 }' file
# Test patterns
echo "test string" | sed 's/pattern/replacement/'
echo "field1 field2" | awk '{ print $2 }'
Common Pitfalls
# WRONG: Redirecting output onto the file being read (truncates the input)
sed 's/old/new/' file.txt > file.txt
# CORRECT: Use -i with sed
sed -i 's/old/new/' file.txt
# WRONG: Using regex without escaping (dots match any character)
sed 's/192.168.1.1/10.0.0.1/' file
# CORRECT: Escape dots
sed 's/192\.168\.1\.1/10.0.0.1/' file
# WRONG: Forgetting to set FS
awk '{ print $2 }' /etc/passwd
# CORRECT: Set field separator
awk -F: '{ print $2 }' /etc/passwd