重要前提
安装AI Skills的关键前提是:必须科学上网,且开启TUN模式,这一点至关重要,直接决定安装能否顺利完成,在此郑重提醒三遍:科学上网,科学上网,科学上网。查看完整安装教程 →
process-substitution-fifos by josiahsiegel/claude-plugin-marketplace
npx skills add https://github.com/josiahsiegel/claude-plugin-marketplace --skill process-substitution-fifos
强制要求:在 Windows 上始终对文件路径使用反斜杠
在 Windows 上使用编辑或写入工具时,必须在文件路径中使用反斜杠(\),而非正斜杠(/)。
掌握使用进程替换、命名管道(FIFO)和高效数据流技术的 bash 高级进程间通信模式。这些模式无需临时文件即可实现强大的数据管道。
<(command)
#!/usr/bin/env bash
set -euo pipefail
# Compare the output of two commands without temp files.
diff <(sort file1.txt) <(sort file2.txt)
# Compare a remote file against a local one.
diff <(ssh server 'cat /etc/config') /etc/config
# Merge already-sorted streams.
sort -m <(sort file1.txt) <(sort file2.txt) <(sort file3.txt)
# Read from multiple sources simultaneously.
paste <(cut -f1 data.tsv) <(cut -f3 data.tsv)
# Feed command output to programs that expect a filename, not stdin.
wc -l <(grep "error" *.log)
# Process an API response with a tool that expects a file.
jq '.items[]' <(curl -s "https://api.example.com/data")
# Source environment variables from command output.
source <(aws configure export-credentials --format env)
# Feed a while loop without the subshell problem.
# FIX: count must be initialized (set -u would otherwise fail), and the
# increment must use $((...)) — ((count++)) returns status 1 when count
# is 0, which aborts the whole script under set -e.
count=0
while IFS= read -r line; do
  count=$((count + 1))
  process "$line"
done < <(find . -name "*.txt")
echo "Processed $count files" # variable survives the loop!
广告位招租
在这里展示您的产品或服务
触达数万 AI 开发者,精准高效
>(command)
#!/usr/bin/env bash
set -euo pipefail
# 同时写入多个目标(tee 的替代方案)
echo "Log message" | tee >(logger -t myapp) >(mail -s "Alert" admin@example.com)
# 单次传递完成压缩和校验和计算
tar cf - /data | tee >(gzip > backup.tar.gz) >(sha256sum > backup.sha256)
# 将输出发送到多个处理器
generate_data | tee >(processor1 > result1.txt) >(processor2 > result2.txt) > /dev/null
# 同时记录日志和处理
./build.sh 2>&1 | tee >(grep -i error > errors.log) >(grep -i warning > warnings.log)
# 具有多个输出的实时过滤
tail -f /var/log/syslog | tee \
>(grep --line-buffered "ERROR" >> errors.log) \
>(grep --line-buffered "WARNING" >> warnings.log) \
>(grep --line-buffered "CRITICAL" | mail -s "Critical Alert" admin@example.com)
#!/usr/bin/env bash
set -euo pipefail
# 转换并比较
diff <(sort input.txt | uniq) <(sort reference.txt | uniq)
# 具有多个分支的管道
cat data.csv | tee \
>(awk -F, '{print $1}' > column1.txt) \
>(awk -F, '{print $2}' > column2.txt) \
| wc -l
# 复杂数据流
process_data() {
local input="$1"
# 从进程替换读取,写入多个输出
while IFS= read -r line; do
echo "$line" | tee \
>(echo "LOG: $line" >> "$log_file") \
>(process_line "$line" >> results.txt)
done < <(cat "$input" | filter_input)
}
#!/usr/bin/env bash
set -euo pipefail
# 创建 FIFO
mkfifo my_pipe
# 退出时清理
trap 'rm -f my_pipe' EXIT
# 写入者(在后台或单独终端)
echo "Hello from writer" > my_pipe &
# 读取者(阻塞直到数据可用)
cat < my_pipe
# 使用超时(使用 read)
if read -t 5 line < my_pipe; then
echo "Received: $line"
else
echo "Timeout waiting for data"
fi
#!/usr/bin/env bash
set -euo pipefail
# 为双向通信创建两个 FIFO
REQUEST_PIPE="/tmp/request_$$"
RESPONSE_PIPE="/tmp/response_$$"
mkfifo "$REQUEST_PIPE" "$RESPONSE_PIPE"
trap 'rm -f "$REQUEST_PIPE" "$RESPONSE_PIPE"' EXIT
# 服务器进程
server() {
while true; do
if read -r request < "$REQUEST_PIPE"; then
case "$request" in
"QUIT")
echo "BYE" > "$RESPONSE_PIPE"
break
;;
"TIME")
date > "$RESPONSE_PIPE"
;;
"UPTIME")
uptime > "$RESPONSE_PIPE"
;;
*)
echo "UNKNOWN: $request" > "$RESPONSE_PIPE"
;;
esac
fi
done
}
# 客户端函数
send_request() {
local request="$1"
echo "$request" > "$REQUEST_PIPE"
cat < "$RESPONSE_PIPE"
}
# 在后台启动服务器
server &
SERVER_PID=$!
# 发送请求
send_request "TIME"
send_request "UPTIME"
send_request "QUIT"
wait "$SERVER_PID"
#!/usr/bin/env bash
set -euo pipefail
WORK_QUEUE="/tmp/work_queue_$$"
mkfifo "$WORK_QUEUE"
trap 'rm -f "$WORK_QUEUE"' EXIT
# 生产者
producer() {
local item
for item in {1..100}; do
echo "TASK:$item"
done
echo "DONE"
}
# 消费者(可以有多个)
consumer() {
local id="$1"
while read -r item; do
[[ "$item" == "DONE" ]] && break
echo "Consumer $id processing: $item"
sleep 0.1 # 模拟工作
done
}
# 启动消费者(它们将阻塞等待数据)
consumer 1 < "$WORK_QUEUE" &
consumer 2 < "$WORK_QUEUE" &
consumer 3 < "$WORK_QUEUE" &
# 启动生产者
producer > "$WORK_QUEUE"
wait
echo "All work complete"
#!/usr/bin/env bash
set -euo pipefail
FIFO="/tmp/fd_fifo_$$"
mkfifo "$FIFO"
trap 'rm -f "$FIFO"' EXIT
# 在文件描述符 3 上以读写方式打开 FIFO
# 同时打开读写两端可防止在打开时阻塞
exec 3<>"$FIFO"
# 通过文件描述符写入 FIFO
echo "Message 1" >&3
echo "Message 2" >&3
# 通过文件描述符从 FIFO 读取
read -r msg1 <&3
read -r msg2 <&3
echo "Got: $msg1, $msg2"
# 关闭文件描述符
exec 3>&-
#!/usr/bin/env bash
set -euo pipefail
# 启动协进程(双向管道)
coproc BC { bc -l; }
# 发送数据到协进程
echo "scale=10; 355/113" >&"${BC[1]}"
# 读取结果
read -r result <&"${BC[0]}"
echo "Pi approximation: $result"
# 更多计算
echo "sqrt(2)" >&"${BC[1]}"
read -r sqrt2 <&"${BC[0]}"
echo "Square root of 2: $sqrt2"
# 关闭写入端以发送 EOF 信号
exec {BC[1]}>&-
# 等待协进程完成
wait "$BC_PID"
#!/usr/bin/env bash
set -euo pipefail
# 用于 Python 解释器的命名协进程
coproc PYTHON { python3 -u -c "
import sys
for line in sys.stdin:
exec(line.strip())
"; }
# 发送 Python 命令
echo "print('Hello from Python')" >&"${PYTHON[1]}"
read -r output <&"${PYTHON[0]}"
echo "Python said: $output"
echo "print(2**100)" >&"${PYTHON[1]}"
read -r big_num <&"${PYTHON[0]}"
echo "2^100 = $big_num"
# 清理
exec {PYTHON[1]}>&-
wait "$PYTHON_PID" 2>/dev/null || true
#!/usr/bin/env bash
set -euo pipefail
# 创建工作协进程池
declare -A WORKERS
declare -A WORKER_PIDS
start_workers() {
local count="$1"
local i
for ((i=0; i<count; i++)); do
# 每个协进程运行一个处理循环
coproc "WORKER_$i" {
while IFS= read -r task; do
[[ "$task" == "QUIT" ]] && exit 0
# 模拟工作
sleep 0.1
echo "DONE:$task"
done
}
# 动态存储文件描述符
local -n write_fd="WORKER_${i}[1]"
local -n read_fd="WORKER_${i}[0]"
local -n pid="WORKER_${i}_PID"
WORKERS["$i,in"]="$write_fd"
WORKERS["$i,out"]="$read_fd"
WORKER_PIDS["$i"]="$pid"
done
}
# 注意:协进程池管理很复杂
# 对于生产工作负载,请考虑使用 GNU Parallel
#!/usr/bin/env bash
set -euo pipefail
PROGRESS_PIPE="/tmp/progress_$$"
mkfifo "$PROGRESS_PIPE"
trap 'rm -f "$PROGRESS_PIPE"' EXIT
# Progress monitor: reads one line per completed item from the FIFO and
# redraws a percentage bar. Exits when the write end is closed (EOF).
monitor_progress() {
  local total="$1"
  local current=0
  local update pct
  while read -r update; do
    # FIX: $((...)) instead of ((current++)) — the latter returns
    # status 1 when current is 0 and kills this background process
    # under the inherited set -e.
    current=$((current + 1))
    pct=$((current * 100 / total))
    printf "\rProgress: [%-50s] %d%%" \
      "$(printf '#%.0s' $(seq 1 $((pct/2))))" "$pct"
  done < "$PROGRESS_PIPE"
  echo
}
# Worker that reports progress through an already-open FD.
# FIX: writing via `> "$PROGRESS_PIPE"` per item opens AND closes the
# FIFO each time; the first close delivers EOF to the monitor, which
# then exits after one item. Write through FD 3, held open by main.
do_work() {
  local items=("$@")
  local item
  for item in "${items[@]}"; do
    process_item "$item"
    echo "done" >&3
  done
}
# Usage
items=(item1 item2 item3 ... item100)
monitor_progress "${#items[@]}" &
MONITOR_PID=$!
# FIX: open the write end ONCE, before any work starts, and keep it
# open for the whole run; closing it afterwards is the completion
# signal the monitor waits for. (The original opened it only after
# do_work had finished, which defeated both purposes.)
exec 3>"$PROGRESS_PIPE"
do_work "${items[@]}"
exec 3>&- # close to signal completion
wait "$MONITOR_PID"
#!/usr/bin/env bash
set -euo pipefail
LOG_DIR="/tmp/logs_$$"
mkdir -p "$LOG_DIR"
# One FIFO per log level.
for level in DEBUG INFO WARN ERROR; do
  mkfifo "$LOG_DIR/$level"
done
trap 'rm -rf "$LOG_DIR"' EXIT
# Aggregator: polls every level FIFO and appends tagged lines to one file.
aggregate_logs() {
  local output_file="$1"
  # FIX: open each FIFO read-WRITE (<>). A read-only open blocks until
  # some writer opens that FIFO, so blocking on DEBUG here would
  # deadlock against a client whose first message is log_info: the
  # aggregator waits on DEBUG while the client waits on INFO forever.
  exec 3<>"$LOG_DIR/DEBUG"
  exec 4<>"$LOG_DIR/INFO"
  exec 5<>"$LOG_DIR/WARN"
  exec 6<>"$LOG_DIR/ERROR"
  local msg
  # NOTE(review): read -t polling is a busy loop (~0.4s per round) —
  # fine for a demo; prefer one multiplexed FIFO for production use.
  while true; do
    read -t 0.1 -r msg <&3 && echo "[DEBUG] $(date '+%H:%M:%S') $msg" >> "$output_file"
    read -t 0.1 -r msg <&4 && echo "[INFO] $(date '+%H:%M:%S') $msg" >> "$output_file"
    read -t 0.1 -r msg <&5 && echo "[WARN] $(date '+%H:%M:%S') $msg" >> "$output_file"
    read -t 0.1 -r msg <&6 && echo "[ERROR] $(date '+%H:%M:%S') $msg" >> "$output_file"
  done
}
# Logging clients: each write opens the FIFO, writes one line, closes.
log_debug() { echo "$*" > "$LOG_DIR/DEBUG"; }
log_info() { echo "$*" > "$LOG_DIR/INFO"; }
log_warn() { echo "$*" > "$LOG_DIR/WARN"; }
log_error() { echo "$*" > "$LOG_DIR/ERROR"; }
# Start aggregator.
# NOTE(review): /var/log/app.log usually needs elevated privileges —
# confirm the target path for the deployment.
aggregate_logs "/var/log/app.log" &
AGGREGATOR_PID=$!
# Application code uses the logging functions.
log_info "Application started"
log_debug "Processing item"
log_warn "Resource running low"
log_error "Critical failure"
# Cleanup.
# FIX: kill may race with an already-dead aggregator; without || true
# its non-zero status would abort the script under set -e.
kill "$AGGREGATOR_PID" 2>/dev/null || true
wait "$AGGREGATOR_PID" 2>/dev/null || true
#!/usr/bin/env bash
set -euo pipefail
# 带缓冲的管道阶段
buffered_stage() {
local name="$1"
local buffer_size="${2:-100}"
local buffer=()
while IFS= read -r line || [[ ${#buffer[@]} -gt 0 ]]; do
if [[ -n "$line" ]]; then
buffer+=("$line")
fi
# 缓冲区满或遇到 EOF 时刷新
if [[ ${#buffer[@]} -ge $buffer_size ]] || [[ -z "$line" && ${#buffer[@]} -gt 0 ]]; then
printf '%s\n' "${buffer[@]}" | process_batch
buffer=()
fi
done
}
# 使用进程替换的并行管道
run_parallel_pipeline() {
local input="$1"
cat "$input" | \
tee >(filter_a | transform_a > output_a.txt) \
>(filter_b | transform_b > output_b.txt) \
>(filter_c | transform_c > output_c.txt) \
> /dev/null
# 等待所有后台进程
wait
}
#!/usr/bin/env bash
set -euo pipefail
# 流式处理 JSON 数组元素
stream_json_array() {
local url="$1"
# 使用 jq 将数组元素每行一个流式输出
curl -s "$url" | jq -c '.items[]' | while IFS= read -r item; do
process_json_item "$item"
done
}
# 使用进程替换的并行 JSON 处理
parallel_json_process() {
local input="$1"
local workers=4
# 将输入拆分到多个 worker
jq -c '.[]' "$input" | \
parallel --pipe -N100 --jobs "$workers" '
while IFS= read -r item; do
echo "$item" | jq ".processed = true"
done
' | jq -s '.'
}
# 转换 JSON 流
transform_json_stream() {
jq -c '.' | while IFS= read -r obj; do
# 使用 bash 处理
local id
id=$(echo "$obj" | jq -r '.id')
# 丰富内容并输出
echo "$obj" | jq --arg ts "$(date -Iseconds)" '. + {timestamp: $ts}'
done
}
#!/usr/bin/env bash
# 需要 Bash 5.3+
set -euo pipefail
# 传统方式:创建子 shell
result=$(echo "hello")
# Bash 5.3:无分支,在当前 shell 中运行
result=${ echo "hello"; }
# 对变量修改很重要
counter=0
# 传统方式 - counter 保持为 0(子 shell)
result=$(counter=$((counter + 1)); echo "$counter")
echo "Counter: $counter" # 仍为 0
# Bash 5.3 - counter 被修改(同一 shell)
result=${ counter=$((counter + 1)); echo "$counter"; }
echo "Counter: $counter" # 现在为 1
# REPLY 变量语法(更简洁)
${ REPLY="computed value"; }
echo "$REPLY"
# 或使用 ${| } 语法
${| REPLY=$(expensive_computation); }
echo "Result: $REPLY"
#!/usr/bin/env bash
# 需要 Bash 5.3+
set -euo pipefail
# 无需分支构建结果
build_path() {
local parts=("$@")
local result=""
for part in "${parts[@]}"; do
# 每次连接都无需分支
result=${ printf '%s/%s' "$result" "$part"; }
done
echo "${result#/}"
}
# 高效累加值
accumulate() {
local -n arr="$1"
local sum=0
for val in "${arr[@]}"; do
# 内联算术捕获
sum=${ echo $((sum + val)); }
done
echo "$sum"
}
#!/usr/bin/env bash
set -euo pipefail
# Run a three-stage pipeline and report a single pass/fail result.
# With pipefail, the command substitution fails if ANY stage fails.
run_pipeline() {
  local result
  if ! result=$(stage1 | stage2 | stage3); then
    echo "Pipeline failed" >&2
    return 1
  fi
  echo "$result"
}
# Per-stage diagnostics via PIPESTATUS.
run_with_status() {
  # FIX: under set -e + pipefail, a bare `cmd1 | cmd2 | cmd3` exits the
  # shell before PIPESTATUS can be inspected. Capture PIPESTATUS inside
  # the same &&/|| compound so the failure is in a "checked" context and
  # the statuses are read before any other command overwrites them.
  local -a status
  cmd1 | cmd2 | cmd3 && status=("${PIPESTATUS[@]}") || status=("${PIPESTATUS[@]}")
  local i
  for i in "${!status[@]}"; do
    if [[ "${status[$i]}" -ne 0 ]]; then
      echo "Stage $i failed with status ${status[$i]}" >&2
    fi
  done
  # Return the worst (highest) stage status.
  # FIX: `((s > max)) && max=$s` returns 1 when the comparison is false,
  # which trips set -e; use an explicit if instead.
  local max=0 s
  for s in "${status[@]}"; do
    if (( s > max )); then
      max="$s"
    fi
  done
  return "$max"
}
#!/usr/bin/env bash
set -euo pipefail
# 跟踪资源以进行清理
declare -a CLEANUP_PIDS=()
declare -a CLEANUP_FILES=()
cleanup() {
local pid file
for pid in "${CLEANUP_PIDS[@]}"; do
kill "$pid" 2>/dev/null || true
done
for file in "${CLEANUP_FILES[@]}"; do
rm -f "$file" 2>/dev/null || true
done
}
trap cleanup EXIT
# 注册清理
register_pid() { CLEANUP_PIDS+=("$1"); }
register_file() { CLEANUP_FILES+=("$1"); }
# 示例用法
run_safe_pipeline() {
local fifo="/tmp/pipeline_$$"
mkfifo "$fifo"
register_file "$fifo"
producer > "$fifo" &
register_pid "$!"
consumer < "$fifo" &
register_pid "$!"
wait
}
#!/usr/bin/env bash
set -euo pipefail
# 包含 PID 和描述性名称
create_fifo() {
local name="$1"
local fifo="/tmp/${name}_$$_$(date +%s)"
mkfifo -m 600 "$fifo" # 限制性权限
echo "$fifo"
}
# 为安全性使用 tmpdir
create_secure_fifo() {
local name="$1"
local tmpdir
tmpdir=$(mktemp -d)
local fifo="$tmpdir/$name"
mkfifo -m 600 "$fifo"
echo "$fifo"
}
#!/usr/bin/env bash
set -euo pipefail
# ✗ DEADLOCK - a lone writer blocks inside open() until a reader appears
# mkfifo pipe
# echo "data" > pipe # blocks forever
# ✓ SAFE - background one end, or open read/write
rm -f pipe          # tolerate leftovers from a previous aborted run
mkfifo pipe
trap 'rm -f pipe' EXIT
# Option 1: background writer, foreground reader
echo "data" > pipe &
cat < pipe
# Option 2: open read/write on one FD — open() cannot block because
# this process is itself both reader and writer
exec 3<>pipe
echo "data" >&3
read -r data <&3
exec 3>&-
# Option 3: hold the read end open with a background reader so the
# writer's open() returns immediately.
# FIX: the original `exec 3<pipe &` was a no-op for this shell — exec
# in a background job only changes that throwaway subshell's FDs, and
# the following read from FD 3 would hang.
cat pipe > /dev/null &
READER_PID=$!
exec 4>pipe
echo "data" >&4
exec 4>&-            # close write end: EOF lets the background cat exit
wait "$READER_PID"
#!/usr/bin/env bash
set -euo pipefail
# Read one line from a FIFO, giving up after $2 seconds.
# Outputs the line on stdout; returns 1 on timeout.
# NOTE(review): read -t bounds the read(), not the open(); the open
# blocks until a writer appears — confirm callers start a writer first.
read_with_timeout() {
  local fifo="$1"
  local timeout="$2"
  local result
  if read -t "$timeout" -r result < "$fifo"; then
    echo "$result"
    return 0
  else
    echo "Timeout after ${timeout}s" >&2
    return 1
  fi
}
# Write one line to a FIFO, giving up after $2 seconds (timeout(1)).
# Returns 1 on timeout.
write_with_timeout() {
  local fifo="$1"
  local timeout="$2"
  local data="$3"
  # SECURITY FIX: pass data and path as argv instead of interpolating
  # them into the bash -c script — a single quote in $data previously
  # broke out of the quoting and could execute arbitrary commands.
  if timeout "$timeout" bash -c 'printf "%s\n" "$1" > "$2"' _ "$data" "$fifo"; then
    return 0
  else
    echo "Write timeout after ${timeout}s" >&2
    return 1
  fi
}
掌握进程替换和 FIFO,实现无需临时文件的高效进程间通信。
每周安装次数
65
仓库
GitHub 星标数
21
首次出现时间
2026年1月24日
安全审计
安装于
claude-code51
opencode51
gemini-cli49
codex48
cursor44
github-copilot42
MANDATORY: Always Use Backslashes on Windows for File Paths
When using Edit or Write tools on Windows, you MUST use backslashes (\) in file paths, NOT forward slashes (/).
Master advanced inter-process communication patterns in bash using process substitution, named pipes (FIFOs), and efficient data streaming techniques. These patterns enable powerful data pipelines without temporary files.
<(command)
#!/usr/bin/env bash
set -euo pipefail
# Compare the output of two commands without temp files.
diff <(sort file1.txt) <(sort file2.txt)
# Compare a remote file against a local one.
diff <(ssh server 'cat /etc/config') /etc/config
# Merge already-sorted streams.
sort -m <(sort file1.txt) <(sort file2.txt) <(sort file3.txt)
# Read from multiple sources simultaneously.
paste <(cut -f1 data.tsv) <(cut -f3 data.tsv)
# Feed command output to programs that expect a filename, not stdin.
wc -l <(grep "error" *.log)
# Process an API response with a tool that expects a file.
jq '.items[]' <(curl -s "https://api.example.com/data")
# Source environment variables from command output.
source <(aws configure export-credentials --format env)
# Feed a while loop without the subshell problem.
# FIX: count must be initialized (set -u would otherwise fail), and the
# increment must use $((...)) — ((count++)) returns status 1 when count
# is 0, which aborts the whole script under set -e.
count=0
while IFS= read -r line; do
  count=$((count + 1))
  process "$line"
done < <(find . -name "*.txt")
echo "Processed $count files" # variable survives the loop!
>(command)
#!/usr/bin/env bash
set -euo pipefail
# Write to multiple destinations simultaneously (tee alternative)
echo "Log message" | tee >(logger -t myapp) >(mail -s "Alert" admin@example.com)
# Compress and checksum in one pass
tar cf - /data | tee >(gzip > backup.tar.gz) >(sha256sum > backup.sha256)
# Send output to multiple processors
generate_data | tee >(processor1 > result1.txt) >(processor2 > result2.txt) > /dev/null
# Log and process simultaneously
./build.sh 2>&1 | tee >(grep -i error > errors.log) >(grep -i warning > warnings.log)
# Real-time filtering with multiple outputs
tail -f /var/log/syslog | tee \
>(grep --line-buffered "ERROR" >> errors.log) \
>(grep --line-buffered "WARNING" >> warnings.log) \
>(grep --line-buffered "CRITICAL" | mail -s "Critical Alert" admin@example.com)
#!/usr/bin/env bash
set -euo pipefail
# Transform and compare
diff <(sort input.txt | uniq) <(sort reference.txt | uniq)
# Pipeline with multiple branches
cat data.csv | tee \
>(awk -F, '{print $1}' > column1.txt) \
>(awk -F, '{print $2}' > column2.txt) \
| wc -l
# Complex data flow
process_data() {
local input="$1"
# Read from process substitution, write to multiple outputs
while IFS= read -r line; do
echo "$line" | tee \
>(echo "LOG: $line" >> "$log_file") \
>(process_line "$line" >> results.txt)
done < <(cat "$input" | filter_input)
}
#!/usr/bin/env bash
set -euo pipefail
# Create FIFO
mkfifo my_pipe
# Clean up on exit
trap 'rm -f my_pipe' EXIT
# Writer (in background or separate terminal)
echo "Hello from writer" > my_pipe &
# Reader (blocks until data available)
cat < my_pipe
# With timeout (using read)
if read -t 5 line < my_pipe; then
echo "Received: $line"
else
echo "Timeout waiting for data"
fi
#!/usr/bin/env bash
set -euo pipefail
# Create two FIFOs for bidirectional communication
REQUEST_PIPE="/tmp/request_$$"
RESPONSE_PIPE="/tmp/response_$$"
mkfifo "$REQUEST_PIPE" "$RESPONSE_PIPE"
trap 'rm -f "$REQUEST_PIPE" "$RESPONSE_PIPE"' EXIT
# Server process
server() {
while true; do
if read -r request < "$REQUEST_PIPE"; then
case "$request" in
"QUIT")
echo "BYE" > "$RESPONSE_PIPE"
break
;;
"TIME")
date > "$RESPONSE_PIPE"
;;
"UPTIME")
uptime > "$RESPONSE_PIPE"
;;
*)
echo "UNKNOWN: $request" > "$RESPONSE_PIPE"
;;
esac
fi
done
}
# Client function
send_request() {
local request="$1"
echo "$request" > "$REQUEST_PIPE"
cat < "$RESPONSE_PIPE"
}
# Start server in background
server &
SERVER_PID=$!
# Send requests
send_request "TIME"
send_request "UPTIME"
send_request "QUIT"
wait "$SERVER_PID"
#!/usr/bin/env bash
set -euo pipefail
WORK_QUEUE="/tmp/work_queue_$$"
mkfifo "$WORK_QUEUE"
trap 'rm -f "$WORK_QUEUE"' EXIT
# Producer
producer() {
local item
for item in {1..100}; do
echo "TASK:$item"
done
echo "DONE"
}
# Consumer (can have multiple)
consumer() {
local id="$1"
while read -r item; do
[[ "$item" == "DONE" ]] && break
echo "Consumer $id processing: $item"
sleep 0.1 # Simulate work
done
}
# Start consumers (they'll block waiting for data)
consumer 1 < "$WORK_QUEUE" &
consumer 2 < "$WORK_QUEUE" &
consumer 3 < "$WORK_QUEUE" &
# Start producer
producer > "$WORK_QUEUE"
wait
echo "All work complete"
#!/usr/bin/env bash
set -euo pipefail
FIFO="/tmp/fd_fifo_$$"
mkfifo "$FIFO"
trap 'rm -f "$FIFO"' EXIT
# Open FIFO for read/write on FD 3
# Opening for both prevents blocking on open
exec 3<>"$FIFO"
# Write to FIFO via FD
echo "Message 1" >&3
echo "Message 2" >&3
# Read from FIFO via FD
read -r msg1 <&3
read -r msg2 <&3
echo "Got: $msg1, $msg2"
# Close FD
exec 3>&-
#!/usr/bin/env bash
set -euo pipefail
# Start coprocess (bidirectional pipe)
coproc BC { bc -l; }
# Send data to coprocess
echo "scale=10; 355/113" >&"${BC[1]}"
# Read result
read -r result <&"${BC[0]}"
echo "Pi approximation: $result"
# More calculations
echo "sqrt(2)" >&"${BC[1]}"
read -r sqrt2 <&"${BC[0]}"
echo "Square root of 2: $sqrt2"
# Close write end to signal EOF
exec {BC[1]}>&-
# Wait for coprocess to finish
wait "$BC_PID"
#!/usr/bin/env bash
set -euo pipefail
# Named coprocess for Python interpreter
coproc PYTHON { python3 -u -c "
import sys
for line in sys.stdin:
exec(line.strip())
"; }
# Send Python commands
echo "print('Hello from Python')" >&"${PYTHON[1]}"
read -r output <&"${PYTHON[0]}"
echo "Python said: $output"
echo "print(2**100)" >&"${PYTHON[1]}"
read -r big_num <&"${PYTHON[0]}"
echo "2^100 = $big_num"
# Cleanup
exec {PYTHON[1]}>&-
wait "$PYTHON_PID" 2>/dev/null || true
#!/usr/bin/env bash
set -euo pipefail
# Create pool of worker coprocesses
declare -A WORKERS
declare -A WORKER_PIDS
start_workers() {
local count="$1"
local i
for ((i=0; i<count; i++)); do
# Each worker runs a processing loop
coproc "WORKER_$i" {
while IFS= read -r task; do
[[ "$task" == "QUIT" ]] && exit 0
# Simulate work
sleep 0.1
echo "DONE:$task"
done
}
# Store FDs dynamically
local -n write_fd="WORKER_${i}[1]"
local -n read_fd="WORKER_${i}[0]"
local -n pid="WORKER_${i}_PID"
WORKERS["$i,in"]="$write_fd"
WORKERS["$i,out"]="$read_fd"
WORKER_PIDS["$i"]="$pid"
done
}
# Note: Coprocess pool management is complex
# Consider GNU Parallel for production workloads
#!/usr/bin/env bash
set -euo pipefail
PROGRESS_PIPE="/tmp/progress_$$"
mkfifo "$PROGRESS_PIPE"
trap 'rm -f "$PROGRESS_PIPE"' EXIT
# Progress monitor: reads one line per completed item from the FIFO and
# redraws a percentage bar. Exits when the write end is closed (EOF).
monitor_progress() {
  local total="$1"
  local current=0
  local update pct
  while read -r update; do
    # FIX: $((...)) instead of ((current++)) — the latter returns
    # status 1 when current is 0 and kills this background process
    # under the inherited set -e.
    current=$((current + 1))
    pct=$((current * 100 / total))
    printf "\rProgress: [%-50s] %d%%" \
      "$(printf '#%.0s' $(seq 1 $((pct/2))))" "$pct"
  done < "$PROGRESS_PIPE"
  echo
}
# Worker that reports progress through an already-open FD.
# FIX: writing via `> "$PROGRESS_PIPE"` per item opens AND closes the
# FIFO each time; the first close delivers EOF to the monitor, which
# then exits after one item. Write through FD 3, held open by main.
do_work() {
  local items=("$@")
  local item
  for item in "${items[@]}"; do
    process_item "$item"
    echo "done" >&3
  done
}
# Usage
items=(item1 item2 item3 ... item100)
monitor_progress "${#items[@]}" &
MONITOR_PID=$!
# FIX: open the write end ONCE, before any work starts, and keep it
# open for the whole run; closing it afterwards is the completion
# signal the monitor waits for. (The original opened it only after
# do_work had finished, which defeated both purposes.)
exec 3>"$PROGRESS_PIPE"
do_work "${items[@]}"
exec 3>&- # close to signal completion
wait "$MONITOR_PID"
#!/usr/bin/env bash
set -euo pipefail
LOG_DIR="/tmp/logs_$$"
mkdir -p "$LOG_DIR"
# One FIFO per log level.
for level in DEBUG INFO WARN ERROR; do
  mkfifo "$LOG_DIR/$level"
done
trap 'rm -rf "$LOG_DIR"' EXIT
# Aggregator: polls every level FIFO and appends tagged lines to one file.
aggregate_logs() {
  local output_file="$1"
  # FIX: open each FIFO read-WRITE (<>). A read-only open blocks until
  # some writer opens that FIFO, so blocking on DEBUG here would
  # deadlock against a client whose first message is log_info: the
  # aggregator waits on DEBUG while the client waits on INFO forever.
  exec 3<>"$LOG_DIR/DEBUG"
  exec 4<>"$LOG_DIR/INFO"
  exec 5<>"$LOG_DIR/WARN"
  exec 6<>"$LOG_DIR/ERROR"
  local msg
  # NOTE(review): read -t polling is a busy loop (~0.4s per round) —
  # fine for a demo; prefer one multiplexed FIFO for production use.
  while true; do
    read -t 0.1 -r msg <&3 && echo "[DEBUG] $(date '+%H:%M:%S') $msg" >> "$output_file"
    read -t 0.1 -r msg <&4 && echo "[INFO] $(date '+%H:%M:%S') $msg" >> "$output_file"
    read -t 0.1 -r msg <&5 && echo "[WARN] $(date '+%H:%M:%S') $msg" >> "$output_file"
    read -t 0.1 -r msg <&6 && echo "[ERROR] $(date '+%H:%M:%S') $msg" >> "$output_file"
  done
}
# Logging clients: each write opens the FIFO, writes one line, closes.
log_debug() { echo "$*" > "$LOG_DIR/DEBUG"; }
log_info() { echo "$*" > "$LOG_DIR/INFO"; }
log_warn() { echo "$*" > "$LOG_DIR/WARN"; }
log_error() { echo "$*" > "$LOG_DIR/ERROR"; }
# Start aggregator.
# NOTE(review): /var/log/app.log usually needs elevated privileges —
# confirm the target path for the deployment.
aggregate_logs "/var/log/app.log" &
AGGREGATOR_PID=$!
# Application code uses the logging functions.
log_info "Application started"
log_debug "Processing item"
log_warn "Resource running low"
log_error "Critical failure"
# Cleanup.
# FIX: kill may race with an already-dead aggregator; without || true
# its non-zero status would abort the script under set -e.
kill "$AGGREGATOR_PID" 2>/dev/null || true
wait "$AGGREGATOR_PID" 2>/dev/null || true
#!/usr/bin/env bash
set -euo pipefail
# Buffered pipeline stage
buffered_stage() {
local name="$1"
local buffer_size="${2:-100}"
local buffer=()
while IFS= read -r line || [[ ${#buffer[@]} -gt 0 ]]; do
if [[ -n "$line" ]]; then
buffer+=("$line")
fi
# Flush when buffer full or EOF
if [[ ${#buffer[@]} -ge $buffer_size ]] || [[ -z "$line" && ${#buffer[@]} -gt 0 ]]; then
printf '%s\n' "${buffer[@]}" | process_batch
buffer=()
fi
done
}
# Parallel pipeline with process substitution
run_parallel_pipeline() {
local input="$1"
cat "$input" | \
tee >(filter_a | transform_a > output_a.txt) \
>(filter_b | transform_b > output_b.txt) \
>(filter_c | transform_c > output_c.txt) \
> /dev/null
# Wait for all background processes
wait
}
#!/usr/bin/env bash
set -euo pipefail
# Stream JSON array elements
stream_json_array() {
local url="$1"
# Use jq to stream array elements one per line
curl -s "$url" | jq -c '.items[]' | while IFS= read -r item; do
process_json_item "$item"
done
}
# Parallel JSON processing with process substitution
parallel_json_process() {
local input="$1"
local workers=4
# Split input across workers
jq -c '.[]' "$input" | \
parallel --pipe -N100 --jobs "$workers" '
while IFS= read -r item; do
echo "$item" | jq ".processed = true"
done
' | jq -s '.'
}
# Transform JSON stream
transform_json_stream() {
jq -c '.' | while IFS= read -r obj; do
# Process with bash
local id
id=$(echo "$obj" | jq -r '.id')
# Enrich and output
echo "$obj" | jq --arg ts "$(date -Iseconds)" '. + {timestamp: $ts}'
done
}
#!/usr/bin/env bash
# Requires Bash 5.3+
set -euo pipefail
# Traditional: forks subshell
result=$(echo "hello")
# Bash 5.3: No fork, runs in current shell
result=${ echo "hello"; }
# Significant for variable modifications
counter=0
# Traditional - counter stays 0 (subshell)
result=$(counter=$((counter + 1)); echo "$counter")
echo "Counter: $counter" # Still 0
# Bash 5.3 - counter is modified (same shell)
result=${ counter=$((counter + 1)); echo "$counter"; }
echo "Counter: $counter" # Now 1
# REPLY variable syntax (even more concise)
${ REPLY="computed value"; }
echo "$REPLY"
# Or using ${| } syntax
${| REPLY=$(expensive_computation); }
echo "Result: $REPLY"
#!/usr/bin/env bash
# Requires Bash 5.3+
set -euo pipefail
# Build result without forks
build_path() {
local parts=("$@")
local result=""
for part in "${parts[@]}"; do
# No fork for each concatenation
result=${ printf '%s/%s' "$result" "$part"; }
done
echo "${result#/}"
}
# Accumulate values efficiently
accumulate() {
local -n arr="$1"
local sum=0
for val in "${arr[@]}"; do
# In-shell arithmetic capture
sum=${ echo $((sum + val)); }
done
echo "$sum"
}
#!/usr/bin/env bash
set -euo pipefail
# Run a three-stage pipeline and report a single pass/fail result.
# With pipefail, the command substitution fails if ANY stage fails.
run_pipeline() {
  local result
  if ! result=$(stage1 | stage2 | stage3); then
    echo "Pipeline failed" >&2
    return 1
  fi
  echo "$result"
}
# Per-stage diagnostics via PIPESTATUS.
run_with_status() {
  # FIX: under set -e + pipefail, a bare `cmd1 | cmd2 | cmd3` exits the
  # shell before PIPESTATUS can be inspected. Capture PIPESTATUS inside
  # the same &&/|| compound so the failure is in a "checked" context and
  # the statuses are read before any other command overwrites them.
  local -a status
  cmd1 | cmd2 | cmd3 && status=("${PIPESTATUS[@]}") || status=("${PIPESTATUS[@]}")
  local i
  for i in "${!status[@]}"; do
    if [[ "${status[$i]}" -ne 0 ]]; then
      echo "Stage $i failed with status ${status[$i]}" >&2
    fi
  done
  # Return the worst (highest) stage status.
  # FIX: `((s > max)) && max=$s` returns 1 when the comparison is false,
  # which trips set -e; use an explicit if instead.
  local max=0 s
  for s in "${status[@]}"; do
    if (( s > max )); then
      max="$s"
    fi
  done
  return "$max"
}
#!/usr/bin/env bash
set -euo pipefail
# Track resources for cleanup
declare -a CLEANUP_PIDS=()
declare -a CLEANUP_FILES=()
cleanup() {
local pid file
for pid in "${CLEANUP_PIDS[@]}"; do
kill "$pid" 2>/dev/null || true
done
for file in "${CLEANUP_FILES[@]}"; do
rm -f "$file" 2>/dev/null || true
done
}
trap cleanup EXIT
# Register cleanup
register_pid() { CLEANUP_PIDS+=("$1"); }
register_file() { CLEANUP_FILES+=("$1"); }
# Example usage
run_safe_pipeline() {
local fifo="/tmp/pipeline_$$"
mkfifo "$fifo"
register_file "$fifo"
producer > "$fifo" &
register_pid "$!"
consumer < "$fifo" &
register_pid "$!"
wait
}
#!/usr/bin/env bash
set -euo pipefail
# Include PID and descriptive name
create_fifo() {
local name="$1"
local fifo="/tmp/${name}_$$_$(date +%s)"
mkfifo -m 600 "$fifo" # Restrictive permissions
echo "$fifo"
}
# Use tmpdir for security
create_secure_fifo() {
local name="$1"
local tmpdir
tmpdir=$(mktemp -d)
local fifo="$tmpdir/$name"
mkfifo -m 600 "$fifo"
echo "$fifo"
}
#!/usr/bin/env bash
set -euo pipefail
# ✗ DEADLOCK - a lone writer blocks inside open() until a reader appears
# mkfifo pipe
# echo "data" > pipe # blocks forever
# ✓ SAFE - background one end, or open read/write
rm -f pipe          # tolerate leftovers from a previous aborted run
mkfifo pipe
trap 'rm -f pipe' EXIT
# Option 1: background writer, foreground reader
echo "data" > pipe &
cat < pipe
# Option 2: open read/write on one FD — open() cannot block because
# this process is itself both reader and writer
exec 3<>pipe
echo "data" >&3
read -r data <&3
exec 3>&-
# Option 3: hold the read end open with a background reader so the
# writer's open() returns immediately.
# FIX: the original `exec 3<pipe &` was a no-op for this shell — exec
# in a background job only changes that throwaway subshell's FDs, and
# the following read from FD 3 would hang.
cat pipe > /dev/null &
READER_PID=$!
exec 4>pipe
echo "data" >&4
exec 4>&-            # close write end: EOF lets the background cat exit
wait "$READER_PID"
#!/usr/bin/env bash
set -euo pipefail
# Read one line from a FIFO, giving up after $2 seconds.
# Outputs the line on stdout; returns 1 on timeout.
# NOTE(review): read -t bounds the read(), not the open(); the open
# blocks until a writer appears — confirm callers start a writer first.
read_with_timeout() {
  local fifo="$1"
  local timeout="$2"
  local result
  if read -t "$timeout" -r result < "$fifo"; then
    echo "$result"
    return 0
  else
    echo "Timeout after ${timeout}s" >&2
    return 1
  fi
}
# Write one line to a FIFO, giving up after $2 seconds (timeout(1)).
# Returns 1 on timeout.
write_with_timeout() {
  local fifo="$1"
  local timeout="$2"
  local data="$3"
  # SECURITY FIX: pass data and path as argv instead of interpolating
  # them into the bash -c script — a single quote in $data previously
  # broke out of the quoting and could execute arbitrary commands.
  if timeout "$timeout" bash -c 'printf "%s\n" "$1" > "$2"' _ "$data" "$fifo"; then
    return 0
  else
    echo "Write timeout after ${timeout}s" >&2
    return 1
  fi
}
Master process substitution and FIFOs for efficient inter-process communication without temporary files.
Weekly Installs
65
Repository
GitHub Stars
21
First Seen
Jan 24, 2026
Security Audits
Gen Agent Trust Hub: Pass · Socket: Pass · Snyk: Warn
Installed on
claude-code51
opencode51
gemini-cli49
codex48
cursor44
github-copilot42
GitHub Actions 官方文档查询助手 - 精准解答 CI/CD 工作流问题
47,200 周安装