重要前提
安装AI Skills的关键前提是:必须科学上网,且开启TUN模式,这一点至关重要,直接决定安装能否顺利完成,在此郑重提醒三遍:科学上网,科学上网,科学上网。查看完整安装教程 →
advanced-array-patterns by josiahsiegel/claude-plugin-marketplace
npx skills add https://github.com/josiahsiegel/claude-plugin-marketplace --skill advanced-array-patterns

强制规定:在 Windows 上始终对文件路径使用反斜杠
在 Windows 上使用编辑或写入工具时,你必须在文件路径中使用反斜杠(\),而不是正斜杠(/)。
关于 bash 数组的综合指南,包括索引数组、关联数组、mapfile/readarray 以及遵循 2025 年最佳实践的高级操作模式。
#!/usr/bin/env bash
set -euo pipefail
# Method 1: direct assignment
files=("file1.txt" "file2.txt" "file with spaces.txt")
# Method 2: compound assignment via declare
declare -a numbers=(1 2 3 4 5)
# Method 3: element-by-element assignment
fruits[0]="apple"
fruits[1]="banana"
fruits[2]="cherry"
# Method 4: from command output (beware of word splitting)
# ✗ DANGEROUS - this is a plain string, not an array; later unquoted use splits on whitespace
files_bad=$(ls)
# ✓ SAFE - mapfile stores one output line per element, preserving names with spaces
mapfile -t files_good < <(find . -name "*.txt")
# Method 5: brace expansion
numbers=({1..100})
letters=({a..z})
#!/usr/bin/env bash
set -euo pipefail
arr=("first" "second" "third" "fourth" "fifth")
# Length (number of elements)
echo "Length: ${#arr[@]}" # 5
# Element access
echo "First: ${arr[0]}"
echo "Last: ${arr[-1]}" # negative indices require Bash 4.3+
echo "Second to last: ${arr[-2]}"
# All elements (quoted so items containing spaces stay intact)
for item in "${arr[@]}"; do
echo "Item: $item"
done
# All indices
for idx in "${!arr[@]}"; do
echo "Index $idx: ${arr[$idx]}"
done
# Slice (offset:length)
echo "${arr[@]:1:3}" # second third fourth
# Slice from offset to the end
echo "${arr[@]:2}" # third fourth fifth
# Append an element
arr+=("sixth")
# Insert at a position (rebuild the array from two slices)
arr=("${arr[@]:0:2}" "inserted" "${arr[@]:2}")
# Remove by index. NOTE: unset leaves a hole in the index sequence;
# it does not renumber the remaining elements.
unset 'arr[2]'
# Remove by value (all occurrences) by rebuilding
arr_new=()
for item in "${arr[@]}"; do
[[ "$item" != "second" ]] && arr_new+=("$item")
done
arr=("${arr_new[@]}")
# Emptiness check
if [[ ${#arr[@]} -eq 0 ]]; then
echo "Array is empty"
fi
# Membership test: contains NEEDLE ITEM... -> status 0 if found, 1 otherwise
contains() {
local needle="$1"
shift
local item
for item in "$@"; do
[[ "$item" == "$needle" ]] && return 0
done
return 1
}
if contains "third" "${arr[@]}"; then
echo "Found 'third'"
fi
广告位招租
在这里展示您的产品或服务
触达数万 AI 开发者,精准高效
#!/usr/bin/env bash
set -euo pipefail
arr=("apple" "banana" "cherry" "date")
# Map (transform every element)
upper_arr=()
for item in "${arr[@]}"; do
upper_arr+=("${item^^}") # uppercase (Bash 4+)
done
# Filter (keep items longer than 5 characters)
filtered=()
for item in "${arr[@]}"; do
[[ ${#item} -gt 5 ]] && filtered+=("$item")
done
# Join an array into one string: "${arr[*]}" joins with the first char of IFS.
# NOTE(review): 'unset IFS' restores default splitting, but does NOT restore a
# custom IFS that may have been set earlier — confirm if that matters here.
IFS=','
joined="${arr[*]}"
unset IFS
echo "$joined" # apple,banana,cherry,date
# Split a string into an array
IFS=',' read -ra split_arr <<< "one,two,three"
# Unique values (first occurrence wins)
declare -A seen
unique=()
for item in "${arr[@]}"; do
if [[ -z "${seen[$item]:-}" ]]; then
seen[$item]=1
unique+=("$item")
fi
done
# Sort (newline-delimited round-trip: breaks if elements contain newlines)
readarray -t sorted < <(printf '%s\n' "${arr[@]}" | sort)
# Reverse by iterating indices backwards
reversed=()
for ((i=${#arr[@]}-1; i>=0; i--)); do
reversed+=("${arr[$i]}")
done
# Or with tac (same newline caveat as sort)
readarray -t reversed < <(printf '%s\n' "${arr[@]}" | tac)
#!/usr/bin/env bash
set -euo pipefail
# Associative arrays MUST be declared with -A before use
declare -A config
# Assignment
config["host"]="localhost"
config["port"]="8080"
config["debug"]="true"
# Or compound assignment
declare -A user=(
[name]="John Doe"
[email]="john@example.com"
[role]="admin"
)
# Access
echo "Host: ${config[host]}"
echo "User: ${user[name]}"
# Default value when a key is missing
echo "${config[missing]:-default}"
# Key-existence check (-v, Bash 4.3+)
if [[ -v config[host] ]]; then
echo "Host is set"
fi
# Alternative check: ${var+x} expands even when the stored value is empty
if [[ -n "${config[host]+x}" ]]; then
echo "Host key exists (even if empty)"
fi
# All keys (iteration order of associative arrays is unspecified)
echo "Keys: ${!config[@]}"
# All values
echo "Values: ${config[@]}"
# Size (number of keys)
echo "Size: ${#config[@]}"
# Iterate over key/value pairs
for key in "${!config[@]}"; do
echo "$key = ${config[$key]}"
done
# Delete one key (quote the subscript so it is not glob-expanded)
unset 'config[debug]'
# Clear the whole array
config=()
#!/usr/bin/env bash
set -euo pipefail

# Parse KEY=VALUE lines from a config file into the global CONFIG map.
# Skips comments and blank lines; trims whitespace around keys and values.
parse_config() {
  local config_file="$1"
  declare -gA CONFIG # global associative array
  # '|| [[ -n "$key" ]]' keeps the last line even without a trailing newline
  while IFS='=' read -r key value || [[ -n "$key" ]]; do
    # skip comments and empty lines
    [[ "$key" =~ ^[[:space:]]*# ]] && continue
    [[ -z "$key" ]] && continue
    # trim whitespace
    key="${key//[[:space:]]/}"
    value="${value#"${value%%[![:space:]]*}"}" # left trim
    value="${value%"${value##*[![:space:]]}"}" # right trim
    CONFIG["$key"]="$value"
  done < "$config_file"
}

# Usage
parse_config "/etc/myapp.conf"
echo "Database: ${CONFIG[database]:-not set}"

# Counter/frequency map: print the 10 most common words in a file.
count_words() {
  local file="$1"
  declare -A word_count
  while read -ra words || [[ ${#words[@]} -gt 0 ]]; do
    for word in "${words[@]}"; do
      # normalize: lowercase, strip non-letters
      word="${word,,}"
      word="${word//[^a-z]/}"
      # Pre-increment on purpose: (( word_count[w]++ )) evaluates to the OLD
      # value — 0 the first time a word is seen — giving exit status 1, which
      # aborts the script under 'set -e'. (( ++word_count[w] )) evaluates to
      # the new, non-zero value and always succeeds.
      [[ -n "$word" ]] && (( ++word_count[$word] ))
    done
  done < "$file"
  # print sorted by count, descending
  for word in "${!word_count[@]}"; do
    echo "${word_count[$word]} $word"
  done | sort -rn | head -10
}

# Caching pattern: memoize expensive_computation results by key.
declare -A CACHE
cached_expensive_operation() {
  local key="$1"
  # '+x' detects a cached entry even when the cached value is empty
  if [[ -n "${CACHE[$key]+x}" ]]; then
    echo "${CACHE[$key]}"
    return 0
  fi
  # compute and cache
  local result
  result=$(expensive_computation "$key")
  CACHE["$key"]="$result"
  echo "$result"
}

# JSON-like nested data via delimited keys (bash has no nested arrays)
declare -A data
data["user.name"]="John"
data["user.email"]="john@example.com"
data["user.address.city"]="NYC"
data["user.address.zip"]="10001"
# Access nested value
echo "City: ${data[user.address.city]}"
#!/usr/bin/env bash
set -euo pipefail
# Read a file into an array (one line per element)
mapfile -t lines < file.txt
# readarray is an exact synonym:
readarray -t lines < file.txt
# -t strips the trailing newline from each element;
# without it every element ends in \n
# Process line by line
for line in "${lines[@]}"; do
echo "Line: $line"
done
# From command output (process substitution keeps the array in this shell)
mapfile -t files < <(find . -name "*.sh")
# From a here-doc (quoted 'EOF' disables expansion inside the body)
mapfile -t data <<'EOF'
line1
line2
line3
EOF
#!/usr/bin/env bash
set -euo pipefail
# -n COUNT: read at most COUNT lines
mapfile -t -n 10 first_10 < large_file.txt
# -s COUNT: skip the first COUNT lines
mapfile -t -s 1 skip_header < data.csv # skip the header row
# -O INDEX: start storing at INDEX instead of 0 (append pattern)
existing_array=("a" "b")
mapfile -t -O "${#existing_array[@]}" existing_array < more_data.txt
# -d DELIM: use DELIM instead of newline (Bash 4.4+)
# NUL-delimited input is safe even for filenames containing newlines
mapfile -t -d '' files < <(find . -name "*.txt" -print0)
# -C CALLBACK: run CALLBACK every QUANTUM lines (e.g. progress reporting)
# -c QUANTUM: lines between callbacks (default 5000)
process_chunk() {
local index=$1
echo "Processing lines around index $index" >&2
}
# NOTE(review): export -f should not be needed here — the -C callback runs in
# the current shell, not a subshell. Kept as in the original; confirm intent.
export -f process_chunk
mapfile -t -c 1000 -C process_chunk lines < huge_file.txt
#!/usr/bin/env bash
set -euo pipefail

# Parse a CSV file into ARRAY (one element per row, fields re-joined with ',').
# NOTE: naive split — does not handle quoted fields containing commas.
parse_csv() {
  local csv_file="$1"
  local -n result_array="$2" # nameref (Bash 4.3+)
  # Function-local IFS=',' both splits fields on read AND re-joins them via
  # "${row[*]}". (The original joined with the default IFS, silently turning
  # commas into spaces despite claiming to store a delimited string.)
  local IFS=','
  # '|| ...' keeps a final row that lacks a trailing newline
  while read -ra row || [[ ${#row[@]} -gt 0 ]]; do
    result_array+=("${row[*]}")
  done < "$csv_file"
}

# Better: simulate a 2D array with "row,col" keys in an associative array
declare -A csv_data
row_num=0
while IFS=',' read -ra fields; do
  for col_num in "${!fields[@]}"; do
    csv_data["$row_num,$col_num"]="${fields[$col_num]}"
  done
  # '+=' evaluates to the NEW value (>=1, exit status 0). The original's
  # (( row_num++ )) evaluated to the old value 0 on the first row, giving
  # exit status 1 and aborting the whole script under 'set -e'.
  (( row_num += 1 ))
done < data.csv

# Access a cell
echo "Row 2, Col 3: ${csv_data[2,3]}"
#!/usr/bin/env bash
set -euo pipefail
# ✗ SLOW - command substitution inside the loop
slow_build() {
local arr=()
for i in {1..1000}; do
arr+=("$(echo "$i")") # forks a subshell per element!
done
}
# ✓ FAST - plain assignment, no forks
fast_build() {
local arr=()
for i in {1..1000}; do
arr+=("$i") # no subshell
done
}
# ✓ FASTEST for file data - a single mapfile call
fastest_file_read() {
mapfile -t arr < file.txt
}
#!/usr/bin/env bash
set -euo pipefail
# NOTE(review): both functions read a global 'numbers' array that is never
# defined in this snippet — under 'set -u' calling them as-is would abort.
# ✗ SLOW - spawns a subshell (and bc) on every iteration
slow_process() {
local sum=0
for num in "${numbers[@]}"; do
result=$(echo "$num * 2" | bc) # subshell!
((sum += result))
done
}
# ✓ FAST - builtin bash arithmetic, no forks
fast_process() {
local sum=0
for num in "${numbers[@]}"; do
((sum += num * 2))
done
}
# ✓ FAST - process substitution to read two streams in lockstep
while read -r line1 <&3 && read -r line2 <&4; do
echo "$line1 | $line2"
done 3< <(command1) 4< <(command2)
#!/usr/bin/env bash
set -euo pipefail
# For very large arrays, consider:
# 1. processing in chunks
# 2. delegating to external tools (awk, sort)
# 3. streaming instead of loading everything into memory
# Chunked processing: passes each slice to process_chunk (defined elsewhere)
process_in_chunks() {
local -n arr="$1"
local chunk_size="${2:-1000}"
local len="${#arr[@]}"
for ((i=0; i<len; i+=chunk_size)); do
local chunk=("${arr[@]:i:chunk_size}")
process_chunk "${chunk[@]}"
done
}
# Stream processing (memory efficient)
# Instead of:
# mapfile -t all_lines < huge_file.txt
# process "${all_lines[@]}"
# Use:
while IFS= read -r line; do
process_line "$line"
done < huge_file.txt
#!/usr/bin/env bash
# Requires Bash 5.2+
set -euo pipefail
declare -A config
# Subscript expressions are evaluated exactly once in 5.2+
key="host"
config[$key]="localhost" # evaluated correctly
# '@' and '*' used as literal keys of an associative array.
# NOTE(review): support and quoting rules for literal '@'/'*' subscripts are
# version-sensitive — verify on the target bash version before relying on this.
declare -A special
special[@]="at sign value"
special[*]="asterisk value"
special[normal]="normal value"
# Since 5.2 this removes only the '@' key, not the whole array
unset 'special[@]'
#!/usr/bin/env bash
# Requires Bash 5.3 (GLOBSORT variable)
set -euo pipefail
# Sort glob results by modification time; the leading '-' reverses the
# default ascending order, so the newest files come first.
GLOBSORT="-mtime"
recent_files=(*.txt)
# Sort by file size (ascending)
GLOBSORT="size"
files_by_size=(*.log)
# Back to the default alphabetical ordering
GLOBSORT="name"
#!/usr/bin/env bash
set -euo pipefail

declare -a STACK=()

# Push a value onto the stack.
push() {
  STACK+=("$1")
}

# Print and remove the top element; fails with status 1 when empty.
# IMPORTANT: call pop directly, never as "$(pop)" — command substitution runs
# in a subshell, so the 'unset' would be lost and every "$(pop)" would print
# the same element again (the original usage had exactly this bug).
pop() {
  if [[ ${#STACK[@]} -eq 0 ]]; then
    echo "Stack empty" >&2
    return 1
  fi
  echo "${STACK[-1]}"
  unset 'STACK[-1]'
}

# Print the top element without removing it (no mutation, so "$(peek)" is safe).
peek() {
  if [[ ${#STACK[@]} -gt 0 ]]; then
    echo "${STACK[-1]}"
  fi
}

# Usage
push "first"
push "second"
push "third"
echo "Top: $(peek)" # third
pop # third  — runs in this shell, so the element really is removed
pop # second — the original "$(pop)" printed "third" here a second time
#!/usr/bin/env bash
set -euo pipefail

declare -a QUEUE=()

# Append a value to the tail of the queue.
enqueue() {
  QUEUE+=("$1")
}

# Print and remove the head element; fails with status 1 when empty.
# IMPORTANT: call dequeue directly, never as "$(dequeue)" — command
# substitution runs in a subshell, so the removal would be lost and every
# "$(dequeue)" would print the same head element again (the original usage
# had exactly this bug).
dequeue() {
  if [[ ${#QUEUE[@]} -eq 0 ]]; then
    echo "Queue empty" >&2
    return 1
  fi
  echo "${QUEUE[0]}"
  QUEUE=("${QUEUE[@]:1}")
}

# Usage
enqueue "task1"
enqueue "task2"
enqueue "task3"
dequeue # task1 — runs in this shell, so the head really is removed
dequeue # task2 — the original "$(dequeue)" printed "task1" here again
#!/usr/bin/env bash
set -euo pipefail

# Union: every distinct element of either array, in first-seen order,
# printed one per line.
array_union() {
  local -n _lhs="$1"
  local -n _rhs="$2"
  local -A _marked
  local _out=() _elem
  for _elem in "${_lhs[@]}" "${_rhs[@]}"; do
    if [[ -z "${_marked[$_elem]:-}" ]]; then
      _marked[$_elem]=1
      _out+=("$_elem")
    fi
  done
  printf '%s\n' "${_out[@]}"
}

# Intersection: elements of the second array that also occur in the first,
# printed one per line in the second array's order.
array_intersection() {
  local -n _lhs="$1"
  local -n _rhs="$2"
  local -A _lookup
  local _out=() _elem
  for _elem in "${_lhs[@]}"; do _lookup[$_elem]=1; done
  for _elem in "${_rhs[@]}"; do
    if [[ -n "${_lookup[$_elem]:-}" ]]; then
      _out+=("$_elem")
    fi
  done
  printf '%s\n' "${_out[@]}"
}

# Difference: elements of the first array that are absent from the second,
# printed one per line.
array_difference() {
  local -n _lhs="$1"
  local -n _rhs="$2"
  local -A _exclude
  local _out=() _elem
  for _elem in "${_rhs[@]}"; do _exclude[$_elem]=1; done
  for _elem in "${_lhs[@]}"; do
    if [[ -z "${_exclude[$_elem]:-}" ]]; then
      _out+=("$_elem")
    fi
  done
  printf '%s\n' "${_out[@]}"
}
掌握 bash 数组以实现高效的数据操作,并避免常见的陷阱,如单词分割和子shell开销。
每周安装数
58
代码仓库
GitHub 星标数
21
首次出现
2026年1月24日
安全审计
安装于
claude-code: 46
gemini-cli: 44
opencode: 44
codex: 41
cursor: 39
github-copilot: 37
MANDATORY: Always Use Backslashes on Windows for File Paths
When using Edit or Write tools on Windows, you MUST use backslashes (\) in file paths, NOT forward slashes (/).
Comprehensive guide to bash arrays including indexed arrays, associative arrays, mapfile/readarray, and advanced manipulation patterns following 2025 best practices.
#!/usr/bin/env bash
set -euo pipefail
# Method 1: direct assignment
files=("file1.txt" "file2.txt" "file with spaces.txt")
# Method 2: compound assignment via declare
declare -a numbers=(1 2 3 4 5)
# Method 3: element-by-element assignment
fruits[0]="apple"
fruits[1]="banana"
fruits[2]="cherry"
# Method 4: from command output (beware of word splitting)
# ✗ DANGEROUS - this is a plain string, not an array; later unquoted use splits on whitespace
files_bad=$(ls)
# ✓ SAFE - mapfile stores one output line per element, preserving names with spaces
mapfile -t files_good < <(find . -name "*.txt")
# Method 5: brace expansion
numbers=({1..100})
letters=({a..z})
#!/usr/bin/env bash
set -euo pipefail
arr=("first" "second" "third" "fourth" "fifth")
# Length (number of elements)
echo "Length: ${#arr[@]}" # 5
# Element access
echo "First: ${arr[0]}"
echo "Last: ${arr[-1]}" # negative indices require Bash 4.3+
echo "Second to last: ${arr[-2]}"
# All elements (quoted so items containing spaces stay intact)
for item in "${arr[@]}"; do
echo "Item: $item"
done
# All indices
for idx in "${!arr[@]}"; do
echo "Index $idx: ${arr[$idx]}"
done
# Slice (offset:length)
echo "${arr[@]:1:3}" # second third fourth
# Slice from offset to the end
echo "${arr[@]:2}" # third fourth fifth
# Append an element
arr+=("sixth")
# Insert at a position (rebuild the array from two slices)
arr=("${arr[@]:0:2}" "inserted" "${arr[@]:2}")
# Remove by index. NOTE: unset leaves a hole in the index sequence;
# it does not renumber the remaining elements.
unset 'arr[2]'
# Remove by value (all occurrences) by rebuilding
arr_new=()
for item in "${arr[@]}"; do
[[ "$item" != "second" ]] && arr_new+=("$item")
done
arr=("${arr_new[@]}")
# Emptiness check
if [[ ${#arr[@]} -eq 0 ]]; then
echo "Array is empty"
fi
# Membership test: contains NEEDLE ITEM... -> status 0 if found, 1 otherwise
contains() {
local needle="$1"
shift
local item
for item in "$@"; do
[[ "$item" == "$needle" ]] && return 0
done
return 1
}
if contains "third" "${arr[@]}"; then
echo "Found 'third'"
fi
#!/usr/bin/env bash
set -euo pipefail
arr=("apple" "banana" "cherry" "date")
# Map (transform every element)
upper_arr=()
for item in "${arr[@]}"; do
upper_arr+=("${item^^}") # uppercase (Bash 4+)
done
# Filter (keep items longer than 5 characters)
filtered=()
for item in "${arr[@]}"; do
[[ ${#item} -gt 5 ]] && filtered+=("$item")
done
# Join an array into one string: "${arr[*]}" joins with the first char of IFS.
# NOTE(review): 'unset IFS' restores default splitting, but does NOT restore a
# custom IFS that may have been set earlier — confirm if that matters here.
IFS=','
joined="${arr[*]}"
unset IFS
echo "$joined" # apple,banana,cherry,date
# Split a string into an array
IFS=',' read -ra split_arr <<< "one,two,three"
# Unique values (first occurrence wins)
declare -A seen
unique=()
for item in "${arr[@]}"; do
if [[ -z "${seen[$item]:-}" ]]; then
seen[$item]=1
unique+=("$item")
fi
done
# Sort (newline-delimited round-trip: breaks if elements contain newlines)
readarray -t sorted < <(printf '%s\n' "${arr[@]}" | sort)
# Reverse by iterating indices backwards
reversed=()
for ((i=${#arr[@]}-1; i>=0; i--)); do
reversed+=("${arr[$i]}")
done
# Or with tac (same newline caveat as sort)
readarray -t reversed < <(printf '%s\n' "${arr[@]}" | tac)
#!/usr/bin/env bash
set -euo pipefail
# Associative arrays MUST be declared with -A before use
declare -A config
# Assignment
config["host"]="localhost"
config["port"]="8080"
config["debug"]="true"
# Or compound assignment
declare -A user=(
[name]="John Doe"
[email]="john@example.com"
[role]="admin"
)
# Access
echo "Host: ${config[host]}"
echo "User: ${user[name]}"
# Default value when a key is missing
echo "${config[missing]:-default}"
# Key-existence check (-v, Bash 4.3+)
if [[ -v config[host] ]]; then
echo "Host is set"
fi
# Alternative check: ${var+x} expands even when the stored value is empty
if [[ -n "${config[host]+x}" ]]; then
echo "Host key exists (even if empty)"
fi
# All keys (iteration order of associative arrays is unspecified)
echo "Keys: ${!config[@]}"
# All values
echo "Values: ${config[@]}"
# Size (number of keys)
echo "Size: ${#config[@]}"
# Iterate over key/value pairs
for key in "${!config[@]}"; do
echo "$key = ${config[$key]}"
done
# Delete one key (quote the subscript so it is not glob-expanded)
unset 'config[debug]'
# Clear the whole array
config=()
#!/usr/bin/env bash
set -euo pipefail

# Parse KEY=VALUE lines from a config file into the global CONFIG map.
# Skips comments and blank lines; trims whitespace around keys and values.
parse_config() {
  local config_file="$1"
  declare -gA CONFIG # global associative array
  # '|| [[ -n "$key" ]]' keeps the last line even without a trailing newline
  while IFS='=' read -r key value || [[ -n "$key" ]]; do
    # skip comments and empty lines
    [[ "$key" =~ ^[[:space:]]*# ]] && continue
    [[ -z "$key" ]] && continue
    # trim whitespace
    key="${key//[[:space:]]/}"
    value="${value#"${value%%[![:space:]]*}"}" # left trim
    value="${value%"${value##*[![:space:]]}"}" # right trim
    CONFIG["$key"]="$value"
  done < "$config_file"
}

# Usage
parse_config "/etc/myapp.conf"
echo "Database: ${CONFIG[database]:-not set}"

# Counter/frequency map: print the 10 most common words in a file.
count_words() {
  local file="$1"
  declare -A word_count
  while read -ra words || [[ ${#words[@]} -gt 0 ]]; do
    for word in "${words[@]}"; do
      # normalize: lowercase, strip non-letters
      word="${word,,}"
      word="${word//[^a-z]/}"
      # Pre-increment on purpose: (( word_count[w]++ )) evaluates to the OLD
      # value — 0 the first time a word is seen — giving exit status 1, which
      # aborts the script under 'set -e'. (( ++word_count[w] )) evaluates to
      # the new, non-zero value and always succeeds.
      [[ -n "$word" ]] && (( ++word_count[$word] ))
    done
  done < "$file"
  # print sorted by count, descending
  for word in "${!word_count[@]}"; do
    echo "${word_count[$word]} $word"
  done | sort -rn | head -10
}

# Caching pattern: memoize expensive_computation results by key.
declare -A CACHE
cached_expensive_operation() {
  local key="$1"
  # '+x' detects a cached entry even when the cached value is empty
  if [[ -n "${CACHE[$key]+x}" ]]; then
    echo "${CACHE[$key]}"
    return 0
  fi
  # compute and cache
  local result
  result=$(expensive_computation "$key")
  CACHE["$key"]="$result"
  echo "$result"
}

# JSON-like nested data via delimited keys (bash has no nested arrays)
declare -A data
data["user.name"]="John"
data["user.email"]="john@example.com"
data["user.address.city"]="NYC"
data["user.address.zip"]="10001"
# Access nested value
echo "City: ${data[user.address.city]}"
#!/usr/bin/env bash
set -euo pipefail
# Read a file into an array (one line per element)
mapfile -t lines < file.txt
# readarray is an exact synonym:
readarray -t lines < file.txt
# -t strips the trailing newline from each element;
# without it every element ends in \n
# Process line by line
for line in "${lines[@]}"; do
echo "Line: $line"
done
# From command output (process substitution keeps the array in this shell)
mapfile -t files < <(find . -name "*.sh")
# From a here-doc (quoted 'EOF' disables expansion inside the body)
mapfile -t data <<'EOF'
line1
line2
line3
EOF
#!/usr/bin/env bash
set -euo pipefail
# -n COUNT: read at most COUNT lines
mapfile -t -n 10 first_10 < large_file.txt
# -s COUNT: skip the first COUNT lines
mapfile -t -s 1 skip_header < data.csv # skip the header row
# -O INDEX: start storing at INDEX instead of 0 (append pattern)
existing_array=("a" "b")
mapfile -t -O "${#existing_array[@]}" existing_array < more_data.txt
# -d DELIM: use DELIM instead of newline (Bash 4.4+)
# NUL-delimited input is safe even for filenames containing newlines
mapfile -t -d '' files < <(find . -name "*.txt" -print0)
# -C CALLBACK: run CALLBACK every QUANTUM lines (e.g. progress reporting)
# -c QUANTUM: lines between callbacks (default 5000)
process_chunk() {
local index=$1
echo "Processing lines around index $index" >&2
}
# NOTE(review): export -f should not be needed here — the -C callback runs in
# the current shell, not a subshell. Kept as in the original; confirm intent.
export -f process_chunk
mapfile -t -c 1000 -C process_chunk lines < huge_file.txt
#!/usr/bin/env bash
set -euo pipefail

# Parse a CSV file into ARRAY (one element per row, fields re-joined with ',').
# NOTE: naive split — does not handle quoted fields containing commas.
parse_csv() {
  local csv_file="$1"
  local -n result_array="$2" # nameref (Bash 4.3+)
  # Function-local IFS=',' both splits fields on read AND re-joins them via
  # "${row[*]}". (The original joined with the default IFS, silently turning
  # commas into spaces despite claiming to store a delimited string.)
  local IFS=','
  # '|| ...' keeps a final row that lacks a trailing newline
  while read -ra row || [[ ${#row[@]} -gt 0 ]]; do
    result_array+=("${row[*]}")
  done < "$csv_file"
}

# Better: simulate a 2D array with "row,col" keys in an associative array
declare -A csv_data
row_num=0
while IFS=',' read -ra fields; do
  for col_num in "${!fields[@]}"; do
    csv_data["$row_num,$col_num"]="${fields[$col_num]}"
  done
  # '+=' evaluates to the NEW value (>=1, exit status 0). The original's
  # (( row_num++ )) evaluated to the old value 0 on the first row, giving
  # exit status 1 and aborting the whole script under 'set -e'.
  (( row_num += 1 ))
done < data.csv

# Access a cell
echo "Row 2, Col 3: ${csv_data[2,3]}"
#!/usr/bin/env bash
set -euo pipefail
# ✗ SLOW - command substitution inside the loop
slow_build() {
local arr=()
for i in {1..1000}; do
arr+=("$(echo "$i")") # forks a subshell per element!
done
}
# ✓ FAST - plain assignment, no forks
fast_build() {
local arr=()
for i in {1..1000}; do
arr+=("$i") # no subshell
done
}
# ✓ FASTEST for file data - a single mapfile call
fastest_file_read() {
mapfile -t arr < file.txt
}
#!/usr/bin/env bash
set -euo pipefail
# NOTE(review): both functions read a global 'numbers' array that is never
# defined in this snippet — under 'set -u' calling them as-is would abort.
# ✗ SLOW - spawns a subshell (and bc) on every iteration
slow_process() {
local sum=0
for num in "${numbers[@]}"; do
result=$(echo "$num * 2" | bc) # subshell!
((sum += result))
done
}
# ✓ FAST - builtin bash arithmetic, no forks
fast_process() {
local sum=0
for num in "${numbers[@]}"; do
((sum += num * 2))
done
}
# ✓ FAST - process substitution to read two streams in lockstep
while read -r line1 <&3 && read -r line2 <&4; do
echo "$line1 | $line2"
done 3< <(command1) 4< <(command2)
#!/usr/bin/env bash
set -euo pipefail
# For very large arrays, consider:
# 1. processing in chunks
# 2. delegating to external tools (awk, sort)
# 3. streaming instead of loading everything into memory
# Chunked processing: passes each slice to process_chunk (defined elsewhere)
process_in_chunks() {
local -n arr="$1"
local chunk_size="${2:-1000}"
local len="${#arr[@]}"
for ((i=0; i<len; i+=chunk_size)); do
local chunk=("${arr[@]:i:chunk_size}")
process_chunk "${chunk[@]}"
done
}
# Stream processing (memory efficient)
# Instead of:
# mapfile -t all_lines < huge_file.txt
# process "${all_lines[@]}"
# Use:
while IFS= read -r line; do
process_line "$line"
done < huge_file.txt
#!/usr/bin/env bash
# Requires Bash 5.2+
set -euo pipefail
declare -A config
# Subscript expressions are evaluated exactly once in 5.2+
key="host"
config[$key]="localhost" # evaluated correctly
# '@' and '*' used as literal keys of an associative array.
# NOTE(review): support and quoting rules for literal '@'/'*' subscripts are
# version-sensitive — verify on the target bash version before relying on this.
declare -A special
special[@]="at sign value"
special[*]="asterisk value"
special[normal]="normal value"
# Since 5.2 this removes only the '@' key, not the whole array
unset 'special[@]'
#!/usr/bin/env bash
# Requires Bash 5.3 (GLOBSORT variable)
set -euo pipefail
# Sort glob results by modification time; the leading '-' reverses the
# default ascending order, so the newest files come first.
GLOBSORT="-mtime"
recent_files=(*.txt)
# Sort by file size (ascending)
GLOBSORT="size"
files_by_size=(*.log)
# Back to the default alphabetical ordering
GLOBSORT="name"
#!/usr/bin/env bash
set -euo pipefail

declare -a STACK=()

# Push a value onto the stack.
push() {
  STACK+=("$1")
}

# Print and remove the top element; fails with status 1 when empty.
# IMPORTANT: call pop directly, never as "$(pop)" — command substitution runs
# in a subshell, so the 'unset' would be lost and every "$(pop)" would print
# the same element again (the original usage had exactly this bug).
pop() {
  if [[ ${#STACK[@]} -eq 0 ]]; then
    echo "Stack empty" >&2
    return 1
  fi
  echo "${STACK[-1]}"
  unset 'STACK[-1]'
}

# Print the top element without removing it (no mutation, so "$(peek)" is safe).
peek() {
  if [[ ${#STACK[@]} -gt 0 ]]; then
    echo "${STACK[-1]}"
  fi
}

# Usage
push "first"
push "second"
push "third"
echo "Top: $(peek)" # third
pop # third  — runs in this shell, so the element really is removed
pop # second — the original "$(pop)" printed "third" here a second time
#!/usr/bin/env bash
set -euo pipefail

declare -a QUEUE=()

# Append a value to the tail of the queue.
enqueue() {
  QUEUE+=("$1")
}

# Print and remove the head element; fails with status 1 when empty.
# IMPORTANT: call dequeue directly, never as "$(dequeue)" — command
# substitution runs in a subshell, so the removal would be lost and every
# "$(dequeue)" would print the same head element again (the original usage
# had exactly this bug).
dequeue() {
  if [[ ${#QUEUE[@]} -eq 0 ]]; then
    echo "Queue empty" >&2
    return 1
  fi
  echo "${QUEUE[0]}"
  QUEUE=("${QUEUE[@]:1}")
}

# Usage
enqueue "task1"
enqueue "task2"
enqueue "task3"
dequeue # task1 — runs in this shell, so the head really is removed
dequeue # task2 — the original "$(dequeue)" printed "task1" here again
#!/usr/bin/env bash
set -euo pipefail

# Union: every distinct element of either array, in first-seen order,
# printed one per line.
array_union() {
  local -n _lhs="$1"
  local -n _rhs="$2"
  local -A _marked
  local _out=() _elem
  for _elem in "${_lhs[@]}" "${_rhs[@]}"; do
    if [[ -z "${_marked[$_elem]:-}" ]]; then
      _marked[$_elem]=1
      _out+=("$_elem")
    fi
  done
  printf '%s\n' "${_out[@]}"
}

# Intersection: elements of the second array that also occur in the first,
# printed one per line in the second array's order.
array_intersection() {
  local -n _lhs="$1"
  local -n _rhs="$2"
  local -A _lookup
  local _out=() _elem
  for _elem in "${_lhs[@]}"; do _lookup[$_elem]=1; done
  for _elem in "${_rhs[@]}"; do
    if [[ -n "${_lookup[$_elem]:-}" ]]; then
      _out+=("$_elem")
    fi
  done
  printf '%s\n' "${_out[@]}"
}

# Difference: elements of the first array that are absent from the second,
# printed one per line.
array_difference() {
  local -n _lhs="$1"
  local -n _rhs="$2"
  local -A _exclude
  local _out=() _elem
  for _elem in "${_rhs[@]}"; do _exclude[$_elem]=1; done
  for _elem in "${_lhs[@]}"; do
    if [[ -z "${_exclude[$_elem]:-}" ]]; then
      _out+=("$_elem")
    fi
  done
  printf '%s\n' "${_out[@]}"
}
Master bash arrays for efficient data manipulation and avoid common pitfalls like word splitting and subshell overhead.
Weekly Installs
58
Repository
GitHub Stars
21
First Seen
Jan 24, 2026
Security Audits
Security Audits — Gen Agent Trust Hub: Pass · Socket: Pass · Snyk: Pass
Installed on
claude-code: 46
gemini-cli: 44
opencode: 44
codex: 41
cursor: 39
github-copilot: 37
Lark Contact CLI 工具:高效搜索与获取飞书用户信息,提升团队协作效率
48,200 周安装