commit 05464fc1ac by Adam Warner (committed via GitHub)

@ -1,3 +1,4 @@
doubleclick
wan
nwe
padd

@ -23,6 +23,13 @@ jobs:
# If FAIL is 1 then we fail.
[[ $FAIL == 1 ]] && exit 1 || echo "Scripts are executable!"
- name: Run shellcheck
uses: ludeeus/action-shellcheck@master
with:
check_together: 'yes'
format: tty
severity: error
- name: Spell-Checking
  uses: codespell-project/actions-codespell@master
  with:
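For contributors who want to reproduce the new lint step locally before pushing, a rough equivalent of the shellcheck action above might look like this (assumes shellcheck is installed; the file selection is illustrative):

    # Check all tracked shell scripts together, reporting only error-severity findings,
    # mirroring check_together: 'yes' and severity: error from the workflow step above
    git ls-files '*.sh' | xargs shellcheck --severity=error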

@ -1,35 +0,0 @@
# Pi-hole: A black hole for Internet advertisements
# (c) 2017 Pi-hole, LLC (https://pi-hole.net)
# Network-wide ad blocking via your own hardware.
#
# Dnsmasq config for Pi-hole's FTLDNS
#
# This file is copyright under the latest version of the EUPL.
# Please see LICENSE file for your rights under this license.
###############################################################################
# FILE AUTOMATICALLY POPULATED BY PI-HOLE INSTALL/UPDATE PROCEDURE. #
# ANY CHANGES MADE TO THIS FILE AFTER INSTALL WILL BE LOST ON THE NEXT UPDATE #
# #
# IF YOU WISH TO CHANGE THE UPSTREAM SERVERS, CHANGE THEM IN: #
# /etc/pihole/setupVars.conf #
# #
# ANY OTHER CHANGES SHOULD BE MADE IN A SEPARATE CONFIG FILE #
# WITHIN /etc/dnsmasq.d/yourname.conf #
###############################################################################
addn-hosts=/etc/pihole/local.list
addn-hosts=/etc/pihole/custom.list
domain-needed
localise-queries
bogus-priv
no-resolv
log-queries
log-facility=/var/log/pihole/pihole.log
log-async

@ -1,42 +0,0 @@
# Pi-hole: A black hole for Internet advertisements
# (c) 2021 Pi-hole, LLC (https://pi-hole.net)
# Network-wide ad blocking via your own hardware.
#
# RFC 6761 config file for Pi-hole
#
# This file is copyright under the latest version of the EUPL.
# Please see LICENSE file for your rights under this license.
###############################################################################
# FILE AUTOMATICALLY POPULATED BY PI-HOLE INSTALL/UPDATE PROCEDURE. #
# ANY CHANGES MADE TO THIS FILE AFTER INSTALL WILL BE LOST ON THE NEXT UPDATE #
# #
# CHANGES SHOULD BE MADE IN A SEPARATE CONFIG FILE #
# WITHIN /etc/dnsmasq.d/yourname.conf #
###############################################################################
# RFC 6761: Caching DNS servers SHOULD recognize
# test, localhost, invalid
# names as special and SHOULD NOT attempt to look up NS records for them, or
# otherwise query authoritative DNS servers in an attempt to resolve these
# names.
server=/test/
server=/localhost/
server=/invalid/
# The same RFC requests something similar for
# 10.in-addr.arpa. 21.172.in-addr.arpa. 27.172.in-addr.arpa.
# 16.172.in-addr.arpa. 22.172.in-addr.arpa. 28.172.in-addr.arpa.
# 17.172.in-addr.arpa. 23.172.in-addr.arpa. 29.172.in-addr.arpa.
# 18.172.in-addr.arpa. 24.172.in-addr.arpa. 30.172.in-addr.arpa.
# 19.172.in-addr.arpa. 25.172.in-addr.arpa. 31.172.in-addr.arpa.
# 20.172.in-addr.arpa. 26.172.in-addr.arpa. 168.192.in-addr.arpa.
# Pi-hole implements this via the dnsmasq option "bogus-priv" (see
# 01-pihole.conf) because this also covers IPv6.
# OpenWRT furthermore blocks bind, local, onion domains
# see https://git.openwrt.org/?p=openwrt/openwrt.git;a=blob_plain;f=package/network/services/dnsmasq/files/rfc6761.conf;hb=HEAD
# and https://www.iana.org/assignments/special-use-domain-names/special-use-domain-names.xhtml
# We do not include the ".local" rule ourselves, see https://github.com/pi-hole/pi-hole/pull/4282#discussion_r689112972
server=/bind/
server=/onion/
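If a network really does need one of the intentionally omitted rules (for example the ".local" case discussed in the pull request linked above), the file header points to a separate drop-in as the supported place for it; a hypothetical example:

    # /etc/dnsmasq.d/yourname.conf (file name taken from the header above; any name works)
    # Optional, site-specific: never forward the mDNS zone upstream
    server=/local/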

@ -1,5 +1,5 @@
# Determine if terminal is capable of showing colors
if ([[ -t 1 ]] && [[ $(tput colors) -ge 8 ]]) || [[ "${WEBCALL}" ]]; then
if ([ -t 1 ] && [ $(tput colors) -ge 8 ]) || [ "${WEBCALL}" ]; then
# Bold and underline may not show up on all clients
# If something MUST be emphasized, use both
COL_BOLD=''

@ -0,0 +1,194 @@
#!/usr/bin/env sh
# shellcheck disable=SC3043 #https://github.com/koalaman/shellcheck/wiki/SC3043#exceptions
# Pi-hole: A black hole for Internet advertisements
# (c) 2017 Pi-hole, LLC (https://pi-hole.net)
# Network-wide ad blocking via your own hardware.
#
# Script to hold api functions for use in other scripts
#
# This file is copyright under the latest version of the EUPL.
# Please see LICENSE file for your rights under this license.
# The basic usage steps are
# 1) Test Availability of the API
# 2) Try to authenticate (read password if needed)
# 3) Get the data from the API endpoint
# 4) Delete the session
TestAPIAvailability() {
# as we are running locally, we can get the port value from FTL directly
local chaos_api_list availabilityResponse
# Query the API URLs from FTL using CHAOS TXT local.api.ftl
# The result is a space-separated enumeration of full URLs
# e.g., "http://localhost:80/api/" "https://localhost:443/api/"
chaos_api_list="$(dig +short chaos txt local.api.ftl @127.0.0.1)"
# If the query was not successful, the variable is empty
if [ -z "${chaos_api_list}" ]; then
echo "API not available. Please check connectivity"
exit 1
fi
# Iterate over space-separated list of URLs
while [ -n "${chaos_api_list}" ]; do
# Get the first URL
API_URL="${chaos_api_list%% *}"
# Strip leading and trailing quotes
API_URL="${API_URL%\"}"
API_URL="${API_URL#\"}"
# Test if the API is available at this URL
availabilityResponse=$(curl -skS -o /dev/null -w "%{http_code}" "${API_URL}auth")
# Test if http status code was 200 (OK) or 401 (authentication required)
if [ ! "${availabilityResponse}" = 200 ] && [ ! "${availabilityResponse}" = 401 ]; then
# API is not available at this port/protocol combination
API_PORT=""
else
# API is available at this URL combination
break
fi
# Remove the first URL from the list
local last_api_list
last_api_list="${chaos_api_list}"
chaos_api_list="${chaos_api_list#* }"
# If the list did not change, we are at the last element
if [ "${last_api_list}" = "${chaos_api_list}" ]; then
# Remove the last element
chaos_api_list=""
fi
done
# if the URL list was exhausted without a successful response, no working API URL was found
if [ -z "${chaos_api_list}" ]; then
echo "API not available at: ${API_URL}"
echo "Exiting."
exit 1
fi
}
Authentication() {
# Try to authenticate
LoginAPI
while [ "${validSession}" = false ] || [ -z "${validSession}" ] ; do
echo "Authentication failed. Please enter your Pi-hole password"
# secretly read the password
secretRead; printf '\n'
# Try to authenticate again
LoginAPI
done
# Loop exited, authentication was successful
echo "Authentication successful."
}
LoginAPI() {
sessionResponse="$(curl -skS -X POST "${API_URL}auth" --user-agent "Pi-hole cli " --data "{\"password\":\"${password}\"}" )"
if [ -z "${sessionResponse}" ]; then
echo "No response from FTL server. Please check connectivity"
exit 1
fi
# obtain validity and session ID from session response
validSession=$(echo "${sessionResponse}"| jq .session.valid 2>/dev/null)
SID=$(echo "${sessionResponse}"| jq --raw-output .session.sid 2>/dev/null)
}
DeleteSession() {
# if a valid Session exists (no password required or successful Authentication) and
# SID is not null (successful Authentication only), delete the session
if [ "${validSession}" = true ] && [ ! "${SID}" = null ]; then
# Try to delete the session. Omit the output, but get the http status code
deleteResponse=$(curl -skS -o /dev/null -w "%{http_code}" -X DELETE "${API_URL}auth" -H "Accept: application/json" -H "sid: ${SID}")
case "${deleteResponse}" in
"204") printf "%b" "Session successfully deleted.\n";;
"401") printf "%b" "Logout attempt without a valid session. Unauthorized!\n";;
esac;
fi
}
GetFTLData() {
local data response status
# get the data from querying the API as well as the http status code
response=$(curl -skS -w "%{http_code}" -X GET "${API_URL}$1" -H "Accept: application/json" -H "sid: ${SID}" )
# status are the last 3 characters
status=$(printf %s "${response#"${response%???}"}")
# data is everything from response without the last 3 characters
data=$(printf %s "${response%???}")
if [ "${status}" = 200 ]; then
# response OK
printf %s "${data}"
elif [ "${status}" = 000 ]; then
# connection lost
echo "000"
elif [ "${status}" = 401 ]; then
# unauthorized
echo "401"
fi
}
secretRead() {
# POSIX compliant function to read user-input and
# mask every character entered by (*)
#
# This is challenging, because in POSIX, `read` does not support
# `-s` option (suppressing the input) or
# `-n` option (reading n chars)
# This workaround changes the terminal characteristics to not echo input and later resets this option
# credits https://stackoverflow.com/a/4316765
# showing asterisk instead of password
# https://stackoverflow.com/a/24600839
# https://unix.stackexchange.com/a/464963
# Save current terminal settings (needed for later restore after password prompt)
stty_orig=$(stty -g)
stty -echo # do not echo user input
stty -icanon min 1 time 0 # disable canonical mode https://man7.org/linux/man-pages/man3/termios.3.html
unset password
unset key
unset charcount
charcount=0
while key=$(dd ibs=1 count=1 2>/dev/null); do #read one byte of input
if [ "${key}" = "$(printf '\0' | tr -d '\0')" ] ; then
# Enter - accept password
break
fi
if [ "${key}" = "$(printf '\177')" ] ; then
# Backspace
if [ $charcount -gt 0 ] ; then
charcount=$((charcount-1))
printf '\b \b'
password="${password%?}"
fi
else
# any other character
charcount=$((charcount+1))
printf '*'
password="$password$key"
fi
done
# restore original terminal settings
stty "${stty_orig}"
}
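For illustration, a minimal sketch of how a caller might compose these helpers (not part of this commit; it assumes the file is installed as /opt/pihole/api.sh and that a "stats/summary" endpoint exists, both of which are assumptions here):

    #!/usr/bin/env sh
    # Hypothetical caller of the API helpers above
    . /opt/pihole/api.sh

    TestAPIAvailability                     # discover a working ${API_URL} via CHAOS TXT local.api.ftl
    Authentication                          # prompt (masked via secretRead) until a valid session exists
    summary=$(GetFTLData "stats/summary")   # endpoint name is assumed for this example
    echo "${summary}" | jq .                # pretty-print the JSON payload
    DeleteSession                           # always clean up the session afterwards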

@ -1,577 +0,0 @@
#!/usr/bin/env bash
# shellcheck disable=SC1090,SC1091
# Pi-hole: A black hole for Internet advertisements
# (c) 2017 Pi-hole, LLC (https://pi-hole.net)
# Network-wide ad blocking via your own hardware.
#
# Calculates stats and displays to an LCD
#
# This file is copyright under the latest version of the EUPL.
# Please see LICENSE file for your rights under this license.
LC_ALL=C
LC_NUMERIC=C
# Retrieve stats from FTL engine
pihole-FTL() {
local ftl_port LINE
# shellcheck disable=SC1091
. /opt/pihole/utils.sh
ftl_port=$(getFTLAPIPort)
if [[ -n "$ftl_port" ]]; then
# Open connection to FTL
exec 3<>"/dev/tcp/127.0.0.1/$ftl_port"
# Test if connection is open
if { "true" >&3; } 2> /dev/null; then
# Send command to FTL and ask to quit when finished
echo -e ">$1 >quit" >&3
# Read input until we received an empty string and the connection is
# closed
read -r -t 1 LINE <&3
until [[ -z "${LINE}" ]] && [[ ! -t 3 ]]; do
echo "$LINE" >&1
read -r -t 1 LINE <&3
done
# Close connection
exec 3>&-
exec 3<&-
fi
else
echo "0"
fi
}
# Print spaces to align right-side additional text
printFunc() {
local text_last
title="$1"
title_len="${#title}"
text_main="$2"
text_main_nocol="$text_main"
if [[ "${text_main:0:1}" == "" ]]; then
text_main_nocol=$(sed 's/\[[0-9;]\{1,5\}m//g' <<< "$text_main")
fi
text_main_len="${#text_main_nocol}"
text_addn="$3"
if [[ "$text_addn" == "last" ]]; then
text_addn=""
text_last="true"
fi
# If there is additional text, define max length of text_main
if [[ -n "$text_addn" ]]; then
case "$scr_cols" in
[0-9]|1[0-9]|2[0-9]|3[0-9]|4[0-4]) text_main_max_len="9";;
4[5-9]) text_main_max_len="14";;
*) text_main_max_len="19";;
esac
fi
[[ -z "$text_addn" ]] && text_main_max_len="$(( scr_cols - title_len ))"
# Remove excess characters from main text
if [[ "$text_main_len" -gt "$text_main_max_len" ]]; then
# Trim text without colors
text_main_trim="${text_main_nocol:0:$text_main_max_len}"
# Replace with trimmed text
text_main="${text_main/$text_main_nocol/$text_main_trim}"
fi
# Determine amount of spaces for each line
if [[ -n "$text_last" ]]; then
# Move cursor to end of screen
spc_num=$(( scr_cols - ( title_len + text_main_len ) ))
else
spc_num=$(( text_main_max_len - text_main_len ))
fi
[[ "$spc_num" -le 0 ]] && spc_num="0"
spc=$(printf "%${spc_num}s")
#spc="${spc// /.}" # Debug: Visualize spaces
printf "%s%s$spc" "$title" "$text_main"
if [[ -n "$text_addn" ]]; then
printf "%s(%s)%s\\n" "$COL_NC$COL_DARK_GRAY" "$text_addn" "$COL_NC"
else
# Do not print trailing newline on final line
[[ -z "$text_last" ]] && printf "%s\\n" "$COL_NC"
fi
}
# Perform on first Chrono run (not for JSON formatted string)
get_init_stats() {
calcFunc(){ awk "BEGIN {print $*}" 2> /dev/null; }
# Convert bytes to human-readable format
hrBytes() {
awk '{
num=$1;
if(num==0) {
print "0 B"
} else {
xxx=(num<0?-num:num)
sss=(num<0?-1:1)
split("B KB MB GB TB PB",type)
for(i=5;yyy < 1;i--) {
yyy=xxx / (2^(10*i))
}
printf "%.0f " type[i+2], yyy*sss
}
}' <<< "$1";
}
# Convert seconds to human-readable format
hrSecs() {
day=$(( $1/60/60/24 )); hrs=$(( $1/3600%24 ))
mins=$(( ($1%3600)/60 )); secs=$(( $1%60 ))
[[ "$day" -ge "2" ]] && plu="s"
[[ "$day" -ge "1" ]] && days="$day day${plu}, " || days=""
printf "%s%02d:%02d:%02d\\n" "$days" "$hrs" "$mins" "$secs"
}
# Set Color Codes
coltable="/opt/pihole/COL_TABLE"
if [[ -f "${coltable}" ]]; then
source ${coltable}
else
COL_NC=""
COL_DARK_GRAY=""
COL_LIGHT_GREEN=""
COL_LIGHT_BLUE=""
COL_LIGHT_RED=""
COL_YELLOW=""
COL_LIGHT_RED=""
COL_URG_RED=""
fi
# Get RPi throttle state (RPi 3B only) & model number, or OS distro info
if command -v vcgencmd &> /dev/null; then
local sys_throttle_raw
local sys_rev_raw
sys_throttle_raw=$(vgt=$(sudo vcgencmd get_throttled); echo "${vgt##*x}")
# Active Throttle Notice: https://bit.ly/2gnunOo
if [[ "$sys_throttle_raw" != "0" ]]; then
case "$sys_throttle_raw" in
*0001) thr_type="${COL_YELLOW}Under Voltage";;
*0002) thr_type="${COL_LIGHT_BLUE}Arm Freq Cap";;
*0003) thr_type="${COL_YELLOW}UV${COL_DARK_GRAY},${COL_NC} ${COL_LIGHT_BLUE}AFC";;
*0004) thr_type="${COL_LIGHT_RED}Throttled";;
*0005) thr_type="${COL_YELLOW}UV${COL_DARK_GRAY},${COL_NC} ${COL_LIGHT_RED}TT";;
*0006) thr_type="${COL_LIGHT_BLUE}AFC${COL_DARK_GRAY},${COL_NC} ${COL_LIGHT_RED}TT";;
*0007) thr_type="${COL_YELLOW}UV${COL_DARK_GRAY},${COL_NC} ${COL_LIGHT_BLUE}AFC${COL_DARK_GRAY},${COL_NC} ${COL_LIGHT_RED}TT";;
esac
[[ -n "$thr_type" ]] && sys_throttle="$thr_type${COL_DARK_GRAY}"
fi
sys_rev_raw=$(awk '/Revision/ {print $3}' < /proc/cpuinfo)
case "$sys_rev_raw" in
000[2-6]) sys_model=" 1, Model B";; # 256MB
000[7-9]) sys_model=" 1, Model A";; # 256MB
000d|000e|000f) sys_model=" 1, Model B";; # 512MB
0010|0013) sys_model=" 1, Model B+";; # 512MB
0012|0015) sys_model=" 1, Model A+";; # 256MB
a0104[0-1]|a21041|a22042) sys_model=" 2, Model B";; # 1GB
900021) sys_model=" 1, Model A+";; # 512MB
900032) sys_model=" 1, Model B+";; # 512MB
90009[2-3]|920093) sys_model=" Zero";; # 512MB
9000c1) sys_model=" Zero W";; # 512MB
a02082|a[2-3]2082) sys_model=" 3, Model B";; # 1GB
a020d3) sys_model=" 3, Model B+";; # 1GB
*) sys_model="";;
esac
sys_type="Raspberry Pi$sys_model"
else
source "/etc/os-release"
CODENAME=$(sed 's/[()]//g' <<< "${VERSION/* /}")
sys_type="${NAME/ */} ${CODENAME^} $VERSION_ID"
fi
# Get core count
sys_cores=$(grep -c "^processor" /proc/cpuinfo)
# Test existence of clock speed file for ARM CPU
if [[ -f "/sys/devices/system/cpu/cpu0/cpufreq/scaling_cur_freq" ]]; then
scaling_freq_file="/sys/devices/system/cpu/cpu0/cpufreq/scaling_cur_freq"
fi
# Test existence of temperature file
if [[ -f "/sys/class/thermal/thermal_zone0/temp" ]]; then
temp_file="/sys/class/thermal/thermal_zone0/temp"
elif [[ -f "/sys/class/hwmon/hwmon0/temp1_input" ]]; then
temp_file="/sys/class/hwmon/hwmon0/temp1_input"
else
temp_file=""
fi
# Test existence of setupVars config
if [[ -f "/etc/pihole/setupVars.conf" ]]; then
setupVars="/etc/pihole/setupVars.conf"
fi
}
get_sys_stats() {
local ph_ver_raw
local cpu_raw
local ram_raw
local disk_raw
# Update every 12 refreshes (Def: every 60s)
count=$((count+1))
if [[ "$count" == "1" ]] || (( "$count" % 12 == 0 )); then
# Do not source setupVars if file does not exist
[[ -n "$setupVars" ]] && source "$setupVars"
mapfile -t ph_ver_raw < <(pihole -v -c 2> /dev/null | sed -n 's/^.* v/v/p')
if [[ -n "${ph_ver_raw[0]}" ]]; then
ph_core_ver="${ph_ver_raw[0]}"
if [[ ${#ph_ver_raw[@]} -eq 2 ]]; then
# web not installed
ph_lte_ver="(not installed)"
ph_ftl_ver="${ph_ver_raw[1]}"
else
ph_lte_ver="${ph_ver_raw[1]}"
ph_ftl_ver="${ph_ver_raw[2]}"
fi
else
ph_core_ver="-1"
fi
sys_name=$(hostname)
[[ -n "$TEMPERATUREUNIT" ]] && temp_unit="${TEMPERATUREUNIT^^}" || temp_unit="C"
# Get storage stats for partition mounted on /
read -r -a disk_raw <<< "$(df -B1 / 2> /dev/null | awk 'END{ print $3,$2,$5 }')"
disk_used="${disk_raw[0]}"
disk_total="${disk_raw[1]}"
disk_perc="${disk_raw[2]}"
net_gateway=$(ip route | grep default | cut -d ' ' -f 3 | head -n 1)
# Get DHCP stats, if feature is enabled
if [[ "$DHCP_ACTIVE" == "true" ]]; then
ph_dhcp_max=$(( ${DHCP_END##*.} - ${DHCP_START##*.} + 1 ))
fi
# Get DNS server count
dns_count="0"
[[ -n "${PIHOLE_DNS_1}" ]] && dns_count=$((dns_count+1))
[[ -n "${PIHOLE_DNS_2}" ]] && dns_count=$((dns_count+1))
[[ -n "${PIHOLE_DNS_3}" ]] && dns_count=$((dns_count+1))
[[ -n "${PIHOLE_DNS_4}" ]] && dns_count=$((dns_count+1))
[[ -n "${PIHOLE_DNS_5}" ]] && dns_count=$((dns_count+1))
[[ -n "${PIHOLE_DNS_6}" ]] && dns_count=$((dns_count+1))
[[ -n "${PIHOLE_DNS_7}" ]] && dns_count=$((dns_count+1))
[[ -n "${PIHOLE_DNS_8}" ]] && dns_count=$((dns_count+1))
[[ -n "${PIHOLE_DNS_9}" ]] && dns_count="$dns_count+"
fi
# Get screen size
read -r -a scr_size <<< "$(stty size 2>/dev/null || echo 24 80)"
scr_lines="${scr_size[0]}"
scr_cols="${scr_size[1]}"
# Determine Chronometer size behavior
if [[ "$scr_cols" -ge 58 ]]; then
chrono_width="large"
elif [[ "$scr_cols" -gt 40 ]]; then
chrono_width="medium"
else
chrono_width="small"
fi
# Determine max length of divider string
scr_line_len=$(( scr_cols - 2 ))
[[ "$scr_line_len" -ge 58 ]] && scr_line_len="58"
scr_line_str=$(printf "%${scr_line_len}s")
scr_line_str="${scr_line_str// /—}"
sys_uptime=$(hrSecs "$(cut -d. -f1 /proc/uptime)")
sys_loadavg=$(cut -d " " -f1,2,3 /proc/loadavg)
# Get CPU usage, only counting processes over 1% as active
# shellcheck disable=SC2009
cpu_raw=$(ps -eo pcpu,rss --no-headers | grep -E -v " 0")
cpu_tasks=$(wc -l <<< "$cpu_raw")
cpu_taskact=$(sed -r "/(^ 0.)/d" <<< "$cpu_raw" | wc -l)
cpu_perc=$(awk '{sum+=$1} END {printf "%.0f\n", sum/'"$sys_cores"'}' <<< "$cpu_raw")
# Get CPU clock speed
if [[ -n "$scaling_freq_file" ]]; then
cpu_mhz=$(( $(< /sys/devices/system/cpu/cpu0/cpufreq/scaling_cur_freq) / 1000 ))
else
cpu_mhz=$(lscpu | awk -F ":" '/MHz/ {print $2;exit}')
cpu_mhz=$(printf "%.0f" "${cpu_mhz//[[:space:]]/}")
fi
# Determine whether to display CPU clock speed as MHz or GHz
if [[ -n "$cpu_mhz" ]]; then
[[ "$cpu_mhz" -le "999" ]] && cpu_freq="$cpu_mhz MHz" || cpu_freq="$(printf "%.1f" $(calcFunc "$cpu_mhz"/1000)) GHz"
[[ "${cpu_freq}" == *".0"* ]] && cpu_freq="${cpu_freq/.0/}"
fi
# Determine color for temperature
if [[ -n "$temp_file" ]]; then
if [[ "$temp_unit" == "C" ]]; then
cpu_temp=$(printf "%.0fc\\n" "$(calcFunc "$(< $temp_file) / 1000")")
case "${cpu_temp::-1}" in
-*|[0-9]|[1-3][0-9]) cpu_col="$COL_LIGHT_BLUE";;
4[0-9]) cpu_col="";;
5[0-9]) cpu_col="$COL_YELLOW";;
6[0-9]) cpu_col="$COL_LIGHT_RED";;
*) cpu_col="$COL_URG_RED";;
esac
# $COL_NC$COL_DARK_GRAY is needed for $COL_URG_RED
cpu_temp_str=" @ $cpu_col$cpu_temp$COL_NC$COL_DARK_GRAY"
elif [[ "$temp_unit" == "F" ]]; then
cpu_temp=$(printf "%.0ff\\n" "$(calcFunc "($(< $temp_file) / 1000) * 9 / 5 + 32")")
case "${cpu_temp::-1}" in
-*|[0-9]|[0-9][0-9]) cpu_col="$COL_LIGHT_BLUE";;
1[0-1][0-9]) cpu_col="";;
1[2-3][0-9]) cpu_col="$COL_YELLOW";;
1[4-5][0-9]) cpu_col="$COL_LIGHT_RED";;
*) cpu_col="$COL_URG_RED";;
esac
cpu_temp_str=" @ $cpu_col$cpu_temp$COL_NC$COL_DARK_GRAY"
else
cpu_temp_str=$(printf " @ %.0fk\\n" "$(calcFunc "($(< $temp_file) / 1000) + 273.15")")
fi
else
cpu_temp_str=""
fi
read -r -a ram_raw <<< "$(awk '/MemTotal:/{total=$2} /MemFree:/{free=$2} /Buffers:/{buffers=$2} /^Cached:/{cached=$2} END {printf "%.0f %.0f %.0f", (total-free-buffers-cached)*100/total, (total-free-buffers-cached)*1024, total*1024}' /proc/meminfo)"
ram_perc="${ram_raw[0]}"
ram_used="${ram_raw[1]}"
ram_total="${ram_raw[2]}"
if [[ "$(pihole status web 2> /dev/null)" -ge "1" ]]; then
ph_status="${COL_LIGHT_GREEN}Active"
else
ph_status="${COL_LIGHT_RED}Offline"
fi
if [[ "$DHCP_ACTIVE" == "true" ]]; then
local ph_dhcp_range
ph_dhcp_range=$(seq -s "|" -f "${DHCP_START%.*}.%g" "${DHCP_START##*.}" "${DHCP_END##*.}")
# Count dynamic leases from available range, and not static leases
ph_dhcp_num=$(grep -cE "$ph_dhcp_range" "/etc/pihole/dhcp.leases")
ph_dhcp_percent=$(( ph_dhcp_num * 100 / ph_dhcp_max ))
fi
}
get_ftl_stats() {
local stats_raw
mapfile -t stats_raw < <(pihole-FTL "stats")
domains_being_blocked_raw="${stats_raw[0]#* }"
dns_queries_today_raw="${stats_raw[1]#* }"
ads_blocked_today_raw="${stats_raw[2]#* }"
ads_percentage_today_raw="${stats_raw[3]#* }"
queries_forwarded_raw="${stats_raw[5]#* }"
queries_cached_raw="${stats_raw[6]#* }"
# Only retrieve these stats when not called from jsonFunc
if [[ -z "$1" ]]; then
local top_ad_raw
local top_domain_raw
local top_client_raw
domains_being_blocked=$(printf "%.0f\\n" "${domains_being_blocked_raw}" 2> /dev/null)
dns_queries_today=$(printf "%.0f\\n" "${dns_queries_today_raw}")
ads_blocked_today=$(printf "%.0f\\n" "${ads_blocked_today_raw}")
ads_percentage_today=$(printf "%'.0f\\n" "${ads_percentage_today_raw}")
queries_cached_percentage=$(printf "%.0f\\n" "$(calcFunc "$queries_cached_raw * 100 / ( $queries_forwarded_raw + $queries_cached_raw )")")
recent_blocked=$(pihole-FTL recentBlocked)
read -r -a top_ad_raw <<< "$(pihole-FTL "top-ads (1)")"
read -r -a top_domain_raw <<< "$(pihole-FTL "top-domains (1)")"
read -r -a top_client_raw <<< "$(pihole-FTL "top-clients (1)")"
top_ad="${top_ad_raw[2]}"
top_domain="${top_domain_raw[2]}"
if [[ "${top_client_raw[3]}" ]]; then
top_client="${top_client_raw[3]}"
else
top_client="${top_client_raw[2]}"
fi
fi
}
get_strings() {
# Expand or contract strings depending on screen size
if [[ "$chrono_width" == "large" ]]; then
phc_str=" ${COL_DARK_GRAY}Core"
lte_str=" ${COL_DARK_GRAY}Web"
ftl_str=" ${COL_DARK_GRAY}FTL"
api_str="${COL_LIGHT_RED}API Offline"
host_info="$sys_type"
sys_info="$sys_throttle"
sys_info2="Active: $cpu_taskact of $cpu_tasks tasks"
used_str="Used: "
leased_str="Leased: "
domains_being_blocked=$(printf "%'.0f" "$domains_being_blocked")
ads_blocked_today=$(printf "%'.0f" "$ads_blocked_today")
dns_queries_today=$(printf "%'.0f" "$dns_queries_today")
ph_info="Blocking: $domains_being_blocked sites"
total_str="Total: "
else
phc_str=" ${COL_DARK_GRAY}Core"
lte_str=" ${COL_DARK_GRAY}Web"
ftl_str=" ${COL_DARK_GRAY}FTL"
api_str="${COL_LIGHT_RED}API Down"
ph_info="$domains_being_blocked blocked"
fi
[[ "$sys_cores" -ne 1 ]] && sys_cores_txt="${sys_cores}x "
cpu_info="$sys_cores_txt$cpu_freq$cpu_temp_str"
ram_info="$used_str$(hrBytes "$ram_used") of $(hrBytes "$ram_total")"
disk_info="$used_str$(hrBytes "$disk_used") of $(hrBytes "$disk_total")"
lan_info="Gateway: $net_gateway"
dhcp_info="$leased_str$ph_dhcp_num of $ph_dhcp_max"
ads_info="$total_str$ads_blocked_today of $dns_queries_today"
dns_info="$dns_count DNS servers"
[[ "$recent_blocked" == "0" ]] && recent_blocked="${COL_LIGHT_RED}FTL offline${COL_NC}"
}
chronoFunc() {
local extra_arg="$1"
local extra_value="$2"
get_init_stats
for (( ; ; )); do
get_sys_stats
get_ftl_stats
get_strings
# Strip excess development version numbers
if [[ "$ph_core_ver" != "-1" ]]; then
phc_ver_str="$phc_str: ${ph_core_ver%-*}${COL_NC}"
lte_ver_str="$lte_str: ${ph_lte_ver%-*}${COL_NC}"
ftl_ver_str="$ftl_str: ${ph_ftl_ver%-*}${COL_NC}"
else
phc_ver_str="$phc_str: $api_str${COL_NC}"
fi
# Get refresh number
if [[ "${extra_arg}" = "refresh" ]]; then
num="${extra_value}"
num_str="Refresh set for every $num seconds"
else
num_str=""
fi
clear
# Remove exit message heading on third refresh
if [[ "$count" -le 2 ]] && [[ "${extra_arg}" != "exit" ]]; then
echo -e " ${COL_LIGHT_GREEN}Pi-hole Chronometer${COL_NC}
$num_str
${COL_LIGHT_RED}Press Ctrl-C to exit${COL_NC}
${COL_DARK_GRAY}$scr_line_str${COL_NC}"
else
echo -e "|¯¯¯(¯)_|¯|_ ___|¯|___$phc_ver_str\\n| ¯_/¯|_| ' \\/ _ \\ / -_)$lte_ver_str\\n|_| |_| |_||_\\___/_\\___|$ftl_ver_str\\n ${COL_DARK_GRAY}$scr_line_str${COL_NC}"
fi
printFunc " Hostname: " "$sys_name" "$host_info"
printFunc " Uptime: " "$sys_uptime" "$sys_info"
printFunc " Task Load: " "$sys_loadavg" "$sys_info2"
printFunc " CPU usage: " "$cpu_perc%" "$cpu_info"
printFunc " RAM usage: " "$ram_perc%" "$ram_info"
printFunc " HDD usage: " "$disk_perc" "$disk_info"
if [[ "$DHCP_ACTIVE" == "true" ]]; then
printFunc "DHCP usage: " "$ph_dhcp_percent%" "$dhcp_info"
fi
printFunc " Pi-hole: " "$ph_status" "$ph_info"
printFunc " Blocked: " "$ads_percentage_today%" "$ads_info"
printFunc "Local Qrys: " "$queries_cached_percentage%" "$dns_info"
printFunc "Last Block: " "$recent_blocked"
printFunc " Top Block: " "$top_ad"
# Provide more stats on screens with more lines
if [[ "$scr_lines" -eq 17 ]]; then
if [[ "$DHCP_ACTIVE" == "true" ]]; then
printFunc "Top Domain: " "$top_domain" "last"
else
print_client="true"
fi
else
print_client="true"
fi
if [[ -n "$print_client" ]]; then
printFunc "Top Domain: " "$top_domain"
printFunc "Top Client: " "$top_client" "last"
fi
# Handle exit/refresh options
if [[ "${extra_arg}" == "exit" ]]; then
exit 0
else
if [[ "${extra_arg}" == "refresh" ]]; then
sleep "$num"
else
sleep 5
fi
fi
done
}
jsonFunc() {
get_ftl_stats "json"
echo "{\"domains_being_blocked\":${domains_being_blocked_raw},\"dns_queries_today\":${dns_queries_today_raw},\"ads_blocked_today\":${ads_blocked_today_raw},\"ads_percentage_today\":${ads_percentage_today_raw}}"
}
helpFunc() {
if [[ "$1" == "?" ]]; then
echo "Unknown option. Please view 'pihole -c --help' for more information"
else
echo "Usage: pihole -c [options]
Example: 'pihole -c -j'
Calculates stats and displays to an LCD
Options:
-j, --json Output stats as JSON formatted string
-r, --refresh Set update frequency (in seconds)
-e, --exit Output stats and exit without refreshing
-h, --help Display this help text"
fi
exit 0
}
if [[ $# = 0 ]]; then
chronoFunc
fi
case "$1" in
"-j" | "--json" ) jsonFunc;;
"-h" | "--help" ) helpFunc;;
"-r" | "--refresh" ) chronoFunc refresh "$2";;
"-e" | "--exit" ) chronoFunc exit;;
* ) helpFunc "?";;
esac

@ -18,6 +18,11 @@ upgrade_gravityDB(){
piholeDir="${2}"
auditFile="${piholeDir}/auditlog.list"
# Exit early if the database does not exist (e.g. in CI tests)
if [[ ! -f "${database}" ]]; then
return
fi
# Get database version
version="$(pihole-FTL sqlite3 -ni "${database}" "SELECT \"value\" FROM \"info\" WHERE \"property\" = 'version';")"
@ -128,4 +133,30 @@ upgrade_gravityDB(){
pihole-FTL sqlite3 -ni "${database}" < "${scriptPath}/14_to_15.sql"
version=15
fi
if [[ "$version" == "15" ]]; then
# Add column abp_entries to adlist table
echo -e " ${INFO} Upgrading gravity database from version 15 to 16"
pihole-FTL sqlite3 -ni "${database}" < "${scriptPath}/15_to_16.sql"
version=16
fi
if [[ "$version" == "16" ]]; then
# Add antigravity table
# Add column type to adlist table (to support adlist types)
echo -e " ${INFO} Upgrading gravity database from version 16 to 17"
pihole-FTL sqlite3 -ni "${database}" < "${scriptPath}/16_to_17.sql"
version=17
fi
if [[ "$version" == "17" ]]; then
# Add adlist.id to vw_gravity and vw_antigravity
echo -e " ${INFO} Upgrading gravity database from version 17 to 18"
pihole-FTL sqlite3 -ni "${database}" < "${scriptPath}/17_to_18.sql"
version=18
fi
if [[ "$version" == "18" ]]; then
# Modify DELETE triggers to delete BEFORE instead of AFTER to prevent
# foreign key constraint violations
echo -e " ${INFO} Upgrading gravity database from version 18 to 19"
pihole-FTL sqlite3 -ni "${database}" < "${scriptPath}/18_to_19.sql"
version=19
fi
}
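As a quick sanity check, the schema version these new upgrade steps advance can be read back directly; a minimal sketch assuming the default database location:

    # Should print 19 after all of the migrations above have been applied
    pihole-FTL sqlite3 -ni /etc/pihole/gravity.db "SELECT value FROM info WHERE property = 'version';"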

@ -0,0 +1,11 @@
.timeout 30000
PRAGMA FOREIGN_KEYS=OFF;
BEGIN TRANSACTION;
ALTER TABLE adlist ADD COLUMN abp_entries INTEGER NOT NULL DEFAULT 0;
UPDATE info SET value = 16 WHERE property = 'version';
COMMIT;

@ -0,0 +1,27 @@
.timeout 30000
PRAGMA FOREIGN_KEYS=OFF;
BEGIN TRANSACTION;
ALTER TABLE adlist ADD COLUMN type INTEGER NOT NULL DEFAULT 0;
UPDATE adlist SET type = 0;
CREATE VIEW vw_antigravity AS SELECT domain, adlist_by_group.group_id AS group_id
FROM antigravity
LEFT JOIN adlist_by_group ON adlist_by_group.adlist_id = antigravity.adlist_id
LEFT JOIN adlist ON adlist.id = antigravity.adlist_id
LEFT JOIN "group" ON "group".id = adlist_by_group.group_id
WHERE adlist.enabled = 1 AND (adlist_by_group.group_id IS NULL OR "group".enabled = 1) AND adlist.type = 1;
DROP VIEW vw_adlist;
CREATE VIEW vw_adlist AS SELECT DISTINCT address, id, type
FROM adlist
WHERE enabled = 1
ORDER BY id;
UPDATE info SET value = 17 WHERE property = 'version';
COMMIT;

@ -0,0 +1,25 @@
.timeout 30000
PRAGMA FOREIGN_KEYS=OFF;
BEGIN TRANSACTION;
DROP VIEW vw_gravity;
CREATE VIEW vw_gravity AS SELECT domain, adlist.id AS adlist_id, adlist_by_group.group_id AS group_id
FROM gravity
LEFT JOIN adlist_by_group ON adlist_by_group.adlist_id = gravity.adlist_id
LEFT JOIN adlist ON adlist.id = gravity.adlist_id
LEFT JOIN "group" ON "group".id = adlist_by_group.group_id
WHERE adlist.enabled = 1 AND (adlist_by_group.group_id IS NULL OR "group".enabled = 1);
DROP VIEW vw_antigravity;
CREATE VIEW vw_antigravity AS SELECT domain, adlist.id AS adlist_id, adlist_by_group.group_id AS group_id
FROM antigravity
LEFT JOIN adlist_by_group ON adlist_by_group.adlist_id = antigravity.adlist_id
LEFT JOIN adlist ON adlist.id = antigravity.adlist_id
LEFT JOIN "group" ON "group".id = adlist_by_group.group_id
WHERE adlist.enabled = 1 AND (adlist_by_group.group_id IS NULL OR "group".enabled = 1) AND adlist.type = 1;
UPDATE info SET value = 18 WHERE property = 'version';
COMMIT;

@ -0,0 +1,27 @@
.timeout 30000
PRAGMA FOREIGN_KEYS=OFF;
BEGIN TRANSACTION;
DROP TRIGGER tr_domainlist_delete;
CREATE TRIGGER tr_domainlist_delete BEFORE DELETE ON domainlist
BEGIN
DELETE FROM domainlist_by_group WHERE domainlist_id = OLD.id;
END;
DROP TRIGGER tr_adlist_delete;
CREATE TRIGGER tr_adlist_delete BEFORE DELETE ON adlist
BEGIN
DELETE FROM adlist_by_group WHERE adlist_id = OLD.id;
END;
DROP TRIGGER tr_client_delete;
CREATE TRIGGER tr_client_delete BEFORE DELETE ON client
BEGIN
DELETE FROM client_by_group WHERE client_id = OLD.id;
END;
UPDATE info SET value = 19 WHERE property = 'version';
COMMIT;
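To confirm the 18-to-19 step took effect, the recreated triggers can be inspected in the schema; a hedged example, again assuming the default database path:

    # The sql column should now show BEFORE DELETE for all three triggers
    pihole-FTL sqlite3 -ni /etc/pihole/gravity.db "SELECT name, sql FROM sqlite_master WHERE type = 'trigger' AND name IN ('tr_domainlist_delete', 'tr_adlist_delete', 'tr_client_delete');"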

@ -16,15 +16,12 @@ source "${PI_HOLE_FILES_DIR}/automated install/basic-install.sh"
# webInterfaceDir set in basic-install.sh
# piholeGitURL set in basic-install.sh
# is_repo() sourced from basic-install.sh
# setupVars set in basic-install.sh
# check_download_exists sourced from basic-install.sh
# fully_fetch_repo sourced from basic-install.sh
# get_available_branches sourced from basic-install.sh
# fetch_checkout_pull_branch sourced from basic-install.sh
# checkout_pull_branch sourced from basic-install.sh
source "${setupVars}"
warning1() {
echo " Please note that changing branches severely alters your Pi-hole subsystems"
echo " Features that work on the master branch, may not on a development branch"
@ -61,12 +58,11 @@ checkout() {
echo -e " Please re-run install script from https://github.com/pi-hole/pi-hole${COL_NC}"
exit 1;
fi
if [[ "${INSTALL_WEB_INTERFACE}" == "true" ]]; then
if ! is_repo "${webInterfaceDir}" ; then
echo -e " ${COL_LIGHT_RED}Error: Web Admin repo is missing from system!"
echo -e " Please re-run install script from https://github.com/pi-hole/pi-hole${COL_NC}"
exit 1;
fi
fi
if [[ -z "${1}" ]]; then
@ -85,11 +81,9 @@ checkout() {
echo ""
echo -e " ${INFO} Pi-hole Core"
fetch_checkout_pull_branch "${PI_HOLE_FILES_DIR}" "development" || { echo " ${CROSS} Unable to pull Core development branch"; exit 1; }
if [[ "${INSTALL_WEB_INTERFACE}" == "true" ]]; then
echo ""
echo -e " ${INFO} Web interface"
fetch_checkout_pull_branch "${webInterfaceDir}" "devel" || { echo " ${CROSS} Unable to pull Web development branch"; exit 1; }
fi
echo ""
echo -e " ${INFO} Web interface"
fetch_checkout_pull_branch "${webInterfaceDir}" "devel" || { echo " ${CROSS} Unable to pull Web development branch"; exit 1; }
#echo -e " ${TICK} Pi-hole Core"
local path
@ -101,10 +95,8 @@ checkout() {
echo -e " ${INFO} Shortcut \"master\" detected - checking out master branches..."
echo -e " ${INFO} Pi-hole core"
fetch_checkout_pull_branch "${PI_HOLE_FILES_DIR}" "master" || { echo " ${CROSS} Unable to pull Core master branch"; exit 1; }
if [[ ${INSTALL_WEB_INTERFACE} == "true" ]]; then
echo -e " ${INFO} Web interface"
fetch_checkout_pull_branch "${webInterfaceDir}" "master" || { echo " ${CROSS} Unable to pull Web master branch"; exit 1; }
fi
echo -e " ${INFO} Web interface"
fetch_checkout_pull_branch "${webInterfaceDir}" "master" || { echo " ${CROSS} Unable to pull Web master branch"; exit 1; }
#echo -e " ${TICK} Web Interface"
local path
path="master/${binary}"
@ -137,7 +129,7 @@ checkout() {
exit 1
fi
checkout_pull_branch "${PI_HOLE_FILES_DIR}" "${2}"
elif [[ "${1}" == "web" ]] && [[ "${INSTALL_WEB_INTERFACE}" == "true" ]] ; then
elif [[ "${1}" == "web" ]] ; then
str="Fetching branches from ${webInterfaceGitUrl}"
echo -ne " ${INFO} $str"
if ! fully_fetch_repo "${webInterfaceDir}" ; then
@ -172,7 +164,9 @@ checkout() {
path="${2}/${binary}"
oldbranch="$(pihole-FTL -b)"
if check_download_exists "$path"; then
check_download_exists "$path"
local ret=$?
if [ $ret -eq 0 ]; then
echo " ${TICK} Branch ${2} exists"
echo "${2}" > /etc/pihole/ftlbranch
chmod 644 /etc/pihole/ftlbranch
@ -183,11 +177,19 @@ checkout() {
# Update local and remote versions via updatechecker
/opt/pihole/updatecheck.sh
else
echo " ${CROSS} Requested branch \"${2}\" is not available"
ftlbranches=( $(git ls-remote https://github.com/pi-hole/ftl | grep 'heads' | sed 's/refs\/heads\///;s/ //g' | awk '{print $2}') )
echo -e " ${INFO} Available branches for FTL are:"
for e in "${ftlbranches[@]}"; do echo " - $e"; done
exit 1
if [[ $ret -eq 1 ]]; then
echo " ${CROSS} Requested branch \"${2}\" is not available"
ftlbranches=( $(git ls-remote https://github.com/pi-hole/ftl | grep 'heads' | sed 's/refs\/heads\///;s/ //g' | awk '{print $2}') )
echo -e " ${INFO} Available branches for FTL are:"
for e in "${ftlbranches[@]}"; do echo " - $e"; done
exit 1
elif [[ $ret -eq 2 ]]; then
printf " %b Unable to download from ftl.pi-hole.net. Please check your Internet connection and try again later.\\n" "${CROSS}"
exit 1
else
printf " %b Unknown error. Please contact Pi-hole Support\\n" "${CROSS}"
exit 1
fi
fi
else

@ -49,7 +49,6 @@ FAQ_HARDWARE_REQUIREMENTS="${COL_CYAN}https://docs.pi-hole.net/main/prerequisite
FAQ_HARDWARE_REQUIREMENTS_PORTS="${COL_CYAN}https://docs.pi-hole.net/main/prerequisites/#ports${COL_NC}"
FAQ_HARDWARE_REQUIREMENTS_FIREWALLD="${COL_CYAN}https://docs.pi-hole.net/main/prerequisites/#firewalld${COL_NC}"
FAQ_GATEWAY="${COL_CYAN}https://discourse.pi-hole.net/t/why-is-a-default-gateway-important-for-pi-hole/3546${COL_NC}"
FAQ_FTL_COMPATIBILITY="${COL_CYAN}https://github.com/pi-hole/FTL#compatibility-list${COL_NC}"
# Other URLs we may use
FORUMS_URL="${COL_CYAN}https://discourse.pi-hole.net${COL_NC}"
@ -64,10 +63,6 @@ PIHOLE_SCRIPTS_DIRECTORY="/opt/pihole"
BIN_DIRECTORY="/usr/local/bin"
RUN_DIRECTORY="/run"
LOG_DIRECTORY="/var/log/pihole"
WEB_SERVER_LOG_DIRECTORY="/var/log/lighttpd"
WEB_SERVER_CONFIG_DIRECTORY="/etc/lighttpd"
WEB_SERVER_CONFIG_DIRECTORY_FEDORA="${WEB_SERVER_CONFIG_DIRECTORY}/conf.d"
WEB_SERVER_CONFIG_DIRECTORY_DEBIAN="${WEB_SERVER_CONFIG_DIRECTORY}/conf-enabled"
HTML_DIRECTORY="/var/www/html"
WEB_GIT_DIRECTORY="${HTML_DIRECTORY}/admin"
SHM_DIRECTORY="/dev/shm"
@ -77,49 +72,23 @@ ETC="/etc"
# https://discourse.pi-hole.net/t/what-files-does-pi-hole-use/1684
PIHOLE_CRON_FILE="${CRON_D_DIRECTORY}/pihole"
WEB_SERVER_CONFIG_FILE="${WEB_SERVER_CONFIG_DIRECTORY}/lighttpd.conf"
WEB_SERVER_CUSTOM_CONFIG_FILE="${WEB_SERVER_CONFIG_DIRECTORY}/external.conf"
WEB_SERVER_PIHOLE_CONFIG_FILE_DEBIAN="${WEB_SERVER_CONFIG_DIRECTORY_DEBIAN}/15-pihole-admin.conf"
WEB_SERVER_PIHOLE_CONFIG_FILE_FEDORA="${WEB_SERVER_CONFIG_DIRECTORY_FEDORA}/pihole-admin.conf"
PIHOLE_INSTALL_LOG_FILE="${PIHOLE_DIRECTORY}/install.log"
PIHOLE_RAW_BLOCKLIST_FILES="${PIHOLE_DIRECTORY}/list.*"
PIHOLE_LOCAL_HOSTS_FILE="${PIHOLE_DIRECTORY}/local.list"
PIHOLE_LOGROTATE_FILE="${PIHOLE_DIRECTORY}/logrotate"
PIHOLE_SETUP_VARS_FILE="${PIHOLE_DIRECTORY}/setupVars.conf"
PIHOLE_FTL_CONF_FILE="${PIHOLE_DIRECTORY}/pihole-FTL.conf"
PIHOLE_FTL_CONF_FILE="${PIHOLE_DIRECTORY}/pihole.toml"
PIHOLE_CUSTOM_HOSTS_FILE="${PIHOLE_DIRECTORY}/custom.list"
PIHOLE_VERSIONS_FILE="${PIHOLE_DIRECTORY}/versions"
# Read the value of an FTL config key. The value is printed to stdout.
#
# Args:
# 1. The key to read
# 2. The default if the setting or config does not exist
get_ftl_conf_value() {
local key=$1
local default=$2
local value
# Obtain key=... setting from pihole-FTL.conf
if [[ -e "$PIHOLE_FTL_CONF_FILE" ]]; then
# Constructed to return nothing when
# a) the setting is not present in the config file, or
# b) the setting is commented out (e.g. "#DBFILE=...")
value="$(sed -n -e "s/^\\s*$key=\\s*//p" ${PIHOLE_FTL_CONF_FILE})"
fi
# Test for missing value. Use default value in this case.
if [[ -z "$value" ]]; then
value="$default"
fi
echo "$value"
# Obtain setting from FTL directly
pihole-FTL --config "${key}"
}
PIHOLE_GRAVITY_DB_FILE="$(get_ftl_conf_value "GRAVITYDB" "${PIHOLE_DIRECTORY}/gravity.db")"
PIHOLE_FTL_DB_FILE="$(get_ftl_conf_value "DBFILE" "${PIHOLE_DIRECTORY}/pihole-FTL.db")"
PIHOLE_GRAVITY_DB_FILE="$(get_ftl_conf_value "files.gravity")"
PIHOLE_FTL_DB_FILE="$(get_ftl_conf_value "files.database")"
PIHOLE_COMMAND="${BIN_DIRECTORY}/pihole"
PIHOLE_COLTABLE_FILE="${BIN_DIRECTORY}/COL_TABLE"
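The debug script now asks FTL for these locations instead of parsing pihole-FTL.conf; the same lookups can be reproduced by hand with the keys used above:

    # Print the configured file locations directly from FTL
    pihole-FTL --config files.gravity
    pihole-FTL --config files.database
    pihole-FTL --config files.log.ftl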
@ -129,28 +98,21 @@ FTL_PID="${RUN_DIRECTORY}/pihole-FTL.pid"
PIHOLE_LOG="${LOG_DIRECTORY}/pihole.log"
PIHOLE_LOG_GZIPS="${LOG_DIRECTORY}/pihole.log.[0-9].*"
PIHOLE_DEBUG_LOG="${LOG_DIRECTORY}/pihole_debug.log"
PIHOLE_FTL_LOG="$(get_ftl_conf_value "LOGFILE" "${LOG_DIRECTORY}/FTL.log")"
PIHOLE_FTL_LOG="$(get_ftl_conf_value "files.log.ftl")"
PIHOLE_WEBSERVER_LOG="$(get_ftl_conf_value "files.log.webserver")"
PIHOLE_WEB_SERVER_ACCESS_LOG_FILE="${WEB_SERVER_LOG_DIRECTORY}/access-pihole.log"
PIHOLE_WEB_SERVER_ERROR_LOG_FILE="${WEB_SERVER_LOG_DIRECTORY}/error-pihole.log"
RESOLVCONF="${ETC}/resolv.conf"
DNSMASQ_CONF="${ETC}/dnsmasq.conf"
# Store Pi-hole's processes in an array for easy use and parsing
PIHOLE_PROCESSES=( "lighttpd" "pihole-FTL" )
PIHOLE_PROCESSES=( "pihole-FTL" )
# Store the required directories in an array so it can be parsed through
REQUIRED_FILES=("${PIHOLE_CRON_FILE}"
"${WEB_SERVER_CONFIG_FILE}"
"${WEB_SERVER_CUSTOM_CONFIG_FILE}"
"${WEB_SERVER_PIHOLE_CONFIG_FILE_DEBIAN}"
"${WEB_SERVER_PIHOLE_CONFIG_FILE_FEDORA}"
"${PIHOLE_INSTALL_LOG_FILE}"
"${PIHOLE_RAW_BLOCKLIST_FILES}"
"${PIHOLE_LOCAL_HOSTS_FILE}"
"${PIHOLE_LOGROTATE_FILE}"
"${PIHOLE_SETUP_VARS_FILE}"
"${PIHOLE_FTL_CONF_FILE}"
"${PIHOLE_COMMAND}"
"${PIHOLE_COLTABLE_FILE}"
@ -159,11 +121,9 @@ REQUIRED_FILES=("${PIHOLE_CRON_FILE}"
"${PIHOLE_LOG_GZIPS}"
"${PIHOLE_DEBUG_LOG}"
"${PIHOLE_FTL_LOG}"
"${PIHOLE_WEB_SERVER_ACCESS_LOG_FILE}"
"${PIHOLE_WEBSERVER_LOG}"
"${PIHOLE_WEB_SERVER_ERROR_LOG_FILE}"
"${RESOLVCONF}"
"${DNSMASQ_CONF}"
"${PIHOLE_CUSTOM_HOSTS_FILE}"
"${PIHOLE_VERSIONS_FILE}")
DISCLAIMER="This process collects information from your Pi-hole, and optionally uploads it to a unique and random directory on tricorder.pi-hole.net.
@ -177,20 +137,6 @@ show_disclaimer(){
log_write "${DISCLAIMER}"
}
source_setup_variables() {
# Display the current test that is running
log_write "\\n${COL_PURPLE}*** [ INITIALIZING ]${COL_NC} Sourcing setup variables"
# If the variable file exists,
if ls "${PIHOLE_SETUP_VARS_FILE}" 1> /dev/null 2>&1; then
log_write "${INFO} Sourcing ${PIHOLE_SETUP_VARS_FILE}...";
# source it
source ${PIHOLE_SETUP_VARS_FILE}
else
# If it can't, show an error
log_write "${PIHOLE_SETUP_VARS_FILE} ${COL_RED}does not exist or cannot be read.${COL_NC}"
fi
}
make_temporary_log() {
# Create a random temporary file for the log
TEMPLOG=$(mktemp /tmp/pihole_temp.XXXXXX)
@ -300,17 +246,10 @@ compare_local_version_to_git_version() {
return 1
fi
else
# There is no git directory so check if the web interface was disabled
local setup_vars_web_interface
setup_vars_web_interface=$(< ${PIHOLE_SETUP_VARS_FILE} grep ^INSTALL_WEB_INTERFACE | cut -d '=' -f2)
if [[ "${pihole_component}" == "Web" ]] && [[ "${setup_vars_web_interface}" == "false" ]]; then
log_write "${INFO} ${pihole_component}: Disabled in setupVars.conf via INSTALL_WEB_INTERFACE=false"
else
# Return an error message
log_write "${COL_RED}Directory ${git_dir} doesn't exist${COL_NC}"
# and exit with a non zero code
return 1
fi
# Return an error message
log_write "${COL_RED}Directory ${git_dir} doesn't exist${COL_NC}"
# and exit with a non zero code
return 1
fi
}
@ -349,39 +288,6 @@ check_component_versions() {
check_ftl_version
}
get_program_version() {
local program_name="${1}"
# Create a local variable so this function can be safely reused
local program_version
echo_current_diagnostic "${program_name} version"
# Evaluate the program we are checking, if it is any of the ones below, show the version
case "${program_name}" in
"lighttpd") program_version="$(${program_name} -v 2> /dev/null | head -n1 | cut -d '/' -f2 | cut -d ' ' -f1)"
;;
"php") program_version="$(${program_name} -v 2> /dev/null | head -n1 | cut -d '-' -f1 | cut -d ' ' -f2)"
;;
# If a match is not found, show an error
*) echo "Unrecognized program";
esac
# If the program does not have a version (the variable is empty)
if [[ -z "${program_version}" ]]; then
# Display and error
log_write "${CROSS} ${COL_RED}${program_name} version could not be detected.${COL_NC}"
else
# Otherwise, display the version
log_write "${INFO} ${program_version}"
fi
}
# These are the most critical dependencies of Pi-hole, so we check for them
# and their versions, using the functions above.
check_critical_program_versions() {
# Use the function created earlier and bundle them into one function that checks all the version numbers
get_program_version "lighttpd"
get_program_version "php"
}
os_check() {
# This function gets a list of supported OS versions from a TXT record at versions.pi-hole.net
# and determines whether or not the script is running on one of those systems
@ -391,7 +297,7 @@ os_check() {
detected_os=$(grep "\bID\b" /etc/os-release | cut -d '=' -f2 | tr -d '"')
detected_version=$(grep VERSION_ID /etc/os-release | cut -d '=' -f2 | tr -d '"')
cmdResult="$(dig +short -t txt "${remote_os_domain}" @ns1.pi-hole.net 2>&1; echo $?)"
cmdResult="$(dig -4 +short -t txt "${remote_os_domain}" @ns1.pi-hole.net 2>&1; echo $?)"
#Get the return code of the previous command (last line)
digReturnCode="${cmdResult##*$'\n'}"
@ -401,7 +307,20 @@ os_check() {
if [ "${digReturnCode}" -ne 0 ]; then
log_write "${INFO} Distro: ${detected_os^}"
log_write "${INFO} Version: ${detected_version}"
log_write "${CROSS} dig return code: ${COL_RED}${digReturnCode}${COL_NC}"
log_write "${CROSS} dig IPv4 return code: ${COL_RED}${digReturnCode}${COL_NC}"
log_write "${CROSS} dig response: ${response}"
log_write "${INFO} Retrying via IPv6"
cmdResult="$(dig -6 +short -t txt "${remote_os_domain}" @ns1.pi-hole.net 2>&1; echo $?)"
#Get the return code of the previous command (last line)
digReturnCode="${cmdResult##*$'\n'}"
# Extract dig response
response="${cmdResult%%$'\n'*}"
fi
# If also no success via IPv6
if [ "${digReturnCode}" -ne 0 ]; then
log_write "${CROSS} dig IPv6 return code: ${COL_RED}${digReturnCode}${COL_NC}"
log_write "${CROSS} dig response: ${response}"
log_write "${CROSS} Error: ${COL_RED}dig command failed - Unable to check OS${COL_NC}"
else
@ -549,34 +468,34 @@ check_firewalld() {
fi
}
processor_check() {
echo_current_diagnostic "Processor"
# Store the processor type in a variable
PROCESSOR=$(uname -m)
# If it does not contain a value,
if [[ -z "${PROCESSOR}" ]]; then
# we couldn't detect it, so show an error
PROCESSOR=$(lscpu | awk '/Architecture/ {print $2}')
log_write "${CROSS} ${COL_RED}${PROCESSOR}${COL_NC} has not been tested with FTL, but may still work: (${FAQ_FTL_COMPATIBILITY})"
else
# Check if the architecture is currently supported for FTL
case "${PROCESSOR}" in
"amd64" | "x86_64") log_write "${TICK} ${COL_GREEN}${PROCESSOR}${COL_NC}"
;;
"armv6l") log_write "${TICK} ${COL_GREEN}${PROCESSOR}${COL_NC}"
;;
"armv6") log_write "${TICK} ${COL_GREEN}${PROCESSOR}${COL_NC}"
;;
"armv7l") log_write "${TICK} ${COL_GREEN}${PROCESSOR}${COL_NC}"
;;
"aarch64") log_write "${TICK} ${COL_GREEN}${PROCESSOR}${COL_NC}"
;;
# Otherwise, show the processor type
*) log_write "${INFO} ${PROCESSOR}";
esac
fi
}
run_and_print_command() {
# Run the command passed as an argument
local cmd="${1}"
# Show the command that is being run
log_write "${INFO} ${cmd}"
# Run the command and store the output in a variable
local output
output=$(${cmd} 2>&1)
# If the command was successful,
if [[ $? -eq 0 ]]; then
# show the output
log_write "${output}"
else
# otherwise, show an error
log_write "${CROSS} ${COL_RED}Command failed${COL_NC}"
fi
}
hardware_check() {
echo_current_diagnostic "System hardware configuration"
# Store the output of the command in a variable
run_and_print_command "lshw -short"
echo_current_diagnostic "Processor details"
# Store the output of the command in a variable
run_and_print_command "lscpu"
}
disk_usage() {
local file_system
local hide
@ -598,18 +517,6 @@ disk_usage() {
done
}
parse_setup_vars() {
echo_current_diagnostic "Setup variables"
# If the file exists,
if [[ -r "${PIHOLE_SETUP_VARS_FILE}" ]]; then
# parse it
parse_file "${PIHOLE_SETUP_VARS_FILE}"
else
# If not, show an error
log_write "${CROSS} ${COL_RED}Could not read ${PIHOLE_SETUP_VARS_FILE}.${COL_NC}"
fi
}
parse_locale() {
local pihole_locale
echo_current_diagnostic "Locale"
@ -617,33 +524,6 @@ parse_locale() {
parse_file "${pihole_locale}"
}
detect_ip_addresses() {
# First argument should be a 4 or a 6
local protocol=${1}
# Use ip to show the addresses for the chosen protocol
# Store the values in an array so they can be looped through
# Get the lines that are in the file(s) and store them in an array for parsing later
mapfile -t ip_addr_list < <(ip -"${protocol}" addr show dev "${PIHOLE_INTERFACE}" | awk -F ' ' '{ for(i=1;i<=NF;i++) if ($i ~ '/^inet/') print $(i+1) }')
# If there is something in the IP address list,
if [[ -n ${ip_addr_list[*]} ]]; then
# Local iterator
local i
# Display the protocol and interface
log_write "${TICK} IPv${protocol} address(es) bound to the ${PIHOLE_INTERFACE} interface:"
# Since there may be more than one IP address, store them in an array
for i in "${!ip_addr_list[@]}"; do
log_write " ${ip_addr_list[$i]}"
done
# Print a blank line just for formatting
log_write ""
else
# If there are no IPs detected, explain that the protocol is not configured
log_write "${CROSS} ${COL_RED}No IPv${protocol} address(es) found on the ${PIHOLE_INTERFACE}${COL_NC} interface.\\n"
return 1
fi
}
ping_ipv4_or_ipv6() {
# Give the first argument a readable name (a 4 or a six should be the argument)
local protocol="${1}"
@ -666,23 +546,30 @@ ping_gateway() {
ping_ipv4_or_ipv6 "${protocol}"
# Check if we are using IPv4 or IPv6
# Find the default gateways using IPv4 or IPv6
local gateway
local gateway gateway_addr gateway_iface
log_write "${INFO} Default IPv${protocol} gateway(s):"
while IFS= read -r gateway; do
log_write " ${gateway}"
done < <(ip -"${protocol}" route | grep default | grep "${PIHOLE_INTERFACE}" | cut -d ' ' -f 3)
gateway=$(ip -"${protocol}" route | grep default | grep "${PIHOLE_INTERFACE}" | cut -d ' ' -f 3 | head -n 1)
log_write " $(cut -d ' ' -f 3 <<< "${gateway}")%$(cut -d ' ' -f 5 <<< "${gateway}")"
done < <(ip -"${protocol}" route | grep default)
gateway_addr=$(ip -"${protocol}" route | grep default | cut -d ' ' -f 3 | head -n 1)
gateway_iface=$(ip -"${protocol}" route | grep default | cut -d ' ' -f 5 | head -n 1)
# If there was at least one gateway
if [ -n "${gateway}" ]; then
if [ -n "${gateway_addr}" ]; then
# Append the interface to the gateway address if it is a link-local address
if [[ "${gateway_addr}" =~ ^fe80 ]]; then
gateway="${gateway_addr}%${gateway_iface}"
else
gateway="${gateway_addr}"
fi
# Let the user know we will ping the gateway for a response
log_write " * Pinging first gateway ${gateway}..."
# Try to quietly ping the gateway 3 times, with a timeout of 3 seconds, using numeric output only,
# on the pihole interface, and tail the last three lines of the output
# If pinging the gateway is not successful,
if ! ${cmd} -c 1 -W 2 -n "${gateway}" -I "${PIHOLE_INTERFACE}" >/dev/null; then
if ! ${cmd} -c 1 -W 2 -n "${gateway}" >/dev/null; then
# let the user know
log_write "${CROSS} ${COL_RED}Gateway did not respond.${COL_NC} ($FAQ_GATEWAY)\\n"
# and return an error code
@ -735,10 +622,8 @@ compare_port_to_service_assigned() {
check_required_ports() {
echo_current_diagnostic "Ports in use"
# Since Pi-hole needs 53, 80, and 4711, check what they are being used by
# Since Pi-hole needs various ports, check what they are being used by
# so we can detect any issues
local resolver="pihole-FTL"
local web_server="lighttpd"
local ftl="pihole-FTL"
# Create an array for these ports in use
ports_in_use=()
@ -747,6 +632,15 @@ check_required_ports() {
ports_in_use+=( "$line" )
done < <( ss --listening --numeric --tcp --udp --processes --no-header )
local ports_configured
# Get all configured ports
ports_configured="$(pihole-FTL --config "webserver.port")"
# Remove all non-digits, split into an array at ","
ports_configured="${ports_configured//[!0-9,]/}"
mapfile -d "," -t ports_configured < <(echo "${ports_configured}")
# Add port 53
ports_configured+=("53")
# Now that we have the values stored,
for i in "${!ports_in_use[@]}"; do
# loop through them and assign some local variables
@ -757,17 +651,13 @@ check_required_ports() {
local port_number local port_number
port_number="$(echo "${ports_in_use[$i]}" | awk '{print $5}')" # | awk '{gsub(/^.*:/,"",$5);print $5}') port_number="$(echo "${ports_in_use[$i]}" | awk '{print $5}')" # | awk '{gsub(/^.*:/,"",$5);print $5}')
# Use a case statement to determine if the right services are using the right ports # Check if the right services are using the right ports
case "$(echo "${port_number}" | rev | cut -d: -f1 | rev)" in if [[ ${ports_configured[*]} =~ $(echo "${port_number}" | rev | cut -d: -f1 | rev) ]]; then
53) compare_port_to_service_assigned "${resolver}" "${service_name}" "${protocol_type}:${port_number}" compare_port_to_service_assigned "${ftl}" "${service_name}" "${protocol_type}:${port_number}"
;; else
80) compare_port_to_service_assigned "${web_server}" "${service_name}" "${protocol_type}:${port_number}"
;;
4711) compare_port_to_service_assigned "${ftl}" "${service_name}" "${protocol_type}:${port_number}"
;;
# If it's not a default port that Pi-hole needs, just print it out for the user to see # If it's not a default port that Pi-hole needs, just print it out for the user to see
*) log_write " ${protocol_type}:${port_number} is in use by ${service_name:=<unknown>}"; log_write " ${protocol_type}:${port_number} is in use by ${service_name:=<unknown>}";
esac fi
done done
} }
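The rewritten check derives the expected ports from FTL's webserver.port setting instead of hard-coding 53, 80 and 4711. A rough sketch of that parsing, assuming bash 4.4+ for mapfile -d; the example value is made up and the exact webserver.port format is an assumption here:

    ports_configured="80o,443os,[::]:8080"             # illustrative value only
    ports_configured="${ports_configured//[!0-9,]/}"    # keep digits and commas -> "80,443,8080"
    mapfile -d "," -t ports_configured < <(echo "${ports_configured}")
    ports_configured+=("53")                            # DNS is always expected
    port_number="443"
    # Note: this is a regex/substring match against the joined array, mirroring the check above
    if [[ ${ports_configured[*]} =~ ${port_number} ]]; then
        echo "port ${port_number} is one Pi-hole expects to be using"
    fi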
@ -790,8 +680,6 @@ check_networking() {
# Runs through several of the functions made earlier; we just clump them # Runs through several of the functions made earlier; we just clump them
# together since they are all related to the networking aspect of things # together since they are all related to the networking aspect of things
echo_current_diagnostic "Networking" echo_current_diagnostic "Networking"
detect_ip_addresses "4"
detect_ip_addresses "6"
ping_gateway "4" ping_gateway "4"
ping_gateway "6" ping_gateway "6"
# Skip the following check if installed in docker container. Unpriv'ed containers do not have access to the information required # Skip the following check if installed in docker container. Unpriv'ed containers do not have access to the information required
@ -799,35 +687,6 @@ check_networking() {
[ -z "${DOCKER_VERSION}" ] && check_required_ports [ -z "${DOCKER_VERSION}" ] && check_required_ports
} }
check_x_headers() {
# The X-Headers allow us to determine from the command line if the Web
# lighttpd.conf has a directive to show "X-Pi-hole: A black hole for Internet advertisements."
# in the header of any Pi-holed domain
# Similarly, it will show "X-Pi-hole: The Pi-hole Web interface is working!" if you view the header returned
# when accessing the dashboard (i.e curl -I pi.hole/admin/)
# server is operating correctly
echo_current_diagnostic "Dashboard headers"
# Use curl -I to get the header and parse out just the X-Pi-hole one
local full_curl_output_dashboard
local dashboard
full_curl_output_dashboard="$(curl -Is localhost/admin/)"
dashboard=$(echo "${full_curl_output_dashboard}" | awk '/X-Pi-hole/' | tr -d '\r')
# Store what the X-Header should be in variables for comparison later
local dashboard_working
dashboard_working="X-Pi-hole: The Pi-hole Web interface is working!"
# If the X-Header matches what a working system should have,
if [[ $dashboard == "$dashboard_working" ]]; then
# then we can show a success
log_write "$TICK Web interface X-Header: ${COL_GREEN}${dashboard}${COL_NC}"
else
# Otherwise, it's a failure since the X-Headers either don't exist or have been modified in some way
log_write "$CROSS Web interface X-Header: ${COL_RED}X-Header does not match or could not be retrieved.${COL_NC}"
log_write "${COL_RED}${full_curl_output_dashboard}${COL_NC}"
fi
}
dig_at() { dig_at() {
# We need to test if Pi-hole can properly resolve domain names # We need to test if Pi-hole can properly resolve domain names
# as it is an essential piece of the software # as it is an essential piece of the software
@ -904,15 +763,29 @@ dig_at() {
# Removes CIDR and everything thereafter (e.g., scope properties) # Removes CIDR and everything thereafter (e.g., scope properties)
addresses="$(ip address show dev "${iface}" | sed "/${sed_selector} /!d;s/^.*${sed_selector} //g;s/\/.*$//g;")" addresses="$(ip address show dev "${iface}" | sed "/${sed_selector} /!d;s/^.*${sed_selector} //g;s/\/.*$//g;")"
if [ -n "${addresses}" ]; then if [ -n "${addresses}" ]; then
while IFS= read -r local_address ; do while IFS= read -r local_address ; do
# If ${local_address} is an IPv6 link-local address, append the interface name to it
if [[ "${local_address}" =~ ^fe80 ]]; then
local_address="${local_address}%${iface}"
fi
# Check if Pi-hole can use itself to block a domain # Check if Pi-hole can use itself to block a domain
if local_dig=$(dig +tries=1 +time=2 -"${protocol}" "${random_url}" @"${local_address}" +short "${record_type}"); then if local_dig="$(dig +tries=1 +time=2 -"${protocol}" "${random_url}" @"${local_address}" "${record_type}")"; then
# If it can, show success # If it can, show success
log_write "${TICK} ${random_url} ${COL_GREEN}is ${local_dig}${COL_NC} on ${COL_CYAN}${iface}${COL_NC} (${COL_CYAN}${local_address}${COL_NC})" if [[ "${local_dig}" == *"status: NOERROR"* ]]; then
else local_dig="NOERROR"
# Otherwise, show a failure elif [[ "${local_dig}" == *"status: NXDOMAIN"* ]]; then
log_write "${CROSS} ${COL_RED}Failed to resolve${COL_NC} ${random_url} on ${COL_RED}${iface}${COL_NC} (${COL_RED}${local_address}${COL_NC})" local_dig="NXDOMAIN"
fi else
# Extract the first entry in the answer section from dig's output,
# replacing any multiple spaces and tabs with a single space
local_dig="$(echo "${local_dig}" | grep -A1 "ANSWER SECTION" | grep -v "ANSWER SECTION" | tr -s " \t" " ")"
fi
log_write "${TICK} ${random_url} ${COL_GREEN}is ${local_dig}${COL_NC} on ${COL_CYAN}${iface}${COL_NC} (${COL_CYAN}${local_address}${COL_NC})"
else
# Otherwise, show a failure
log_write "${CROSS} ${COL_RED}Failed to resolve${COL_NC} ${random_url} on ${COL_RED}${iface}${COL_NC} (${COL_RED}${local_address}${COL_NC})"
fi
done <<< "${addresses}" done <<< "${addresses}"
else else
log_write "${TICK} No IPv${protocol} address available on ${COL_CYAN}${iface}${COL_NC}" log_write "${TICK} No IPv${protocol} address available on ${COL_CYAN}${iface}${COL_NC}"
@ -985,20 +858,6 @@ ftl_full_status(){
fi fi
} }
lighttpd_test_configuration(){
# let lighttpd test its own configuration
local lighttpd_conf_test
echo_current_diagnostic "Lighttpd configuration test"
lighttpd_conf_test=$(lighttpd -tt -f /etc/lighttpd/lighttpd.conf)
if [ -z "${lighttpd_conf_test}" ]; then
# empty output
log_write "${TICK} ${COL_GREEN}No error in lighttpd configuration${COL_NC}"
else
log_write "${CROSS} ${COL_RED}Error in lighttpd configuration${COL_NC}"
log_write " ${lighttpd_conf_test}"
fi
}
make_array_from_file() { make_array_from_file() {
local filename="${1}" local filename="${1}"
# The second argument can put a limit on how many lines should be read from the file # The second argument can put a limit on how many lines should be read from the file
@ -1006,8 +865,6 @@ make_array_from_file() {
local limit=${2} local limit=${2}
# A local iterator for testing if we are at the limit above # A local iterator for testing if we are at the limit above
local i=0 local i=0
# Set the array to be empty so we can start fresh when the function is used
local file_content=()
# If the file is a directory # If the file is a directory
if [[ -d "${filename}" ]]; then if [[ -d "${filename}" ]]; then
# do nothing since it cannot be parsed # do nothing since it cannot be parsed
@ -1019,11 +876,14 @@ make_array_from_file() {
new_line=$(echo "${line}" | sed -e 's/^\s*#.*$//' -e '/^$/d') new_line=$(echo "${line}" | sed -e 's/^\s*#.*$//' -e '/^$/d')
# If the line still has content (a non-zero value) # If the line still has content (a non-zero value)
if [[ -n "${new_line}" ]]; then if [[ -n "${new_line}" ]]; then
# Put it into the array
file_content+=("${new_line}") # If the string contains "### CHANGED", highlight this part in red
else if [[ "${new_line}" == *"### CHANGED"* ]]; then
# Otherwise, it's a blank line or comment, so do nothing new_line="${new_line//### CHANGED/${COL_RED}### CHANGED${COL_NC}}"
: fi
# Finally, write this line to the log
log_write " ${new_line}"
fi fi
# Increment the iterator +1 # Increment the iterator +1
i=$((i+1)) i=$((i+1))
@ -1035,12 +895,6 @@ make_array_from_file() {
break break
fi fi
done < "${filename}" done < "${filename}"
# Now the we have made an array of the file's content
for each_line in "${file_content[@]}"; do
# Print each line
# At some point, we may want to check the file line-by-line, so that's the reason for an array
log_write " ${each_line}"
done
fi fi
} }
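make_array_from_file now logs each kept line directly and highlights the "### CHANGED" marker with bash's replace-all parameter expansion. A tiny sketch; the colour codes are placeholders for the values sourced from COL_TABLE and the config line is made up:

    COL_RED='\e[1;91m'   # placeholder, normally sourced from COL_TABLE
    COL_NC='\e[0m'       # placeholder
    new_line='some.setting = false ### CHANGED, default = true'
    # ${var//pattern/replacement} replaces every occurrence of the pattern
    echo -e "   ${new_line//### CHANGED/${COL_RED}### CHANGED${COL_NC}}"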
@ -1062,8 +916,10 @@ parse_file() {
# For each line in the file, # For each line in the file,
for file_lines in "${file_info[@]}"; do for file_lines in "${file_info[@]}"; do
if [[ -n "${file_lines}" ]]; then if [[ -n "${file_lines}" ]]; then
# don't include the Web password hash # skip empty and comment lines
[[ "${file_lines}" =~ ^\#.*$ || ! "${file_lines}" || "${file_lines}" == "WEBPASSWORD="* ]] && continue [[ "${file_lines}" =~ ^[[:space:]]*\#.*$ || ! "${file_lines}" ]] && continue
# remove the password hash from the output (*"pwhash = "*)
[[ "${file_lines}" == *"pwhash ="* ]] && file_lines=$(echo "${file_lines}" | sed -e 's/\(pwhash = \).*/\1<removed>/')
# otherwise, display the lines of the file # otherwise, display the lines of the file
log_write " ${file_lines}" log_write " ${file_lines}"
fi fi
@ -1110,12 +966,6 @@ list_files_in_dir() {
if [[ "${dir_to_parse}" == "${SHM_DIRECTORY}" ]]; then if [[ "${dir_to_parse}" == "${SHM_DIRECTORY}" ]]; then
# SHM file - we do not want to see the content, but we want to see the files and their sizes # SHM file - we do not want to see the content, but we want to see the files and their sizes
log_write "$(ls -lh "${dir_to_parse}/")" log_write "$(ls -lh "${dir_to_parse}/")"
elif [[ "${dir_to_parse}" == "${WEB_SERVER_CONFIG_DIRECTORY_FEDORA}" ]]; then
# we want to see all files in /etc/lighttpd/conf.d
log_write "$(ls -lh "${dir_to_parse}/" 2> /dev/null )"
elif [[ "${dir_to_parse}" == "${WEB_SERVER_CONFIG_DIRECTORY_DEBIAN}" ]]; then
# we want to see all files in /etc/lighttpd/conf.d
log_write "$(ls -lh "${dir_to_parse}/"/ 2> /dev/null )"
fi fi
# Store the files found in an array # Store the files found in an array
@ -1128,9 +978,7 @@ list_files_in_dir() {
elif [[ "${dir_to_parse}/${each_file}" == "${PIHOLE_DEBUG_LOG}" ]] || \ elif [[ "${dir_to_parse}/${each_file}" == "${PIHOLE_DEBUG_LOG}" ]] || \
[[ "${dir_to_parse}/${each_file}" == "${PIHOLE_RAW_BLOCKLIST_FILES}" ]] || \ [[ "${dir_to_parse}/${each_file}" == "${PIHOLE_RAW_BLOCKLIST_FILES}" ]] || \
[[ "${dir_to_parse}/${each_file}" == "${PIHOLE_INSTALL_LOG_FILE}" ]] || \ [[ "${dir_to_parse}/${each_file}" == "${PIHOLE_INSTALL_LOG_FILE}" ]] || \
[[ "${dir_to_parse}/${each_file}" == "${PIHOLE_SETUP_VARS_FILE}" ]] || \
[[ "${dir_to_parse}/${each_file}" == "${PIHOLE_LOG}" ]] || \ [[ "${dir_to_parse}/${each_file}" == "${PIHOLE_LOG}" ]] || \
[[ "${dir_to_parse}/${each_file}" == "${PIHOLE_WEB_SERVER_ACCESS_LOG_FILE}" ]] || \
[[ "${dir_to_parse}/${each_file}" == "${PIHOLE_LOG_GZIPS}" ]]; then [[ "${dir_to_parse}/${each_file}" == "${PIHOLE_LOG_GZIPS}" ]]; then
: :
elif [[ "${dir_to_parse}" == "${DNSMASQ_D_DIRECTORY}" ]]; then elif [[ "${dir_to_parse}" == "${DNSMASQ_D_DIRECTORY}" ]]; then
@ -1145,8 +993,8 @@ list_files_in_dir() {
log_write "\\n${COL_GREEN}$(ls -lhd "${dir_to_parse}"/"${each_file}")${COL_NC}" log_write "\\n${COL_GREEN}$(ls -lhd "${dir_to_parse}"/"${each_file}")${COL_NC}"
# Check if the file we want to view has a limit (because sometimes we just need a little bit of info from the file, not the entire thing) # Check if the file we want to view has a limit (because sometimes we just need a little bit of info from the file, not the entire thing)
case "${dir_to_parse}/${each_file}" in case "${dir_to_parse}/${each_file}" in
# If it's Web server error log, give the first and last 25 lines # If it's Web server log, give the first and last 25 lines
"${PIHOLE_WEB_SERVER_ERROR_LOG_FILE}") head_tail_log "${dir_to_parse}/${each_file}" 25 "${PIHOLE_WEBSERVER_LOG}") head_tail_log "${dir_to_parse}/${each_file}" 25
;; ;;
# Same for the FTL log # Same for the FTL log
"${PIHOLE_FTL_LOG}") head_tail_log "${dir_to_parse}/${each_file}" 35 "${PIHOLE_FTL_LOG}") head_tail_log "${dir_to_parse}/${each_file}" 35
@ -1177,11 +1025,7 @@ show_content_of_pihole_files() {
# Show the content of the files in each of Pi-hole's folders # Show the content of the files in each of Pi-hole's folders
show_content_of_files_in_dir "${PIHOLE_DIRECTORY}" show_content_of_files_in_dir "${PIHOLE_DIRECTORY}"
show_content_of_files_in_dir "${DNSMASQ_D_DIRECTORY}" show_content_of_files_in_dir "${DNSMASQ_D_DIRECTORY}"
show_content_of_files_in_dir "${WEB_SERVER_CONFIG_DIRECTORY}"
show_content_of_files_in_dir "${WEB_SERVER_CONFIG_DIRECTORY_FEDORA}"
show_content_of_files_in_dir "${WEB_SERVER_CONFIG_DIRECTORY_DEBIAN}"
show_content_of_files_in_dir "${CRON_D_DIRECTORY}" show_content_of_files_in_dir "${CRON_D_DIRECTORY}"
show_content_of_files_in_dir "${WEB_SERVER_LOG_DIRECTORY}"
show_content_of_files_in_dir "${LOG_DIRECTORY}" show_content_of_files_in_dir "${LOG_DIRECTORY}"
show_content_of_files_in_dir "${SHM_DIRECTORY}" show_content_of_files_in_dir "${SHM_DIRECTORY}"
show_content_of_files_in_dir "${ETC}" show_content_of_files_in_dir "${ETC}"
@ -1418,10 +1262,10 @@ spinner(){
analyze_pihole_log() { analyze_pihole_log() {
echo_current_diagnostic "Pi-hole log" echo_current_diagnostic "Pi-hole log"
local pihole_log_permissions local pihole_log_permissions
local logging_enabled local queryLogging
logging_enabled=$(grep -c "^log-queries" /etc/dnsmasq.d/01-pihole.conf) queryLogging="$(get_ftl_conf_value "dns.queryLogging")"
if [[ "${logging_enabled}" == "0" ]]; then if [[ "${queryLogging}" == "false" ]]; then
# Inform user that logging has been disabled and pihole.log does not contain queries # Inform user that logging has been disabled and pihole.log does not contain queries
log_write "${INFO} Query logging is disabled" log_write "${INFO} Query logging is disabled"
log_write "" log_write ""
@ -1518,15 +1362,12 @@ upload_to_tricorder() {
# Run through all the functions we made # Run through all the functions we made
make_temporary_log make_temporary_log
initialize_debug initialize_debug
# setupVars.conf needs to be sourced before the networking so the values are
# available to the other functions
source_setup_variables
check_component_versions check_component_versions
check_critical_program_versions # check_critical_program_versions
diagnose_operating_system diagnose_operating_system
check_selinux check_selinux
check_firewalld check_firewalld
processor_check hardware_check
disk_usage disk_usage
check_ip_command check_ip_command
check_networking check_networking
@ -1534,9 +1375,6 @@ check_name_resolution
check_dhcp_servers check_dhcp_servers
process_status process_status
ftl_full_status ftl_full_status
lighttpd_test_configuration
parse_setup_vars
check_x_headers
analyze_ftl_db analyze_ftl_db
analyze_gravity_list analyze_gravity_list
show_groups show_groups

@ -30,10 +30,10 @@ if [ -z "$DBFILE" ]; then
DBFILE="/etc/pihole/pihole-FTL.db" DBFILE="/etc/pihole/pihole-FTL.db"
fi fi
if [[ "$@" != *"quiet"* ]]; then if [[ "$*" != *"quiet"* ]]; then
echo -ne " ${INFO} Flushing /var/log/pihole/pihole.log ..." echo -ne " ${INFO} Flushing /var/log/pihole/pihole.log ..."
fi fi
if [[ "$@" == *"once"* ]]; then if [[ "$*" == *"once"* ]]; then
# Nightly logrotation # Nightly logrotation
if command -v /usr/sbin/logrotate >/dev/null; then if command -v /usr/sbin/logrotate >/dev/null; then
# Logrotate once # Logrotate once
@ -69,7 +69,7 @@ else
sudo pihole restartdns sudo pihole restartdns
fi fi
if [[ "$@" != *"quiet"* ]]; then if [[ "$*" != *"quiet"* ]]; then
echo -e "${OVER} ${TICK} Flushed /var/log/pihole/pihole.log" echo -e "${OVER} ${TICK} Flushed /var/log/pihole/pihole.log"
echo -e " ${TICK} Deleted ${deleted} queries from database" echo -e " ${TICK} Deleted ${deleted} queries from database"
fi fi
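Switching from "$@" to "$*" inside [[ ]] does not change behaviour here: with the default IFS both end up matching the pattern against the positional parameters joined with spaces, but "$*" states that intent explicitly and avoids ShellCheck's warning about array-like expansions in [[ ]]. A quick sketch:

    set -- once quiet
    if [[ "$*" == *"quiet"* ]]; then
        echo "running quietly"   # "$*" expanded to "once quiet"
    fi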

@ -1,259 +1,160 @@
#!/usr/bin/env bash #!/usr/bin/env sh
# shellcheck disable=SC1090 # shellcheck disable=SC1090
# Ignore warning about `local` being undefined in POSIX
# shellcheck disable=SC3043
# https://github.com/koalaman/shellcheck/wiki/SC3043#exceptions
# Pi-hole: A black hole for Internet advertisements # Pi-hole: A black hole for Internet advertisements
# (c) 2018 Pi-hole, LLC (https://pi-hole.net) # (c) 2023 Pi-hole, LLC (https://pi-hole.net)
# Network-wide ad blocking via your own hardware. # Network-wide ad blocking via your own hardware.
# #
# Query Domain Lists # Search Adlists
# #
# This file is copyright under the latest version of the EUPL. # This file is copyright under the latest version of the EUPL.
# Please see LICENSE file for your rights under this license. # Please see LICENSE file for your rights under this license.
# Globals # Globals
piholeDir="/etc/pihole" PI_HOLE_INSTALL_DIR="/opt/pihole"
GRAVITYDB="${piholeDir}/gravity.db" max_results="20"
options="$*" partial="false"
all="" domain=""
exact=""
matchType="match"
# Source pihole-FTL from install script
pihole_FTL="${piholeDir}/pihole-FTL.conf"
if [[ -f "${pihole_FTL}" ]]; then
source "${pihole_FTL}"
fi
# Set this only after sourcing pihole-FTL.conf as the gravity database path may
# have changed
gravityDBfile="${GRAVITYDB}"
# Source color table
colfile="/opt/pihole/COL_TABLE" colfile="/opt/pihole/COL_TABLE"
source "${colfile}" . "${colfile}"
if [[ "${options}" == "-h" ]] || [[ "${options}" == "--help" ]]; then # Source api functions
. "${PI_HOLE_INSTALL_DIR}/api.sh"
Help() {
echo "Usage: pihole -q [option] <domain> echo "Usage: pihole -q [option] <domain>
Example: 'pihole -q -exact domain.com' Example: 'pihole -q --partial domain.com'
Query the adlists for a specified domain Query the adlists for a specified domain
Options: Options:
-exact Search the adlists for exact domain matches --partial Search the adlists for partially matching domains
-all Return all query matches within the adlists --all Return all query matches within the adlists
-h, --help Show this help dialog" -h, --help Show this help dialog"
exit 0 exit 0
fi
# Handle valid options
[[ "${options}" == *"-all"* ]] && all=true
if [[ "${options}" == *"-exact"* ]]; then
exact="exact"; matchType="exact ${matchType}"
fi
# Strip valid options, leaving only the domain and invalid options
# This allows users to place the options before or after the domain
options=$(sed -E 's/ ?-(all|exact) ?//g' <<< "${options}")
# Handle remaining options
# If $options contain non ASCII characters, convert to punycode
case "${options}" in
"" ) str="No domain specified";;
*" "* ) str="Unknown query option specified";;
*[![:ascii:]]* ) rawDomainQuery=$(idn2 "${options}");;
* ) rawDomainQuery="${options}";;
esac
# convert the domain to lowercase
domainQuery=$(echo "${rawDomainQuery}" | tr '[:upper:]' '[:lower:]')
if [[ -n "${str:-}" ]]; then
echo -e "${str}${COL_NC}\\nTry 'pihole -q --help' for more information."
exit 1
fi
# Scan a domain against a list of regex
scanRegExList(){
local domain="${1}" list="${2}"
for entry in ${list}; do
if [[ "${domain}" =~ ${entry} ]]; then
printf "%b\n" "${entry}";
fi
done
} }
scanDatabaseTable() { GenerateOutput() {
local domain table list_type querystr result extra abpquerystr abpfound abpentry searchstr local data gravity_data lists_data num_gravity num_lists search_type_str
domain="$(printf "%q" "${1}")" local gravity_data_csv lists_data_csv line current_domain url type color
table="${2}" data="${1}"
list_type="${3:-}"
# As underscores are legitimate parts of domains, we escape them when using the LIKE operator.
# Underscores are SQLite wildcards matching exactly one character. We obviously want to suppress this
# behavior. The "ESCAPE '\'" clause specifies that an underscore preceded by an '\' should be matched
# as a literal underscore character. We pretreat the $domain variable accordingly to escape underscores.
if [[ "${table}" == "gravity" ]]; then
# Are there ABP entries on gravity?
# Return 1 if abp_domain=1 or Zero if abp_domain=0 or not set
abpquerystr="SELECT EXISTS (SELECT 1 FROM info WHERE property='abp_domains' and value='1')"
abpfound="$(pihole-FTL sqlite3 -ni "${gravityDBfile}" "${abpquerystr}")" 2> /dev/null
# Create search string for ABP entries only if needed
if [ "${abpfound}" -eq 1 ]; then
abpentry="${domain}"
searchstr="'||${abpentry}^'" # construct a new json for the list results where each object contains the domain and the related type
lists_data=$(printf %s "${data}" | jq '.search.domains | [.[] | {domain: .domain, type: .type}]')
# While a dot is found ... # construct a new json for the gravity results where each object contains the adlist URL and the related domains
while [ "${abpentry}" != "${abpentry/./}" ] gravity_data=$(printf %s "${data}" | jq '.search.gravity | group_by(.address,.type) | map({ address: (.[0].address), type: (.[0].type), domains: [.[] | .domain] })')
do
# ... remove text before the dot (including the dot) and append the result to $searchstr
abpentry=$(echo "${abpentry}" | cut -f 2- -d '.')
searchstr="$searchstr, '||${abpentry}^'"
done
# The final search string will look like: # number of objects in each json
# "domain IN ('||sub2.sub1.domain.com^', '||sub1.domain.com^', '||domain.com^', '||com^') OR" num_gravity=$(printf %s "${gravity_data}" | jq length)
searchstr="domain IN (${searchstr}) OR " num_lists=$(printf %s "${lists_data}" | jq length)
fi
case "${exact}" in if [ "${partial}" = true ]; then
"exact" ) querystr="SELECT gravity.domain,adlist.address,adlist.enabled FROM gravity LEFT JOIN adlist ON adlist.id = gravity.adlist_id WHERE domain = '${domain}'";; search_type_str="partially"
* ) querystr="SELECT gravity.domain,adlist.address,adlist.enabled FROM gravity LEFT JOIN adlist ON adlist.id = gravity.adlist_id WHERE ${searchstr} domain LIKE '%${domain//_/\\_}%' ESCAPE '\\'";;
esac
else else
case "${exact}" in search_type_str="exactly"
"exact" ) querystr="SELECT domain,enabled FROM domainlist WHERE type = '${list_type}' AND domain = '${domain}'";;
* ) querystr="SELECT domain,enabled FROM domainlist WHERE type = '${list_type}' AND domain LIKE '%${domain//_/\\_}%' ESCAPE '\\'";;
esac
fi fi
# Send prepared query to gravity database # Results from allow/deny list
result="$(pihole-FTL sqlite3 -ni -separator ',' "${gravityDBfile}" "${querystr}")" 2> /dev/null printf "%s\n\n" "Found ${num_lists} domains ${search_type_str} matching '${COL_BLUE}${domain}${COL_NC}'."
if [[ -z "${result}" ]]; then if [ "${num_lists}" -gt 0 ]; then
# Return early when there are no matches in this table # Convert the data to a csv, each line is a "domain,type" string
return # not using jq's @csv here as it quotes each value individually
lists_data_csv=$(printf %s "${lists_data}" | jq --raw-output '.[] | [.domain, .type] | join(",")')
# Generate output for each csv line, separating line in a domain and type substring at the ','
echo "${lists_data_csv}" | while read -r line; do
printf "%s\n\n" " - ${COL_GREEN}${line%,*}${COL_NC} (type: exact ${line#*,} domain)"
done
fi fi
if [[ "${table}" == "gravity" ]]; then # Results from gravity
echo "${result}" printf "%s\n\n" "Found ${num_gravity} adlists ${search_type_str} matching '${COL_BLUE}${domain}${COL_NC}'."
return if [ "${num_gravity}" -gt 0 ]; then
# Convert the data to a csv, each line is a "URL,domain,domain,...." string
# not using jq's @csv here as it quotes each value individually
gravity_data_csv=$(printf %s "${gravity_data}" | jq --raw-output '.[] | [.address, .type, .domains[]] | join(",")')
# Generate line-by-line output for each csv line
echo "${gravity_data_csv}" | while read -r line; do
# Get first part of the line, the URL
url=${line%%,*}
# cut off URL, leaving "type,domain,domain,...."
line=${line#*,}
type=${line%%,*}
# type == "block" -> red, type == "allow" -> green
if [ "${type}" = "block" ]; then
color="${COL_RED}"
else
color="${COL_GREEN}"
fi
# print adlist URL
printf "%s (%s)\n\n" " - ${COL_BLUE}${url}${COL_NC}" "${color}${type}${COL_NC}"
# cut off type, leaving "domain,domain,...."
line=${line#*,}
# print each domain and remove it from the string until nothing is left
while [ ${#line} -gt 0 ]; do
current_domain=${line%%,*}
printf ' - %s\n' "${COL_GREEN}${current_domain}${COL_NC}"
# we need to remove the current_domain and the comma in two steps because
# the last domain won't have a trailing comma and the while loop wouldn't exit
line=${line#"${current_domain}"}
line=${line#,}
done
printf "\n\n"
done
fi fi
# Mark domain as having been white-/blacklist matched (global variable)
wbMatch=true
# Print table name
echo " ${matchType^} found in ${COL_BOLD}exact ${table}${COL_NC}"
# Loop over results and print them
mapfile -t results <<< "${result}"
for result in "${results[@]}"; do
domain="${result/,*}"
if [[ "${result#*,}" == "0" ]]; then
extra=" (disabled)"
else
extra=""
fi
echo " ${domain}${extra}"
done
} }
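GenerateOutput flattens the grouped gravity JSON into one "url,type,domain,domain,..." line per adlist and then peels the fields off with parameter expansion, since plain sh has no arrays. A condensed sketch of that loop (POSIX sh, like the new script) on a hand-written line; the URL and domains are made up:

    line="https://example.com/list.txt,block,ads.example.com,track.example.net"
    url=${line%%,*};  line=${line#*,}      # url  -> https://example.com/list.txt
    type=${line%%,*}; line=${line#*,}      # type -> block
    while [ ${#line} -gt 0 ]; do
        current_domain=${line%%,*}
        printf '  - %s\n' "${current_domain}"
        # strip the domain, then the comma; the last domain has no trailing comma
        line=${line#"${current_domain}"}
        line=${line#,}
    done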
scanRegexDatabaseTable() { Main() {
local domain list list_type local data
domain="${1}"
list="${2}"
list_type="${3:-}"
# Query all regex from the corresponding database tables
mapfile -t regexList < <(pihole-FTL sqlite3 -ni "${gravityDBfile}" "SELECT domain FROM domainlist WHERE type = ${list_type}" 2> /dev/null)
# If we have regexps to process if [ -z "${domain}" ]; then
if [[ "${#regexList[@]}" -ne 0 ]]; then echo "No domain specified"
# Split regexps over a new line exit 1
str_regexList=$(printf '%s\n' "${regexList[@]}")
# Check domain against regexps
mapfile -t regexMatches < <(scanRegExList "${domain}" "${str_regexList}")
# If there were regex matches
if [[ "${#regexMatches[@]}" -ne 0 ]]; then
# Split matching regexps over a new line
str_regexMatches=$(printf '%s\n' "${regexMatches[@]}")
# Form a "matched" message
str_message="${matchType^} found in ${COL_BOLD}regex ${list}${COL_NC}"
# Form a "results" message
str_result="${COL_BOLD}${str_regexMatches}${COL_NC}"
# If we are displaying more than just the source of the block
# Set the wildcard match flag
wcMatch=true
# Echo the "matched" message, indented by one space
echo " ${str_message}"
# Echo the "results" message, each line indented by three spaces
# shellcheck disable=SC2001
echo "${str_result}" | sed 's/^/ /'
fi
fi fi
} # domains are lowercased and converted to punycode by FTL since
# https://github.com/pi-hole/FTL/pull/1715
# no need to do it here
# Scan Whitelist and Blacklist # Test if the authentication endpoint is available
scanDatabaseTable "${domainQuery}" "whitelist" "0" TestAPIAvailability
scanDatabaseTable "${domainQuery}" "blacklist" "1"
# Scan Regex table # Users can configure FTL so that no authentication is required for a) all endpoints (webserver.api.localAPIauth)
scanRegexDatabaseTable "${domainQuery}" "whitelist" "2" # or b) the /search endpoint alone (webserver.api.searchAPIauth).
scanRegexDatabaseTable "${domainQuery}" "blacklist" "3" # Therefore, we try to query directly without authentication but authenticate if 401 is returned
# Query block lists data=$(GetFTLData "search/${domain}?N=${max_results}&partial=${partial}")
mapfile -t results <<< "$(scanDatabaseTable "${domainQuery}" "gravity")"
# Handle notices if [ "${data}" = 401 ]; then
if [[ -z "${wbMatch:-}" ]] && [[ -z "${wcMatch:-}" ]] && [[ -z "${results[*]}" ]]; then # Unauthenticated, so authenticate with the FTL server required
echo -e " ${INFO} No ${exact/t/t }results found for ${COL_BOLD}${domainQuery}${COL_NC} within the adlists" Authentication
exit 0
elif [[ -z "${results[*]}" ]]; then
# Result found in WL/BL/Wildcards
exit 0
elif [[ -z "${all}" ]] && [[ "${#results[*]}" -ge 100 ]]; then
echo -e " ${INFO} Over 100 ${exact/t/t }results found for ${COL_BOLD}${domainQuery}${COL_NC}
This can be overridden using the -all option"
exit 0
fi
# Print "Exact matches for" title # send query again
if [[ -n "${exact}" ]]; then data=$(GetFTLData "search/${domain}?N=${max_results}&partial=${partial}")
plural=""; [[ "${#results[*]}" -gt 1 ]] && plural="es"
echo " ${matchType^}${plural} for ${COL_BOLD}${domainQuery}${COL_NC} found in:"
fi
for result in "${results[@]}"; do
match="${result/,*/}"
extra="${result#*,}"
adlistAddress="${extra/,*/}"
extra="${extra#*,}"
if [[ "${extra}" == "0" ]]; then
extra=" (disabled)"
else
extra=""
fi fi
if [[ -n "${exact}" ]]; then GenerateOutput "${data}"
echo " - ${adlistAddress}${extra}" DeleteSession
else }
if [[ ! "${adlistAddress}" == "${adlistAddress_prev:-}" ]]; then
count=""
echo " ${matchType^} found in ${COL_BOLD}${adlistAddress}${COL_NC}:"
adlistAddress_prev="${adlistAddress}"
fi
: $((count++))
# Print matching domain if $max_count has not been reached # Process all options (if present)
[[ -z "${all}" ]] && max_count="50" while [ "$#" -gt 0 ]; do
if [[ -z "${all}" ]] && [[ "${count}" -ge "${max_count}" ]]; then case "$1" in
[[ "${count}" -gt "${max_count}" ]] && continue "-h" | "--help") Help ;;
echo " ${COL_GRAY}Over ${count} results found, skipping rest of file${COL_NC}" "--partial") partial="true" ;;
else "--all") max_results=10000 ;; # hard-coded FTL limit
echo " ${match}${extra}" *) domain=$1 ;;
fi esac
fi shift
done done
exit 0 Main "${domain}"
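With this option loop, --partial and --all may be given before or after the domain, and anything that is not a recognised option is treated as the domain to search for. Typical invocations (the domain is a placeholder):

    pihole -q example.com             # exact search, default limit of 20 results
    pihole -q --partial example       # partially matching domains
    pihole -q example.com --all       # raise the limit to FTL's hard-coded maximum of 10000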

@ -104,9 +104,6 @@ main() {
web_update=false web_update=false
FTL_update=false FTL_update=false
# shellcheck disable=1090,2154
source "${setupVars}"
# Install packages used by this installation script (necessary if users have removed e.g. git from their systems) # Install packages used by this installation script (necessary if users have removed e.g. git from their systems)
package_manager_detect package_manager_detect
install_dependent_packages "${INSTALLER_DEPS[@]}" install_dependent_packages "${INSTALLER_DEPS[@]}"
@ -128,20 +125,18 @@ main() {
echo -e " ${INFO} Pi-hole Core:\\t${COL_LIGHT_GREEN}up to date${COL_NC}" echo -e " ${INFO} Pi-hole Core:\\t${COL_LIGHT_GREEN}up to date${COL_NC}"
fi fi
if [[ "${INSTALL_WEB_INTERFACE}" == true ]]; then if ! is_repo "${ADMIN_INTERFACE_DIR}" ; then
if ! is_repo "${ADMIN_INTERFACE_DIR}" ; then echo -e "\\n ${COL_LIGHT_RED}Error: Web Admin repo is missing from system!"
echo -e "\\n ${COL_LIGHT_RED}Error: Web Admin repo is missing from system!" echo -e " Please re-run install script from https://pi-hole.net${COL_NC}"
echo -e " Please re-run install script from https://pi-hole.net${COL_NC}" exit 1;
exit 1; fi
fi
if GitCheckUpdateAvail "${ADMIN_INTERFACE_DIR}" ; then if GitCheckUpdateAvail "${ADMIN_INTERFACE_DIR}" ; then
web_update=true web_update=true
echo -e " ${INFO} Web Interface:\\t${COL_YELLOW}update available${COL_NC}" echo -e " ${INFO} Web Interface:\\t${COL_YELLOW}update available${COL_NC}"
else else
web_update=false web_update=false
echo -e " ${INFO} Web Interface:\\t${COL_LIGHT_GREEN}up to date${COL_NC}" echo -e " ${INFO} Web Interface:\\t${COL_LIGHT_GREEN}up to date${COL_NC}"
fi
fi fi
local funcOutput local funcOutput
@ -149,7 +144,7 @@ main() {
local binary local binary
binary="pihole-FTL${funcOutput##*pihole-FTL}" #binary name will be the last line of the output of get_binary_name (it always begins with pihole-FTL) binary="pihole-FTL${funcOutput##*pihole-FTL}" #binary name will be the last line of the output of get_binary_name (it always begins with pihole-FTL)
if FTLcheckUpdate "${binary}" > /dev/null; then if FTLcheckUpdate "${binary}"; then
FTL_update=true FTL_update=true
echo -e " ${INFO} FTL:\\t\\t${COL_YELLOW}update available${COL_NC}" echo -e " ${INFO} FTL:\\t\\t${COL_YELLOW}update available${COL_NC}"
else else
@ -160,8 +155,13 @@ main() {
2) 2)
echo -e " ${INFO} FTL:\\t\\t${COL_LIGHT_RED}Branch is not available.${COL_NC}\\n\\t\\t\\tUse ${COL_LIGHT_GREEN}pihole checkout ftl [branchname]${COL_NC} to switch to a valid branch." echo -e " ${INFO} FTL:\\t\\t${COL_LIGHT_RED}Branch is not available.${COL_NC}\\n\\t\\t\\tUse ${COL_LIGHT_GREEN}pihole checkout ftl [branchname]${COL_NC} to switch to a valid branch."
;; ;;
3)
echo -e " ${INFO} FTL:\\t\\t${COL_LIGHT_RED}Something has gone wrong, cannot reach download server${COL_NC}"
exit 1
;;
*) *)
echo -e " ${INFO} FTL:\\t\\t${COL_LIGHT_RED}Something has gone wrong, contact support${COL_NC}" echo -e " ${INFO} FTL:\\t\\t${COL_LIGHT_RED}Something has gone wrong, contact support${COL_NC}"
exit 1
esac esac
FTL_update=false FTL_update=false
fi fi

@ -10,34 +10,34 @@
function get_local_branch() { function get_local_branch() {
# Return active branch # Return active branch
cd "${1}" 2> /dev/null || return 1 cd "${1}" 2>/dev/null || return 1
git rev-parse --abbrev-ref HEAD || return 1 git rev-parse --abbrev-ref HEAD || return 1
} }
function get_local_version() { function get_local_version() {
# Return active version # Return active version
cd "${1}" 2> /dev/null || return 1 cd "${1}" 2>/dev/null || return 1
git describe --tags --always 2> /dev/null || return 1 git describe --tags --always 2>/dev/null || return 1
} }
function get_local_hash() { function get_local_hash() {
cd "${1}" 2> /dev/null || return 1 cd "${1}" 2>/dev/null || return 1
git rev-parse --short=8 HEAD || return 1 git rev-parse --short=8 HEAD || return 1
} }
function get_remote_version() { function get_remote_version() {
curl -s "https://api.github.com/repos/pi-hole/${1}/releases/latest" 2> /dev/null | jq --raw-output .tag_name || return 1 # if ${2} is = "master" we need to use the "latest" endpoint, otherwise, we simply return null
if [[ "${2}" == "master" ]]; then
curl -s "https://api.github.com/repos/pi-hole/${1}/releases/latest" 2>/dev/null | jq --raw-output .tag_name || return 1
else
echo "null"
fi
} }
function get_remote_hash() {
function get_remote_hash(){ git ls-remote "https://github.com/pi-hole/${1}" --tags "${2}" | awk '{print substr($0, 1,8);}' || return 1
git ls-remote "https://github.com/pi-hole/${1}" --tags "${2}" | awk '{print substr($0, 0,8);}' || return 1
} }
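The substr() start index changes from 0 to 1 because awk strings are 1-indexed; with GNU awk, substr($0, 0, 8) quietly returns one character fewer than intended. A quick demonstration on a fabricated ls-remote line:

    printf '%s\n' '0123456789abcdef refs/tags/v1.0' | awk '{print substr($0, 0,8);}'   # 0123456  (7 chars)
    printf '%s\n' '0123456789abcdef refs/tags/v1.0' | awk '{print substr($0, 1,8);}'   # 01234567 (8 chars)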
# Source the setupvars config file
# shellcheck disable=SC1091
. /etc/pihole/setupVars.conf
# Source the utils file for addOrEditKeyValPair() # Source the utils file for addOrEditKeyValPair()
# shellcheck disable=SC1091 # shellcheck disable=SC1091
. /opt/pihole/utils.sh . /opt/pihole/utils.sh
@ -56,16 +56,15 @@ chmod 644 "${VERSION_FILE}"
DOCKER_TAG=$(cat /pihole.docker.tag 2>/dev/null) DOCKER_TAG=$(cat /pihole.docker.tag 2>/dev/null)
regex='^([0-9]+\.){1,2}(\*|[0-9]+)(-.*)?$|(^nightly$)|(^dev.*$)' regex='^([0-9]+\.){1,2}(\*|[0-9]+)(-.*)?$|(^nightly$)|(^dev.*$)'
if [[ ! "${DOCKER_TAG}" =~ $regex ]]; then if [[ ! "${DOCKER_TAG}" =~ $regex ]]; then
# DOCKER_TAG does not match the pattern (see https://regex101.com/r/RsENuz/1), so unset it. # DOCKER_TAG does not match the pattern (see https://regex101.com/r/RsENuz/1), so unset it.
unset DOCKER_TAG unset DOCKER_TAG
fi fi
# used in cronjob # used in cronjob
if [[ "$1" == "reboot" ]]; then if [[ "$1" == "reboot" ]]; then
sleep 30 sleep 30
fi fi
# get Core versions # get Core versions
CORE_VERSION="$(get_local_version /etc/.pihole)" CORE_VERSION="$(get_local_version /etc/.pihole)"
@ -77,33 +76,28 @@ addOrEditKeyValPair "${VERSION_FILE}" "CORE_BRANCH" "${CORE_BRANCH}"
CORE_HASH="$(get_local_hash /etc/.pihole)" CORE_HASH="$(get_local_hash /etc/.pihole)"
addOrEditKeyValPair "${VERSION_FILE}" "CORE_HASH" "${CORE_HASH}" addOrEditKeyValPair "${VERSION_FILE}" "CORE_HASH" "${CORE_HASH}"
GITHUB_CORE_VERSION="$(get_remote_version pi-hole)" GITHUB_CORE_VERSION="$(get_remote_version pi-hole "${CORE_BRANCH}")"
addOrEditKeyValPair "${VERSION_FILE}" "GITHUB_CORE_VERSION" "${GITHUB_CORE_VERSION}" addOrEditKeyValPair "${VERSION_FILE}" "GITHUB_CORE_VERSION" "${GITHUB_CORE_VERSION}"
GITHUB_CORE_HASH="$(get_remote_hash pi-hole "${CORE_BRANCH}")" GITHUB_CORE_HASH="$(get_remote_hash pi-hole "${CORE_BRANCH}")"
addOrEditKeyValPair "${VERSION_FILE}" "GITHUB_CORE_HASH" "${GITHUB_CORE_HASH}" addOrEditKeyValPair "${VERSION_FILE}" "GITHUB_CORE_HASH" "${GITHUB_CORE_HASH}"
# get Web versions # get Web versions
if [[ "${INSTALL_WEB_INTERFACE}" == true ]]; then WEB_VERSION="$(get_local_version /var/www/html/admin)"
addOrEditKeyValPair "${VERSION_FILE}" "WEB_VERSION" "${WEB_VERSION}"
WEB_VERSION="$(get_local_version /var/www/html/admin)" WEB_BRANCH="$(get_local_branch /var/www/html/admin)"
addOrEditKeyValPair "${VERSION_FILE}" "WEB_VERSION" "${WEB_VERSION}" addOrEditKeyValPair "${VERSION_FILE}" "WEB_BRANCH" "${WEB_BRANCH}"
WEB_BRANCH="$(get_local_branch /var/www/html/admin)" WEB_HASH="$(get_local_hash /var/www/html/admin)"
addOrEditKeyValPair "${VERSION_FILE}" "WEB_BRANCH" "${WEB_BRANCH}" addOrEditKeyValPair "${VERSION_FILE}" "WEB_HASH" "${WEB_HASH}"
WEB_HASH="$(get_local_hash /var/www/html/admin)" GITHUB_WEB_VERSION="$(get_remote_version web "${WEB_BRANCH}")"
addOrEditKeyValPair "${VERSION_FILE}" "WEB_HASH" "${WEB_HASH}" addOrEditKeyValPair "${VERSION_FILE}" "GITHUB_WEB_VERSION" "${GITHUB_WEB_VERSION}"
GITHUB_WEB_VERSION="$(get_remote_version web)" GITHUB_WEB_HASH="$(get_remote_hash web "${WEB_BRANCH}")"
addOrEditKeyValPair "${VERSION_FILE}" "GITHUB_WEB_VERSION" "${GITHUB_WEB_VERSION}" addOrEditKeyValPair "${VERSION_FILE}" "GITHUB_WEB_HASH" "${GITHUB_WEB_HASH}"
GITHUB_WEB_HASH="$(get_remote_hash web "${WEB_BRANCH}")"
addOrEditKeyValPair "${VERSION_FILE}" "GITHUB_WEB_HASH" "${GITHUB_WEB_HASH}"
fi
# get FTL versions # get FTL versions
@ -116,13 +110,12 @@ addOrEditKeyValPair "${VERSION_FILE}" "FTL_BRANCH" "${FTL_BRANCH}"
FTL_HASH="$(pihole-FTL --hash)" FTL_HASH="$(pihole-FTL --hash)"
addOrEditKeyValPair "${VERSION_FILE}" "FTL_HASH" "${FTL_HASH}" addOrEditKeyValPair "${VERSION_FILE}" "FTL_HASH" "${FTL_HASH}"
GITHUB_FTL_VERSION="$(get_remote_version FTL)" GITHUB_FTL_VERSION="$(get_remote_version FTL "${FTL_BRANCH}")"
addOrEditKeyValPair "${VERSION_FILE}" "GITHUB_FTL_VERSION" "${GITHUB_FTL_VERSION}" addOrEditKeyValPair "${VERSION_FILE}" "GITHUB_FTL_VERSION" "${GITHUB_FTL_VERSION}"
GITHUB_FTL_HASH="$(get_remote_hash FTL "${FTL_BRANCH}")" GITHUB_FTL_HASH="$(get_remote_hash FTL "${FTL_BRANCH}")"
addOrEditKeyValPair "${VERSION_FILE}" "GITHUB_FTL_HASH" "${GITHUB_FTL_HASH}" addOrEditKeyValPair "${VERSION_FILE}" "GITHUB_FTL_HASH" "${GITHUB_FTL_HASH}"
# get Docker versions # get Docker versions
if [[ "${DOCKER_TAG}" ]]; then if [[ "${DOCKER_TAG}" ]]; then

@ -25,6 +25,7 @@
# #
# Example usage: # Example usage:
# addOrEditKeyValPair "/etc/pihole/setupVars.conf" "BLOCKING_ENABLED" "true" # addOrEditKeyValPair "/etc/pihole/setupVars.conf" "BLOCKING_ENABLED" "true"
# TODO: We might not actually need this function in v6
####################### #######################
addOrEditKeyValPair() { addOrEditKeyValPair() {
local file="${1}" local file="${1}"
@ -80,29 +81,6 @@ removeKey() {
sed -i "/^${key}/d" "${file}" sed -i "/^${key}/d" "${file}"
} }
#######################
# returns FTL's current telnet API port based on the setting in /etc/pihole-FTL.conf
########################
getFTLAPIPort(){
local FTLCONFFILE="/etc/pihole/pihole-FTL.conf"
local DEFAULT_FTL_PORT=4711
local ftl_api_port
if [ -s "$FTLCONFFILE" ]; then
# if FTLPORT is not set in pihole-FTL.conf, use the default port
ftl_api_port="$({ grep '^FTLPORT=' "${FTLCONFFILE}" || echo "${DEFAULT_FTL_PORT}"; } | cut -d'=' -f2-)"
# Exploit prevention: set the port to the default port if there is malicious (non-numeric)
# content set in pihole-FTL.conf
expr "${ftl_api_port}" : "[^[:digit:]]" > /dev/null && ftl_api_port="${DEFAULT_FTL_PORT}"
else
# if there is no pihole-FTL.conf, use the default port
ftl_api_port="${DEFAULT_FTL_PORT}"
fi
echo "${ftl_api_port}"
}
####################### #######################
# returns path of FTL's PID file # returns path of FTL's PID file
####################### #######################
@ -145,3 +123,30 @@ getFTLPID() {
FTL_PID=${FTL_PID:=-1} FTL_PID=${FTL_PID:=-1}
echo "${FTL_PID}" echo "${FTL_PID}"
} }
#######################
# returns value from FTLs config file using pihole-FTL --config
#
# Takes one argument: key
# Example getFTLConfigValue dns.piholePTR
#######################
getFTLConfigValue(){
pihole-FTL --config -q "${1}"
}
#######################
# sets value in FTLs config file using pihole-FTL --config
#
# Takes two arguments: key and value
# Example setFTLConfigValue dns.piholePTR PI.HOLE
#
# Note, for complex values such as dns.upstreams, you should wrap the value in single quotes:
# setFTLConfigValue dns.upstreams '[ "8.8.8.8" , "8.8.4.4" ]'
#######################
setFTLConfigValue(){
pihole-FTL --config "${1}" "${2}" >/dev/null
if [[ $? -eq 5 ]]; then
echo -e " ${CROSS} ${1} set by environment variable. Please unset it to use this function"
exit 5
fi
}
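A short usage sketch for the two helpers above, assuming /opt/pihole/utils.sh has been sourced; dns.piholePTR and PI.HOLE are the key and value from the function's own example:

    current="$(getFTLConfigValue dns.piholePTR)"
    echo "dns.piholePTR is currently: ${current}"
    if [[ "${current}" != "PI.HOLE" ]]; then
        # setFTLConfigValue exits with code 5 if the key is pinned by an environment variable
        setFTLConfigValue dns.piholePTR PI.HOLE
    fi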

@ -8,9 +8,9 @@
# This file is copyright under the latest version of the EUPL. # This file is copyright under the latest version of the EUPL.
# Please see LICENSE file for your rights under this license. # Please see LICENSE file for your rights under this license.
# Source the setupvars config file # Ignore warning about `local` being undefined in POSIX
# shellcheck disable=SC1091 # shellcheck disable=SC3043
. /etc/pihole/setupVars.conf # https://github.com/koalaman/shellcheck/wiki/SC3043#exceptions
# Source the versions file populated by updatechecker.sh # Source the versions file populated by updatechecker.sh
cachedVersions="/etc/pihole/versions" cachedVersions="/etc/pihole/versions"
@ -25,126 +25,34 @@ else
. "$cachedVersions" . "$cachedVersions"
fi fi
getLocalVersion() { main() {
case ${1} in local details
"Pi-hole" ) echo "${CORE_VERSION:=N/A}";; details=false
"web" ) [ "${INSTALL_WEB_INTERFACE}" = true ] && echo "${WEB_VERSION:=N/A}";;
"FTL" ) echo "${FTL_VERSION:=N/A}";;
esac
}
getLocalHash() {
case ${1} in
"Pi-hole" ) echo "${CORE_HASH:=N/A}";;
"web" ) [ "${INSTALL_WEB_INTERFACE}" = true ] && echo "${WEB_HASH:=N/A}";;
"FTL" ) echo "${FTL_HASH:=N/A}";;
esac
}
getRemoteHash(){
case ${1} in
"Pi-hole" ) echo "${GITHUB_CORE_HASH:=N/A}";;
"web" ) [ "${INSTALL_WEB_INTERFACE}" = true ] && echo "${GITHUB_WEB_HASH:=N/A}";;
"FTL" ) echo "${GITHUB_FTL_HASH:=N/A}";;
esac
}
getRemoteVersion(){
case ${1} in
"Pi-hole" ) echo "${GITHUB_CORE_VERSION:=N/A}";;
"web" ) [ "${INSTALL_WEB_INTERFACE}" = true ] && echo "${GITHUB_WEB_VERSION:=N/A}";;
"FTL" ) echo "${GITHUB_FTL_VERSION:=N/A}";;
esac
}
getLocalBranch(){
case ${1} in
"Pi-hole" ) echo "${CORE_BRANCH:=N/A}";;
"web" ) [ "${INSTALL_WEB_INTERFACE}" = true ] && echo "${WEB_BRANCH:=N/A}";;
"FTL" ) echo "${FTL_BRANCH:=N/A}";;
esac
}
versionOutput() {
if [ "$1" = "web" ] && [ "${INSTALL_WEB_INTERFACE}" != true ]; then
echo " WebAdmin not installed"
return 1
fi
[ "$2" = "-c" ] || [ "$2" = "--current" ] || [ -z "$2" ] && current=$(getLocalVersion "${1}") && branch=$(getLocalBranch "${1}")
[ "$2" = "-l" ] || [ "$2" = "--latest" ] || [ -z "$2" ] && latest=$(getRemoteVersion "${1}")
if [ "$2" = "--hash" ]; then
[ "$3" = "-c" ] || [ "$3" = "--current" ] || [ -z "$3" ] && curHash=$(getLocalHash "${1}") && branch=$(getLocalBranch "${1}")
[ "$3" = "-l" ] || [ "$3" = "--latest" ] || [ -z "$3" ] && latHash=$(getRemoteHash "${1}") && branch=$(getLocalBranch "${1}")
fi
# We do not want to show the branch name when we are on master, # Automatically show detailed information if
# blank out the variable in this case # at least one of the components is not on master branch
if [ "$branch" = "master" ]; then if [ ! "${CORE_BRANCH}" = "master" ] || [ ! "${WEB_BRANCH}" = "master" ] || [ ! "${FTL_BRANCH}" = "master" ]; then
branch="" details=true
else
branch="$branch "
fi fi
if [ -n "$current" ] && [ -n "$latest" ]; then if [ "${details}" = true ]; then
output="${1} version is $branch$current (Latest: $latest)" echo "Core"
elif [ -n "$current" ] && [ -z "$latest" ]; then echo " Version is ${CORE_VERSION:=N/A} (Latest: ${GITHUB_CORE_VERSION:=N/A})"
output="Current ${1} version is $branch$current" echo " Branch is ${CORE_BRANCH:=N/A}"
elif [ -z "$current" ] && [ -n "$latest" ]; then echo " Hash is ${CORE_HASH:=N/A} (Latest: ${GITHUB_CORE_HASH:=N/A})"
output="Latest ${1} version is $latest" echo "Web"
elif [ -n "$curHash" ] && [ -n "$latHash" ]; then echo " Version is ${WEB_VERSION:=N/A} (Latest: ${GITHUB_WEB_VERSION:=N/A})"
output="Local ${1} hash is $curHash (Remote: $latHash)" echo " Branch is ${WEB_BRANCH:=N/A}"
elif [ -n "$curHash" ] && [ -z "$latHash" ]; then echo " Hash is ${WEB_HASH:=N/A} (Latest: ${GITHUB_WEB_HASH:=N/A})"
output="Current local ${1} hash is $curHash" echo "FTL"
elif [ -z "$curHash" ] && [ -n "$latHash" ]; then echo " Version is ${FTL_VERSION:=N/A} (Latest: ${GITHUB_FTL_VERSION:=N/A})"
output="Latest remote ${1} hash is $latHash" echo " Branch is ${FTL_BRANCH:=N/A}"
elif [ -z "$curHash" ] && [ -z "$latHash" ]; then echo " Hash is ${FTL_HASH:=N/A} (Latest: ${GITHUB_FTL_HASH:=N/A})"
output="Hashes for ${1} not available"
else else
errorOutput echo "Core version is ${CORE_VERSION:=N/A} (Latest: ${GITHUB_CORE_VERSION:=N/A})"
return 1 echo "Web version is ${WEB_VERSION:=N/A} (Latest: ${GITHUB_WEB_VERSION:=N/A})"
echo "FTL version is ${FTL_VERSION:=N/A} (Latest: ${GITHUB_FTL_VERSION:=N/A})"
fi fi
[ -n "$output" ] && echo " $output"
}
errorOutput() {
echo " Invalid Option! Try 'pihole -v --help' for more information."
exit 1
}
defaultOutput() {
versionOutput "Pi-hole" "$@"
if [ "${INSTALL_WEB_INTERFACE}" = true ]; then
versionOutput "web" "$@"
fi
versionOutput "FTL" "$@"
}
helpFunc() {
echo "Usage: pihole -v [repo | option] [option]
Example: 'pihole -v -p -l'
Show Pi-hole, Admin Console & FTL versions
Repositories:
-p, --pihole Only retrieve info regarding Pi-hole repository
-a, --admin Only retrieve info regarding web repository
-f, --ftl Only retrieve info regarding FTL repository
Options:
-c, --current Return the current version
-l, --latest Return the latest version
--hash Return the GitHub hash from your local repositories
-h, --help Show this help dialog"
exit 0
} }
case "${1}" in main
"-p" | "--pihole" ) shift; versionOutput "Pi-hole" "$@";;
"-a" | "--admin" ) shift; versionOutput "web" "$@";;
"-f" | "--ftl" ) shift; versionOutput "FTL" "$@";;
"-h" | "--help" ) helpFunc;;
* ) defaultOutput "$@";;
esac

@ -1,890 +0,0 @@
#!/usr/bin/env bash
# shellcheck disable=SC1090
# shellcheck disable=SC2154
# Pi-hole: A black hole for Internet advertisements
# (c) 2017 Pi-hole, LLC (https://pi-hole.net)
# Network-wide ad blocking via your own hardware.
#
# Web interface settings
#
# This file is copyright under the latest version of the EUPL.
# Please see LICENSE file for your rights under this license.
readonly dnsmasqconfig="/etc/dnsmasq.d/01-pihole.conf"
readonly dhcpconfig="/etc/dnsmasq.d/02-pihole-dhcp.conf"
readonly FTLconf="/etc/pihole/pihole-FTL.conf"
# 03 -> wildcards
readonly dhcpstaticconfig="/etc/dnsmasq.d/04-pihole-static-dhcp.conf"
readonly dnscustomfile="/etc/pihole/custom.list"
readonly dnscustomcnamefile="/etc/dnsmasq.d/05-pihole-custom-cname.conf"
readonly gravityDBfile="/etc/pihole/gravity.db"
readonly setupVars="/etc/pihole/setupVars.conf"
readonly PI_HOLE_BIN_DIR="/usr/local/bin"
# Root of the web server
readonly webroot="/var/www/html"
# Source utils script
utilsfile="/opt/pihole/utils.sh"
source "${utilsfile}"
coltable="/opt/pihole/COL_TABLE"
if [[ -f ${coltable} ]]; then
source ${coltable}
fi
helpFunc() {
echo "Usage: pihole -a [options]
Example: pihole -a -p password
Set options for the Admin Console
Options:
-p, password Set Admin Console password
-c, celsius Set Celsius as preferred temperature unit
-f, fahrenheit Set Fahrenheit as preferred temperature unit
-k, kelvin Set Kelvin as preferred temperature unit
-h, --help Show this help dialog
-i, interface Specify dnsmasq's interface listening behavior
-l, privacylevel Set privacy level (0 = lowest, 3 = highest)
-t, teleporter Backup configuration as an archive
-t, teleporter myname.tar.gz Backup configuration to archive with name myname.tar.gz as specified"
exit 0
}
add_setting() {
addOrEditKeyValPair "${setupVars}" "${1}" "${2}"
}
delete_setting() {
removeKey "${setupVars}" "${1}"
}
change_setting() {
addOrEditKeyValPair "${setupVars}" "${1}" "${2}"
}
addFTLsetting() {
addOrEditKeyValPair "${FTLconf}" "${1}" "${2}"
}
deleteFTLsetting() {
removeKey "${FTLconf}" "${1}"
}
changeFTLsetting() {
addOrEditKeyValPair "${FTLconf}" "${1}" "${2}"
}
add_dnsmasq_setting() {
addOrEditKeyValPair "${dnsmasqconfig}" "${1}" "${2}"
}
delete_dnsmasq_setting() {
removeKey "${dnsmasqconfig}" "${1}"
}
SetTemperatureUnit() {
addOrEditKeyValPair "${setupVars}" "TEMPERATUREUNIT" "${unit}"
echo -e " ${TICK} Set temperature unit to ${unit}"
}
HashPassword() {
# Compute password hash twice to avoid rainbow table vulnerability
return=$(echo -n "${1}" | sha256sum | sed 's/\s.*$//')
return=$(echo -n "${return}" | sha256sum | sed 's/\s.*$//')
echo "${return}"
}
# Check an IP address to see if it is a valid one
valid_ip() {
# Local, named variables
local ip=${1}
local stat=1
# Regex matching one IPv4 component, i.e. an integer from 0 to 255.
# See https://tools.ietf.org/html/rfc1340
local ipv4elem="(25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9][0-9]?|0)";
# Regex matching an optional port (starting with '#') range of 1-65536
local portelem="(#(6553[0-5]|655[0-2][0-9]|65[0-4][0-9]{2}|6[0-4][0-9]{3}|[1-5][0-9]{4}|[1-9][0-9]{0,3}|0))?";
# Build a full IPv4 regex from the above subexpressions
local regex="^${ipv4elem}\\.${ipv4elem}\\.${ipv4elem}\\.${ipv4elem}${portelem}$"
# Evaluate the regex, and return the result
[[ $ip =~ ${regex} ]]
stat=$?
return "${stat}"
}
valid_ip6() {
local ip=${1}
local stat=1
# Regex matching one IPv6 element, i.e. a hex value from 0000 to FFFF
local ipv6elem="[0-9a-fA-F]{1,4}"
# Regex matching an IPv6 CIDR, i.e. 1 to 128
local v6cidr="(\\/([1-9]|[1-9][0-9]|1[0-1][0-9]|12[0-8])){0,1}"
# Regex matching an optional port (starting with '#') range of 1-65536
local portelem="(#(6553[0-5]|655[0-2][0-9]|65[0-4][0-9]{2}|6[0-4][0-9]{3}|[1-5][0-9]{4}|[1-9][0-9]{0,3}|0))?";
# Build a full IPv6 regex from the above subexpressions
local regex="^(((${ipv6elem}))*((:${ipv6elem}))*::((${ipv6elem}))*((:${ipv6elem}))*|((${ipv6elem}))((:${ipv6elem})){7})${v6cidr}${portelem}$"
# Evaluate the regex, and return the result
[[ ${ip} =~ ${regex} ]]
stat=$?
return "${stat}"
}
SetWebPassword() {
if [ "${SUDO_USER}" == "www-data" ]; then
echo "Security measure: user www-data is not allowed to change webUI password!"
echo "Exiting"
exit 1
fi
if [ "${SUDO_USER}" == "lighttpd" ]; then
echo "Security measure: user lighttpd is not allowed to change webUI password!"
echo "Exiting"
exit 1
fi
if (( ${#args[2]} > 0 )) ; then
readonly PASSWORD="${args[2]}"
readonly CONFIRM="${PASSWORD}"
else
# Prevents a bug if the user presses Ctrl+C and it continues to hide the text typed.
# So we reset the terminal via stty if the user does press Ctrl+C
trap '{ echo -e "\nNo password will be set" ; stty sane ; exit 1; }' INT
read -s -r -p "Enter New Password (Blank for no password): " PASSWORD
echo ""
if [ "${PASSWORD}" == "" ]; then
addOrEditKeyValPair "${setupVars}" "WEBPASSWORD" ""
echo -e " ${TICK} Password Removed"
exit 0
fi
read -s -r -p "Confirm Password: " CONFIRM
echo ""
fi
if [ "${PASSWORD}" == "${CONFIRM}" ] ; then
# We do not wrap this in brackets, otherwise BASH will expand any appropriate syntax
hash=$(HashPassword "$PASSWORD")
# Save hash to file
addOrEditKeyValPair "${setupVars}" "WEBPASSWORD" "${hash}"
echo -e " ${TICK} New password set"
else
echo -e " ${CROSS} Passwords don't match. Your password has not been changed"
exit 1
fi
}
ProcessDNSSettings() {
source "${setupVars}"
removeKey "${dnsmasqconfig}" "server"
COUNTER=1
while true ; do
var=PIHOLE_DNS_${COUNTER}
if [ -z "${!var}" ]; then
break;
fi
addKey "${dnsmasqconfig}" "server=${!var}"
(( COUNTER++ ))
done
# The option LOCAL_DNS_PORT is deprecated
# We apply it once more, and then convert it into the current format
if [ -n "${LOCAL_DNS_PORT}" ]; then
addOrEditKeyValPair "${dnsmasqconfig}" "server" "127.0.0.1#${LOCAL_DNS_PORT}"
addOrEditKeyValPair "${setupVars}" "PIHOLE_DNS_${COUNTER}" "127.0.0.1#${LOCAL_DNS_PORT}"
removeKey "${setupVars}" "LOCAL_DNS_PORT"
fi
removeKey "${dnsmasqconfig}" "domain-needed"
removeKey "${dnsmasqconfig}" "expand-hosts"
if [[ "${DNS_FQDN_REQUIRED}" == true ]]; then
addKey "${dnsmasqconfig}" "domain-needed"
addKey "${dnsmasqconfig}" "expand-hosts"
fi
removeKey "${dnsmasqconfig}" "bogus-priv"
if [[ "${DNS_BOGUS_PRIV}" == true ]]; then
addKey "${dnsmasqconfig}" "bogus-priv"
fi
removeKey "${dnsmasqconfig}" "dnssec"
removeKey "${dnsmasqconfig}" "trust-anchor"
if [[ "${DNSSEC}" == true ]]; then
echo "dnssec
trust-anchor=.,20326,8,2,E06D44B80B8F1D39A95C0B0D7C65D08458E880409BBC683457104237C7F8EC8D
" >> "${dnsmasqconfig}"
fi
removeKey "${dnsmasqconfig}" "host-record"
if [ -n "${HOSTRECORD}" ]; then
addOrEditKeyValPair "${dnsmasqconfig}" "host-record" "${HOSTRECORD}"
fi
# Setup interface listening behavior of dnsmasq
removeKey "${dnsmasqconfig}" "interface"
removeKey "${dnsmasqconfig}" "local-service"
removeKey "${dnsmasqconfig}" "except-interface"
removeKey "${dnsmasqconfig}" "bind-interfaces"
if [[ "${DNSMASQ_LISTENING}" == "all" ]]; then
# Listen on all interfaces, permit all origins
addOrEditKeyValPair "${dnsmasqconfig}" "except-interface" "nonexisting"
elif [[ "${DNSMASQ_LISTENING}" == "local" ]]; then
# Listen only on all interfaces, but only local subnets
addKey "${dnsmasqconfig}" "local-service"
else
# Options "bind" and "single"
# Listen only on one interface
# Use eth0 as fallback interface if interface is missing in setupVars.conf
if [ -z "${PIHOLE_INTERFACE}" ]; then
PIHOLE_INTERFACE="eth0"
fi
addOrEditKeyValPair "${dnsmasqconfig}" "interface" "${PIHOLE_INTERFACE}"
if [[ "${DNSMASQ_LISTENING}" == "bind" ]]; then
# Really bind to interface
addKey "${dnsmasqconfig}" "bind-interfaces"
fi
fi
if [[ "${CONDITIONAL_FORWARDING}" == true ]]; then
# Convert legacy "conditional forwarding" to rev-server configuration
# Remove any existing REV_SERVER settings
removeKey "${setupVars}" "REV_SERVER"
removeKey "${setupVars}" "REV_SERVER_DOMAIN"
removeKey "${setupVars}" "REV_SERVER_TARGET"
removeKey "${setupVars}" "REV_SERVER_CIDR"
REV_SERVER=true
addOrEditKeyValPair "${setupVars}" "REV_SERVER" "true"
REV_SERVER_DOMAIN="${CONDITIONAL_FORWARDING_DOMAIN}"
addOrEditKeyValPair "${setupVars}" "REV_SERVER_DOMAIN" "${REV_SERVER_DOMAIN}"
REV_SERVER_TARGET="${CONDITIONAL_FORWARDING_IP}"
addOrEditKeyValPair "${setupVars}" "REV_SERVER_TARGET" "${REV_SERVER_TARGET}"
#Convert CONDITIONAL_FORWARDING_REVERSE if necessary e.g:
# 1.1.168.192.in-addr.arpa to 192.168.1.1/32
# 1.168.192.in-addr.arpa to 192.168.1.0/24
# 168.192.in-addr.arpa to 192.168.0.0/16
# 192.in-addr.arpa to 192.0.0.0/8
if [[ "${CONDITIONAL_FORWARDING_REVERSE}" == *"in-addr.arpa" ]];then
arrRev=("${CONDITIONAL_FORWARDING_REVERSE//./ }")
case ${#arrRev[@]} in
6 ) REV_SERVER_CIDR="${arrRev[3]}.${arrRev[2]}.${arrRev[1]}.${arrRev[0]}/32";;
5 ) REV_SERVER_CIDR="${arrRev[2]}.${arrRev[1]}.${arrRev[0]}.0/24";;
4 ) REV_SERVER_CIDR="${arrRev[1]}.${arrRev[0]}.0.0/16";;
3 ) REV_SERVER_CIDR="${arrRev[0]}.0.0.0/8";;
esac
else
# Set REV_SERVER_CIDR to whatever value it was set to
REV_SERVER_CIDR="${CONDITIONAL_FORWARDING_REVERSE}"
fi
# If REV_SERVER_CIDR is not converted by the above, then use the REV_SERVER_TARGET variable to derive it
if [ -z "${REV_SERVER_CIDR}" ]; then
# Convert existing input to /24 subnet (preserves legacy behavior)
# This sed converts "192.168.1.2" to "192.168.1.0/24"
# shellcheck disable=2001
REV_SERVER_CIDR="$(sed "s+\\.[0-9]*$+\\.0/24+" <<< "${REV_SERVER_TARGET}")"
fi
addOrEditKeyValPair "${setupVars}" "REV_SERVER_CIDR" "${REV_SERVER_CIDR}"
# Remove obsolete settings from setupVars.conf
removeKey "${setupVars}" "CONDITIONAL_FORWARDING"
removeKey "${setupVars}" "CONDITIONAL_FORWARDING_REVERSE"
removeKey "${setupVars}" "CONDITIONAL_FORWARDING_DOMAIN"
removeKey "${setupVars}" "CONDITIONAL_FORWARDING_IP"
fi
removeKey "${dnsmasqconfig}" "rev-server"
if [[ "${REV_SERVER}" == true ]]; then
addKey "${dnsmasqconfig}" "rev-server=${REV_SERVER_CIDR},${REV_SERVER_TARGET}"
if [ -n "${REV_SERVER_DOMAIN}" ]; then
# Forward local domain names to the CF target, too
addKey "${dnsmasqconfig}" "server=/${REV_SERVER_DOMAIN}/${REV_SERVER_TARGET}"
fi
if [[ "${DNS_FQDN_REQUIRED}" != true ]]; then
# Forward unqualified names to the CF target only when the "never
# forward non-FQDN" option is unticked
addKey "${dnsmasqconfig}" "server=//${REV_SERVER_TARGET}"
fi
fi
# We need to process DHCP settings here as well to account for possible
# changes in the non-FQDN forwarding. This cannot be done in 01-pihole.conf
# as we don't want to delete all local=/.../ lines so it's much safer to
# simply rewrite the entire corresponding config file (which is what the
# DHCP settings subroutine is doing)
ProcessDHCPSettings
}
SetDNSServers() {
# Save setting to file
removeKey "${setupVars}" "PIHOLE_DNS"
IFS=',' read -r -a array <<< "${args[2]}"
for index in "${!array[@]}"
do
# Replace possible "\#" by "#". This fixes web#1427
local ip
ip="${array[index]//\\#/#}"
if valid_ip "${ip}" || valid_ip6 "${ip}" ; then
addOrEditKeyValPair "${setupVars}" "PIHOLE_DNS_$((index+1))" "${ip}"
else
echo -e " ${CROSS} Invalid IP has been passed"
exit 1
fi
done
if [[ "${args[3]}" == "domain-needed" ]]; then
addOrEditKeyValPair "${setupVars}" "DNS_FQDN_REQUIRED" "true"
else
addOrEditKeyValPair "${setupVars}" "DNS_FQDN_REQUIRED" "false"
fi
if [[ "${args[4]}" == "bogus-priv" ]]; then
addOrEditKeyValPair "${setupVars}" "DNS_BOGUS_PRIV" "true"
else
addOrEditKeyValPair "${setupVars}" "DNS_BOGUS_PRIV" "false"
fi
if [[ "${args[5]}" == "dnssec" ]]; then
addOrEditKeyValPair "${setupVars}" "DNSSEC" "true"
else
addOrEditKeyValPair "${setupVars}" "DNSSEC" "false"
fi
if [[ "${args[6]}" == "rev-server" ]]; then
addOrEditKeyValPair "${setupVars}" "REV_SERVER" "true"
addOrEditKeyValPair "${setupVars}" "REV_SERVER_CIDR" "${args[7]}"
addOrEditKeyValPair "${setupVars}" "REV_SERVER_TARGET" "${args[8]}"
addOrEditKeyValPair "${setupVars}" "REV_SERVER_DOMAIN" "${args[9]}"
else
addOrEditKeyValPair "${setupVars}" "REV_SERVER" "false"
fi
ProcessDNSSettings
# Restart dnsmasq to load new configuration
RestartDNS
}
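# Illustrative invocation (argument layout inferred from the code above, not an official example):
#   pihole -a setdns "8.8.8.8,8.8.4.4" domain-needed bogus-priv dnssec rev-server 192.168.1.0/24 192.168.1.1 lan
# stores PIHOLE_DNS_1/PIHOLE_DNS_2, sets DNS_FQDN_REQUIRED, DNS_BOGUS_PRIV and DNSSEC to true
# and enables the reverse (conditional forwarding) server settings.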
SetExcludeDomains() {
addOrEditKeyValPair "${setupVars}" "API_EXCLUDE_DOMAINS" "${args[2]}"
}
SetExcludeClients() {
addOrEditKeyValPair "${setupVars}" "API_EXCLUDE_CLIENTS" "${args[2]}"
}
Poweroff(){
nohup bash -c "sleep 5; poweroff" &> /dev/null </dev/null &
}
Reboot() {
nohup bash -c "sleep 5; reboot" &> /dev/null </dev/null &
}
RestartDNS() {
"${PI_HOLE_BIN_DIR}"/pihole restartdns
}
SetQueryLogOptions() {
addOrEditKeyValPair "${setupVars}" "API_QUERY_LOG_SHOW" "${args[2]}"
}
ProcessDHCPSettings() {
source "${setupVars}"
if [[ "${DHCP_ACTIVE}" == "true" ]]; then
interface="${PIHOLE_INTERFACE}"
# Use eth0 as fallback interface
if [ -z "${interface}" ]; then
interface="eth0"
fi
if [[ "${PIHOLE_DOMAIN}" == "" ]]; then
PIHOLE_DOMAIN="lan"
addOrEditKeyValPair "${setupVars}" "PIHOLE_DOMAIN" "${PIHOLE_DOMAIN}"
fi
if [[ "${DHCP_LEASETIME}" == "0" ]]; then
leasetime="infinite"
elif [[ "${DHCP_LEASETIME}" == "" ]]; then
leasetime="24h"
addOrEditKeyValPair "${setupVars}" "DHCP_LEASETIME" "24"
else
leasetime="${DHCP_LEASETIME}h"
fi
# Write settings to file
echo "###############################################################################
# DHCP SERVER CONFIG FILE AUTOMATICALLY POPULATED BY PI-HOLE WEB INTERFACE. #
# ANY CHANGES MADE TO THIS FILE WILL BE LOST ON CHANGE #
###############################################################################
dhcp-authoritative
dhcp-range=${DHCP_START},${DHCP_END},${leasetime}
dhcp-option=option:router,${DHCP_ROUTER}
dhcp-leasefile=/etc/pihole/dhcp.leases
#quiet-dhcp
" > "${dhcpconfig}"
chmod 644 "${dhcpconfig}"
if [[ "${PIHOLE_DOMAIN}" != "none" ]]; then
echo "domain=${PIHOLE_DOMAIN}" >> "${dhcpconfig}"
# When there is a Pi-hole domain set and "Never forward non-FQDNs" is
# ticked, we add `local=/domain/` to tell FTL that this domain is purely
# local and FTL may answer queries from /etc/hosts or DHCP but should
# never forward queries on that domain to any upstream servers
if [[ "${DNS_FQDN_REQUIRED}" == true ]]; then
echo "local=/${PIHOLE_DOMAIN}/" >> "${dhcpconfig}"
fi
fi
# Sourced from setupVars
# shellcheck disable=SC2154
if [[ "${DHCP_rapid_commit}" == "true" ]]; then
echo "dhcp-rapid-commit" >> "${dhcpconfig}"
fi
if [[ "${DHCP_IPv6}" == "true" ]]; then
echo "#quiet-dhcp6
#enable-ra
dhcp-option=option6:dns-server,[::]
dhcp-range=::,constructor:${interface},ra-names,ra-stateless,64
" >> "${dhcpconfig}"
fi
else
if [[ -f "${dhcpconfig}" ]]; then
rm "${dhcpconfig}" &> /dev/null
fi
fi
}
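# Illustrative output (example values only): with DHCP_START=192.168.1.201, DHCP_END=192.168.1.251,
# DHCP_ROUTER=192.168.1.1 and DHCP_LEASETIME=24, the block above writes roughly:
#   dhcp-authoritative
#   dhcp-range=192.168.1.201,192.168.1.251,24h
#   dhcp-option=option:router,192.168.1.1
#   dhcp-leasefile=/etc/pihole/dhcp.leases
# plus "domain=lan" when PIHOLE_DOMAIN is set and not "none".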
EnableDHCP() {
addOrEditKeyValPair "${setupVars}" "DHCP_ACTIVE" "true"
addOrEditKeyValPair "${setupVars}" "DHCP_START" "${args[2]}"
addOrEditKeyValPair "${setupVars}" "DHCP_END" "${args[3]}"
addOrEditKeyValPair "${setupVars}" "DHCP_ROUTER" "${args[4]}"
addOrEditKeyValPair "${setupVars}" "DHCP_LEASETIME" "${args[5]}"
addOrEditKeyValPair "${setupVars}" "PIHOLE_DOMAIN" "${args[6]}"
addOrEditKeyValPair "${setupVars}" "DHCP_IPv6" "${args[7]}"
addOrEditKeyValPair "${setupVars}" "DHCP_rapid_commit" "${args[8]}"
# Remove possible old setting from file
removeKey "${dnsmasqconfig}" "dhcp-"
removeKey "${dnsmasqconfig}" "quiet-dhcp"
# If a DHCP client claims that its name is "wpad", ignore that.
# This fixes a security hole. see CERT Vulnerability VU#598349
# We also ignore "localhost" as Windows behaves strangely if a
# device claims this host name
addKey "${dnsmasqconfig}" "dhcp-name-match=set:hostname-ignore,wpad
dhcp-name-match=set:hostname-ignore,localhost
dhcp-ignore-names=tag:hostname-ignore"
ProcessDHCPSettings
RestartDNS
}
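# Illustrative invocation (argument order taken from the assignments above, values are examples):
#   pihole -a enabledhcp 192.168.1.201 192.168.1.251 192.168.1.1 24 lan false true
# i.e. range start, range end, router, lease time in hours, domain, IPv6 on/off, rapid commit on/off.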
DisableDHCP() {
addOrEditKeyValPair "${setupVars}" "DHCP_ACTIVE" "false"
# Remove possible old setting from file
removeKey "${dnsmasqconfig}" "dhcp-"
removeKey "${dnsmasqconfig}" "quiet-dhcp"
ProcessDHCPSettings
RestartDNS
}
SetWebUILayout() {
addOrEditKeyValPair "${setupVars}" "WEBUIBOXEDLAYOUT" "${args[2]}"
}
SetWebUITheme() {
addOrEditKeyValPair "${setupVars}" "WEBTHEME" "${args[2]}"
}
CheckUrl(){
local regex check_url
# Check for characters NOT allowed in URLs
regex="[^a-zA-Z0-9:/?&%=~._()-;]"
# Remove the first "@" that appears after the scheme and before the domain
# \1 is the optional scheme, \2 is the userinfo
check_url="$( sed -re 's#([^:/]*://)?([^/]+)@#\1\2#' <<< "$1" )"
if [[ "${check_url}" =~ ${regex} ]]; then
return 1
else
return 0
fi
}
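# Illustrative checks (not part of the upstream script):
#   CheckUrl "https://example.com/list.txt"      -> returns 0 (only allowed characters)
#   CheckUrl "https://user@example.com/list.txt" -> returns 0 (the "@" is stripped before the check)
#   CheckUrl "https://example.com/bad list.txt"  -> returns 1 (space is not an allowed character)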
CustomizeAdLists() {
local address
address="${args[3]}"
local comment
comment="${args[4]}"
if CheckUrl "${address}"; then
if [[ "${args[2]}" == "enable" ]]; then
pihole-FTL sqlite3 -ni "${gravityDBfile}" "UPDATE adlist SET enabled = 1 WHERE address = '${address}'"
elif [[ "${args[2]}" == "disable" ]]; then
pihole-FTL sqlite3 -ni "${gravityDBfile}" "UPDATE adlist SET enabled = 0 WHERE address = '${address}'"
elif [[ "${args[2]}" == "add" ]]; then
pihole-FTL sqlite3 -ni "${gravityDBfile}" "INSERT OR IGNORE INTO adlist (address, comment) VALUES ('${address}', '${comment}')"
elif [[ "${args[2]}" == "del" ]]; then
pihole-FTL sqlite3 -ni "${gravityDBfile}" "DELETE FROM adlist WHERE address = '${address}'"
else
echo "Not permitted"
return 1
fi
else
echo "Invalid Url"
return 1
fi
}
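# Illustrative invocations (derived from the argument handling above, the URL is an example):
#   pihole -a adlist add "https://example.com/hosts.txt" "my list"
#   pihole -a adlist disable "https://example.com/hosts.txt"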
AddDHCPStaticAddress() {
mac="${args[2]}"
ip="${args[3]}"
host="${args[4]}"
if [[ "${ip}" == "noip" ]]; then
# Static host name
echo "dhcp-host=${mac},${host}" >> "${dhcpstaticconfig}"
elif [[ "${host}" == "nohost" ]]; then
# Static IP
echo "dhcp-host=${mac},${ip}" >> "${dhcpstaticconfig}"
else
# Full info given
echo "dhcp-host=${mac},${ip},${host}" >> "${dhcpstaticconfig}"
fi
}
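# Illustrative result (example values): "addstaticdhcp AA:BB:CC:DD:EE:FF 192.168.1.50 nas"
# appends "dhcp-host=AA:BB:CC:DD:EE:FF,192.168.1.50,nas" to ${dhcpstaticconfig};
# passing "noip" or "nohost" simply omits the corresponding field.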
RemoveDHCPStaticAddress() {
mac="${args[2]}"
if [[ "$mac" =~ ^([0-9A-Fa-f]{2}[:-]){5}([0-9A-Fa-f]{2})$ ]]; then
sed -i "/dhcp-host=${mac}.*/d" "${dhcpstaticconfig}"
else
echo " ${CROSS} Invalid Mac Passed!"
exit 1
fi
}
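# Note: the regex above accepts colon- or hyphen-separated MACs such as "aa:bb:cc:dd:ee:ff"
# or "AA-BB-CC-DD-EE-FF"; a bare "aabbccddeeff" is rejected.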
SetListeningMode() {
source "${setupVars}"
if [[ "$3" == "-h" ]] || [[ "$3" == "--help" ]]; then
echo "Usage: pihole -a -i [interface]
Example: 'pihole -a -i local'
Specify dnsmasq's network interface listening behavior
Interfaces:
local Only respond to queries from devices that
are at most one hop away (local devices)
single Respond only on interface ${PIHOLE_INTERFACE}
bind Bind only on interface ${PIHOLE_INTERFACE}
all Listen on all interfaces, permit all origins"
exit 0
fi
if [[ "${args[2]}" == "all" ]]; then
echo -e " ${INFO} Listening on all interfaces, permitting all origins. Please use a firewall!"
addOrEditKeyValPair "${setupVars}" "DNSMASQ_LISTENING" "all"
elif [[ "${args[2]}" == "local" ]]; then
echo -e " ${INFO} Listening on all interfaces, permitting origins from one hop away (LAN)"
addOrEditKeyValPair "${setupVars}" "DNSMASQ_LISTENING" "local"
elif [[ "${args[2]}" == "bind" ]]; then
echo -e " ${INFO} Binding on interface ${PIHOLE_INTERFACE}"
addOrEditKeyValPair "${setupVars}" "DNSMASQ_LISTENING" "bind"
else
echo -e " ${INFO} Listening only on interface ${PIHOLE_INTERFACE}"
addOrEditKeyValPair "${setupVars}" "DNSMASQ_LISTENING" "single"
fi
# Don't restart DNS server yet because other settings
# will be applied afterwards if "-web" is set
if [[ "${args[3]}" != "-web" ]]; then
ProcessDNSSettings
# Restart dnsmasq to load new configuration
RestartDNS
fi
}
Teleporter() {
local filename
filename="${args[2]}"
if [[ -z "${filename}" ]]; then
local datetimestamp
local host
datetimestamp=$(date "+%Y-%m-%d_%H-%M-%S")
host=$(hostname)
host="${host//./_}"
filename="pi-hole-${host:-noname}-teleporter_${datetimestamp}.tar.gz"
fi
php "${webroot}/admin/scripts/pi-hole/php/teleporter.php" > "${filename}"
}
checkDomain()
{
local domain validDomain
# Convert to lowercase
domain="${1,,}"
validDomain=$(grep -P "^((-|_)*[a-z0-9]((-|_)*[a-z0-9])*(-|_)*)(\\.(-|_)*([a-z0-9]((-|_)*[a-z0-9])*))*$" <<< "${domain}") # Valid chars check
validDomain=$(grep -P "^[^\\.]{1,63}(\\.[^\\.]{1,63})*$" <<< "${validDomain}") # Length of each label
echo "${validDomain}"
}
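# Illustrative checks (not part of the upstream script):
#   checkDomain "Pi-hole.NET"  -> prints "pi-hole.net" (lower-cased, passes both checks)
#   checkDomain "bad domain"   -> prints an empty string (the space fails the character check)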
escapeDots()
{
# ShellCheck suggests the bashism ${variable//search/replace} instead
# shellcheck disable=SC2001
escaped=$(echo "$1" | sed 's/\./\\./g')
echo "${escaped}"
}
addAudit()
{
shift # skip "-a"
shift # skip "audit"
local domains validDomain
domains=""
for domain in "$@"
do
# Check domain to be added. Only continue if it is valid
validDomain="$(checkDomain "${domain}")"
if [[ -n "${validDomain}" ]]; then
# Put a comma between domains when there is
# more than one domain to be added
# SQL INSERT allows adding multiple rows at once using the format
## INSERT INTO table (domain) VALUES ('abc.de'),('fgh.ij'),('klm.no'),('pqr.st');
if [[ -n "${domains}" ]]; then
domains="${domains},"
fi
domains="${domains}('${domain}')"
fi
done
# Insert only the domain here. The date_added field will be
# filled with its default value (date_added = current timestamp)
pihole-FTL sqlite3 -ni "${gravityDBfile}" "INSERT INTO domain_audit (domain) VALUES ${domains};"
}
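# Illustrative example (hypothetical domains): "pihole -a audit ads.example.com tracker.example.net"
# builds and runs the single statement:
#   INSERT INTO domain_audit (domain) VALUES ('ads.example.com'),('tracker.example.net');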
clearAudit()
{
pihole-FTL sqlite3 -ni "${gravityDBfile}" "DELETE FROM domain_audit;"
}
SetPrivacyLevel() {
# Set privacy level. Minimum is 0, maximum is 3
if [ "${args[2]}" -ge 0 ] && [ "${args[2]}" -le 3 ]; then
addOrEditKeyValPair "${FTLconf}" "PRIVACYLEVEL" "${args[2]}"
pihole restartdns reload-lists
fi
}
AddCustomDNSAddress() {
echo -e " ${TICK} Adding custom DNS entry..."
ip="${args[2]}"
host="${args[3]}"
reload="${args[4]}"
validHost="$(checkDomain "${host}")"
if [[ -n "${validHost}" ]]; then
if valid_ip "${ip}" || valid_ip6 "${ip}" ; then
echo "${ip} ${validHost}" >> "${dnscustomfile}"
else
echo -e " ${CROSS} Invalid IP has been passed"
exit 1
fi
else
echo " ${CROSS} Invalid Domain passed!"
exit 1
fi
# Restart dnsmasq to load new custom DNS entries only if $reload not false
if [[ ! $reload == "false" ]]; then
RestartDNS
fi
}
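# Illustrative example (hypothetical values): "addcustomdns 192.168.1.10 nas.lan true"
# appends the line "192.168.1.10 nas.lan" to ${dnscustomfile} and restarts the resolver.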
RemoveCustomDNSAddress() {
echo -e " ${TICK} Removing custom DNS entry..."
ip="${args[2]}"
host="${args[3]}"
reload="${args[4]}"
validHost="$(checkDomain "${host}")"
if [[ -n "${validHost}" ]]; then
if valid_ip "${ip}" || valid_ip6 "${ip}" ; then
validHost=$(escapeDots "${validHost}")
sed -i "/^${ip} ${validHost}$/Id" "${dnscustomfile}"
else
echo -e " ${CROSS} Invalid IP has been passed"
exit 1
fi
else
echo " ${CROSS} Invalid Domain passed!"
exit 1
fi
# Restart dnsmasq to load new custom DNS entries only if reload is not false
if [[ ! $reload == "false" ]]; then
RestartDNS
fi
}
AddCustomCNAMERecord() {
echo -e " ${TICK} Adding custom CNAME record..."
domain="${args[2]}"
target="${args[3]}"
reload="${args[4]}"
validDomain="$(checkDomain "${domain}")"
if [[ -n "${validDomain}" ]]; then
validTarget="$(checkDomain "${target}")"
if [[ -n "${validTarget}" ]]; then
if [ "${validDomain}" = "${validTarget}" ]; then
echo " ${CROSS} Domain and target are the same. This would cause a DNS loop."
exit 1
else
echo "cname=${validDomain},${validTarget}" >> "${dnscustomcnamefile}"
fi
else
echo " ${CROSS} Invalid Target Passed!"
exit 1
fi
else
echo " ${CROSS} Invalid Domain passed!"
exit 1
fi
# Restart dnsmasq to load new custom CNAME records only if reload is not false
if [[ ! $reload == "false" ]]; then
RestartDNS
fi
}
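# Illustrative example (hypothetical names): "addcustomcname media.lan nas.lan false"
# appends "cname=media.lan,nas.lan" to ${dnscustomcnamefile} without restarting the resolver.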
RemoveCustomCNAMERecord() {
echo -e " ${TICK} Removing custom CNAME record..."
domain="${args[2]}"
target="${args[3]}"
reload="${args[4]}"
validDomain="$(checkDomain "${domain}")"
if [[ -n "${validDomain}" ]]; then
validTarget="$(checkDomain "${target}")"
if [[ -n "${validTarget}" ]]; then
validDomain=$(escapeDots "${validDomain}")
validTarget=$(escapeDots "${validTarget}")
sed -i "/^cname=${validDomain},${validTarget}$/Id" "${dnscustomcnamefile}"
else
echo " ${CROSS} Invalid Target Passed!"
exit 1
fi
else
echo " ${CROSS} Invalid Domain passed!"
exit 1
fi
# Restart dnsmasq to update removed custom CNAME records only if $reload not false
if [[ ! $reload == "false" ]]; then
RestartDNS
fi
}
SetRateLimit() {
local rate_limit_count rate_limit_interval reload
rate_limit_count="${args[2]}"
rate_limit_interval="${args[3]}"
reload="${args[4]}"
# Set the rate-limit setting if the values are valid
if [ "${rate_limit_count}" -ge 0 ] && [ "${rate_limit_interval}" -ge 0 ]; then
addOrEditKeyValPair "${FTLconf}" "RATE_LIMIT" "${rate_limit_count}/${rate_limit_interval}"
fi
# Restart FTL to update rate-limit settings only if $reload not false
if [[ ! $reload == "false" ]]; then
RestartDNS
fi
}
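# Illustrative example: "pihole -a ratelimit 1000 60" stores RATE_LIMIT=1000/60, i.e. at most
# 1000 queries per client per 60 seconds; FTL is restarted unless a trailing "false" is passed.
# (Setting both values to 0 is understood by FTL as "no rate limiting".)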
main() {
args=("$@")
case "${args[1]}" in
"-p" | "password" ) SetWebPassword;;
"-c" | "celsius" ) unit="C"; SetTemperatureUnit;;
"-f" | "fahrenheit" ) unit="F"; SetTemperatureUnit;;
"-k" | "kelvin" ) unit="K"; SetTemperatureUnit;;
"setdns" ) SetDNSServers;;
"setexcludedomains" ) SetExcludeDomains;;
"setexcludeclients" ) SetExcludeClients;;
"poweroff" ) Poweroff;;
"reboot" ) Reboot;;
"restartdns" ) RestartDNS;;
"setquerylog" ) SetQueryLogOptions;;
"enabledhcp" ) EnableDHCP;;
"disabledhcp" ) DisableDHCP;;
"layout" ) SetWebUILayout;;
"theme" ) SetWebUITheme;;
"-h" | "--help" ) helpFunc;;
"addstaticdhcp" ) AddDHCPStaticAddress;;
"removestaticdhcp" ) RemoveDHCPStaticAddress;;
"-i" | "interface" ) SetListeningMode "$@";;
"-t" | "teleporter" ) Teleporter;;
"adlist" ) CustomizeAdLists;;
"audit" ) addAudit "$@";;
"clearaudit" ) clearAudit;;
"-l" | "privacylevel" ) SetPrivacyLevel;;
"addcustomdns" ) AddCustomDNSAddress;;
"removecustomdns" ) RemoveCustomDNSAddress;;
"addcustomcname" ) AddCustomCNAMERecord;;
"removecustomcname" ) RemoveCustomCNAMERecord;;
"ratelimit" ) SetRateLimit;;
* ) helpFunc;;
esac
shift
if [[ $# = 0 ]]; then
helpFunc
fi
}

@@ -27,7 +27,7 @@ CREATE TABLE domainlist
 CREATE TABLE adlist
 (
 id INTEGER PRIMARY KEY AUTOINCREMENT,
-address TEXT UNIQUE NOT NULL,
+address TEXT NOT NULL,
 enabled BOOLEAN NOT NULL DEFAULT 1,
 date_added INTEGER NOT NULL DEFAULT (cast(strftime('%s', 'now') as int)),
 date_modified INTEGER NOT NULL DEFAULT (cast(strftime('%s', 'now') as int)),
@@ -35,7 +35,10 @@ CREATE TABLE adlist
 date_updated INTEGER,
 number INTEGER NOT NULL DEFAULT 0,
 invalid_domains INTEGER NOT NULL DEFAULT 0,
-status INTEGER NOT NULL DEFAULT 0
+status INTEGER NOT NULL DEFAULT 0,
+abp_entries INTEGER NOT NULL DEFAULT 0,
+type INTEGER NOT NULL DEFAULT 0,
+UNIQUE(address, type)
 );
 
 CREATE TABLE adlist_by_group
@@ -51,13 +54,19 @@ CREATE TABLE gravity
 adlist_id INTEGER NOT NULL REFERENCES adlist (id)
 );
 
+CREATE TABLE antigravity
+(
+domain TEXT NOT NULL,
+adlist_id INTEGER NOT NULL REFERENCES adlist (id)
+);
+
 CREATE TABLE info
 (
 property TEXT PRIMARY KEY,
 value TEXT NOT NULL
 );
 
-INSERT INTO "info" VALUES('version','15');
+INSERT INTO "info" VALUES('version','18');
 
 CREATE TABLE domain_audit
 (
@@ -136,14 +145,21 @@ CREATE VIEW vw_regex_blacklist AS SELECT domain, domainlist.id AS id, domainlist
 AND domainlist.type = 3
 ORDER BY domainlist.id;
 
-CREATE VIEW vw_gravity AS SELECT domain, adlist_by_group.group_id AS group_id
+CREATE VIEW vw_gravity AS SELECT domain, adlist.id AS adlist_id, adlist_by_group.group_id AS group_id
 FROM gravity
 LEFT JOIN adlist_by_group ON adlist_by_group.adlist_id = gravity.adlist_id
 LEFT JOIN adlist ON adlist.id = gravity.adlist_id
 LEFT JOIN "group" ON "group".id = adlist_by_group.group_id
 WHERE adlist.enabled = 1 AND (adlist_by_group.group_id IS NULL OR "group".enabled = 1);
 
-CREATE VIEW vw_adlist AS SELECT DISTINCT address, id
+CREATE VIEW vw_antigravity AS SELECT domain, adlist.id AS adlist_id, adlist_by_group.group_id AS group_id
+FROM antigravity
+LEFT JOIN adlist_by_group ON adlist_by_group.adlist_id = antigravity.adlist_id
+LEFT JOIN adlist ON adlist.id = antigravity.adlist_id
+LEFT JOIN "group" ON "group".id = adlist_by_group.group_id
+WHERE adlist.enabled = 1 AND (adlist_by_group.group_id IS NULL OR "group".enabled = 1) AND adlist.type = 1;
+
+CREATE VIEW vw_adlist AS SELECT DISTINCT address, id, type
 FROM adlist
 WHERE enabled = 1
 ORDER BY id;

@@ -19,3 +19,14 @@
 notifempty
 nomail
 }
+
+/var/log/pihole/webserver.log {
+# su #
+weekly
+copytruncate
+rotate 3
+compress
+delaycompress
+notifempty
+nomail
+}

@@ -9,22 +9,30 @@ utilsfile="${PI_HOLE_SCRIPT_DIR}/utils.sh"
 # Get file paths
 FTL_PID_FILE="$(getFTLPIDFile)"
 
-# Touch files to ensure they exist (create if non-existing, preserve if existing)
+# Ensure that permissions are set so that pihole-FTL can edit all necessary files
 # shellcheck disable=SC2174
-mkdir -pm 0755 /run/pihole /var/log/pihole
+mkdir -pm 0640 /var/log/pihole
+chown -R pihole:pihole /etc/pihole /var/log/pihole
+chmod -R 0640 /var/log/pihole
+chmod -R 0660 /etc/pihole
+
+# Logrotate config file need to be owned by root and must not be writable by group and others
+chown root:root /etc/pihole/logrotate
+chmod 0644 /etc/pihole/logrotate
+
+# allow all users to enter the directories
+chmod 0755 /etc/pihole /var/log/pihole
+
+# allow pihole to access subdirs in /etc/pihole (sets execution bit on dirs)
+# credits https://stackoverflow.com/a/11512211
+find /etc/pihole -type d -exec chmod 0755 {} \;
+
+# Touch files to ensure they exist (create if non-existing, preserve if existing)
 [ -f "${FTL_PID_FILE}" ] || install -D -m 644 -o pihole -g pihole /dev/null "${FTL_PID_FILE}"
-[ -f /var/log/pihole/FTL.log ] || install -m 644 -o pihole -g pihole /dev/null /var/log/pihole/FTL.log
+[ -f /var/log/pihole/FTL.log ] || install -m 640 -o pihole -g pihole /dev/null /var/log/pihole/FTL.log
 [ -f /var/log/pihole/pihole.log ] || install -m 640 -o pihole -g pihole /dev/null /var/log/pihole/pihole.log
 [ -f /etc/pihole/dhcp.leases ] || install -m 644 -o pihole -g pihole /dev/null /etc/pihole/dhcp.leases
 
-# Ensure that permissions are set so that pihole-FTL can edit all necessary files
-chown pihole:pihole /run/pihole /etc/pihole /var/log/pihole /var/log/pihole/FTL.log /var/log/pihole/pihole.log /etc/pihole/dhcp.leases
-# Ensure that permissions are set so that pihole-FTL can edit the files. We ignore errors as the file may not (yet) exist
-chmod -f 0644 /etc/pihole/macvendor.db /etc/pihole/dhcp.leases /var/log/pihole/FTL.log
-chmod -f 0640 /var/log/pihole/pihole.log
-# Chown database files to the user FTL runs as. We ignore errors as the files may not (yet) exist
-chown -f pihole:pihole /etc/pihole/pihole-FTL.db /etc/pihole/gravity.db /etc/pihole/macvendor.db
-# Chmod database file permissions so that the pihole group (web interface) can edit the file. We ignore errors as the files may not (yet) exist
-chmod -f 0664 /etc/pihole/pihole-FTL.db
 # Backward compatibility for user-scripts that still expect log files in /var/log instead of /var/log/pihole
 # Should be removed with Pi-hole v6.0

@ -1,9 +0,0 @@
# Pi-hole: A black hole for Internet advertisements
# (c) 2017 Pi-hole, LLC (https://pi-hole.net)
# Network-wide ad blocking via your own hardware.
#
# Allows the WebUI to use Pi-hole commands
#
# This file is copyright under the latest version of the EUPL.
# Please see LICENSE file for your rights under this license.
#

@@ -1,5 +1,5 @@
 _pihole() {
-local cur prev opts opts_admin opts_checkout opts_chronometer opts_debug opts_interface opts_logging opts_privacy opts_query opts_update opts_version
+local cur prev opts opts_checkout opts_debug opts_logging opts_query opts_update opts_version
 COMPREPLY=()
 cur="${COMP_WORDS[COMP_CWORD]}"
 prev="${COMP_WORDS[COMP_CWORD-1]}"
@@ -7,25 +7,17 @@ _pihole() {
 case "${prev}" in
 "pihole")
-opts="admin blacklist checkout chronometer debug disable enable flush help logging query reconfigure regex restartdns status tail uninstall updateGravity updatePihole version wildcard whitelist arpflush"
+opts="blacklist checkout debug disable enable flush help logging query reconfigure regex restartdns status tail uninstall updateGravity updatePihole version wildcard whitelist arpflush"
 COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
 ;;
 "whitelist"|"blacklist"|"wildcard"|"regex")
 opts_lists="\--delmode \--noreload \--quiet \--list \--nuke"
 COMPREPLY=( $(compgen -W "${opts_lists}" -- ${cur}) )
 ;;
-"admin")
-opts_admin="celsius fahrenheit interface kelvin password privacylevel"
-COMPREPLY=( $(compgen -W "${opts_admin}" -- ${cur}) )
-;;
 "checkout")
 opts_checkout="core ftl web master dev"
 COMPREPLY=( $(compgen -W "${opts_checkout}" -- ${cur}) )
 ;;
-"chronometer")
-opts_chronometer="\--exit \--json \--refresh"
-COMPREPLY=( $(compgen -W "${opts_chronometer}" -- ${cur}) )
-;;
 "debug")
 opts_debug="-a"
 COMPREPLY=( $(compgen -W "${opts_debug}" -- ${cur}) )
@@ -35,33 +27,13 @@ _pihole() {
 COMPREPLY=( $(compgen -W "${opts_logging}" -- ${cur}) )
 ;;
 "query")
-opts_query="-adlist -all -exact"
+opts_query="--partial --all"
 COMPREPLY=( $(compgen -W "${opts_query}" -- ${cur}) )
 ;;
 "updatePihole"|"-up")
 opts_update="--check-only"
 COMPREPLY=( $(compgen -W "${opts_update}" -- ${cur}) )
 ;;
-"version")
-opts_version="\--admin \--current \--ftl \--hash \--latest \--pihole"
-COMPREPLY=( $(compgen -W "${opts_version}" -- ${cur}) )
-;;
-"interface")
-if ( [[ "$prev2" == "admin" ]] || [[ "$prev2" == "-a" ]] ); then
-opts_interface="$(cat /proc/net/dev | cut -d: -s -f1)"
-COMPREPLY=( $(compgen -W "${opts_interface}" -- ${cur}) )
-else
-return 1
-fi
-;;
-"privacylevel")
-if ( [[ "$prev2" == "admin" ]] || [[ "$prev2" == "-a" ]] ); then
-opts_privacy="0 1 2 3"
-COMPREPLY=( $(compgen -W "${opts_privacy}" -- ${cur}) )
-else
-return 1
-fi
-;;
 "core"|"admin"|"ftl")
 if [[ "$prev2" == "checkout" ]]; then
 opts_checkout="master dev"

@ -1,648 +0,0 @@
# Configuration file for dnsmasq.
#
# Format is one option per line, legal options are the same
# as the long options legal on the command line. See
# "/usr/sbin/dnsmasq --help" or "man 8 dnsmasq" for details.
# Listen on this specific port instead of the standard DNS port
# (53). Setting this to zero completely disables DNS function,
# leaving only DHCP and/or TFTP.
#port=5353
# The following two options make you a better netizen, since they
# tell dnsmasq to filter out queries which the public DNS cannot
# answer, and which load the servers (especially the root servers)
# unnecessarily. If you have a dial-on-demand link they also stop
# these requests from bringing up the link unnecessarily.
# Never forward plain names (without a dot or domain part)
#domain-needed
# Never forward addresses in the non-routed address spaces.
#bogus-priv
# Uncomment these to enable DNSSEC validation and caching:
# (Requires dnsmasq to be built with DNSSEC option.)
#conf-file=%%PREFIX%%/share/dnsmasq/trust-anchors.conf
#dnssec
# Replies which are not DNSSEC signed may be legitimate, because the domain
# is unsigned, or may be forgeries. Setting this option tells dnsmasq to
# check that an unsigned reply is OK, by finding a secure proof that a DS
# record somewhere between the root and the domain does not exist.
# The cost of setting this is that even queries in unsigned domains will need
# one or more extra DNS queries to verify.
#dnssec-check-unsigned
# Uncomment this to filter useless windows-originated DNS requests
# which can trigger dial-on-demand links needlessly.
# Note that (amongst other things) this blocks all SRV requests,
# so don't use it if you use eg Kerberos, SIP, XMPP or Google-talk.
# This option only affects forwarding, SRV records originating for
# dnsmasq (via srv-host= lines) are not suppressed by it.
#filterwin2k
# Change this line if you want DNS to get its upstream servers from
# somewhere other than /etc/resolv.conf
#resolv-file=
# By default, dnsmasq will send queries to any of the upstream
# servers it knows about and tries to favor servers that are known
# to be up. Uncommenting this forces dnsmasq to try each query
# with each server strictly in the order they appear in
# /etc/resolv.conf
#strict-order
# If you don't want dnsmasq to read /etc/resolv.conf or any other
# file, getting its servers from this file instead (see below), then
# uncomment this.
#no-resolv
# If you don't want dnsmasq to poll /etc/resolv.conf or other resolv
# files for changes and re-read them then uncomment this.
#no-poll
# Add other name servers here, with domain specs if they are for
# non-public domains.
#server=/localnet/192.168.0.1
# Example of routing PTR queries to nameservers: this will send all
# address->name queries for 192.168.3/24 to nameserver 10.1.2.3
#server=/3.168.192.in-addr.arpa/10.1.2.3
# Add local-only domains here, queries in these domains are answered
# from /etc/hosts or DHCP only.
#local=/localnet/
# Add domains which you want to force to an IP address here.
# The example below sends any host in double-click.net to a local
# web-server.
#address=/double-click.net/127.0.0.1
# --address (and --server) work with IPv6 addresses too.
#address=/www.thekelleys.org.uk/fe80::20d:60ff:fe36:f83
# Add the IPs of all queries to yahoo.com, google.com, and their
# subdomains to the vpn and search ipsets:
#ipset=/yahoo.com/google.com/vpn,search
# You can control how dnsmasq talks to a server: this forces
# queries to 10.1.2.3 to be routed via eth1
# server=10.1.2.3@eth1
# and this sets the source (ie local) address used to talk to
# 10.1.2.3 to 192.168.1.1 port 55 (there must be an interface with that
# IP on the machine, obviously).
# server=10.1.2.3@192.168.1.1#55
# If you want dnsmasq to change uid and gid to something other
# than the default, edit the following lines.
#user=
#group=
# If you want dnsmasq to listen for DHCP and DNS requests only on
# specified interfaces (and the loopback) give the name of the
# interface (eg eth0) here.
# Repeat the line for more than one interface.
#interface=
# Or you can specify which interface _not_ to listen on
#except-interface=
# Or which to listen on by address (remember to include 127.0.0.1 if
# you use this.)
#listen-address=
# If you want dnsmasq to provide only DNS service on an interface,
# configure it as shown above, and then use the following line to
# disable DHCP and TFTP on it.
#no-dhcp-interface=
# On systems which support it, dnsmasq binds the wildcard address,
# even when it is listening on only some interfaces. It then discards
# requests that it shouldn't reply to. This has the advantage of
# working even when interfaces come and go and change address. If you
# want dnsmasq to really bind only the interfaces it is listening on,
# uncomment this option. About the only time you may need this is when
# running another nameserver on the same machine.
#bind-interfaces
# If you don't want dnsmasq to read /etc/hosts, uncomment the
# following line.
#no-hosts
# or if you want it to read another file, as well as /etc/hosts, use
# this.
#addn-hosts=/etc/banner_add_hosts
# Set this (and domain: see below) if you want to have a domain
# automatically added to simple names in a hosts-file.
#expand-hosts
# Set the domain for dnsmasq. this is optional, but if it is set, it
# does the following things.
# 1) Allows DHCP hosts to have fully qualified domain names, as long
# as the domain part matches this setting.
# 2) Sets the "domain" DHCP option thereby potentially setting the
# domain of all systems configured by DHCP
# 3) Provides the domain part for "expand-hosts"
#domain=thekelleys.org.uk
# Set a different domain for a particular subnet
#domain=wireless.thekelleys.org.uk,192.168.2.0/24
# Same idea, but a range rather than a subnet
#domain=reserved.thekelleys.org.uk,192.68.3.100,192.168.3.200
# Uncomment this to enable the integrated DHCP server, you need
# to supply the range of addresses available for lease and optionally
# a lease time. If you have more than one network, you will need to
# repeat this for each network on which you want to supply DHCP
# service.
#dhcp-range=192.168.0.50,192.168.0.150,12h
# This is an example of a DHCP range where the netmask is given. This
# is needed for networks where we reach the dnsmasq DHCP server via a relay
# agent. If you don't know what a DHCP relay agent is, you probably
# don't need to worry about this.
#dhcp-range=192.168.0.50,192.168.0.150,255.255.255.0,12h
# This is an example of a DHCP range which sets a tag, so that
# some DHCP options may be set only for this network.
#dhcp-range=set:red,192.168.0.50,192.168.0.150
# Use this DHCP range only when the tag "green" is set.
#dhcp-range=tag:green,192.168.0.50,192.168.0.150,12h
# Specify a subnet which can't be used for dynamic address allocation,
# is available for hosts with matching --dhcp-host lines. Note that
# dhcp-host declarations will be ignored unless there is a dhcp-range
# of some type for the subnet in question.
# In this case the netmask is implied (it comes from the network
# configuration on the machine running dnsmasq) it is possible to give
# an explicit netmask instead.
#dhcp-range=192.168.0.0,static
# Enable DHCPv6. Note that the prefix-length does not need to be specified
# and defaults to 64 if missing.
#dhcp-range=1234::2, 1234::500, 64, 12h
# Do Router Advertisements, BUT NOT DHCP for this subnet.
#dhcp-range=1234::, ra-only
# Do Router Advertisements, BUT NOT DHCP for this subnet, also try and
# add names to the DNS for the IPv6 address of SLAAC-configured dual-stack
# hosts. Use the DHCPv4 lease to derive the name, network segment and
# MAC address and assume that the host will also have an
# IPv6 address calculated using the SLAAC algorithm.
#dhcp-range=1234::, ra-names
# Do Router Advertisements, BUT NOT DHCP for this subnet.
# Set the lifetime to 46 hours. (Note: minimum lifetime is 2 hours.)
#dhcp-range=1234::, ra-only, 48h
# Do DHCP and Router Advertisements for this subnet. Set the A bit in the RA
# so that clients can use SLAAC addresses as well as DHCP ones.
#dhcp-range=1234::2, 1234::500, slaac
# Do Router Advertisements and stateless DHCP for this subnet. Clients will
# not get addresses from DHCP, but they will get other configuration information.
# They will use SLAAC for addresses.
#dhcp-range=1234::, ra-stateless
# Do stateless DHCP, SLAAC, and generate DNS names for SLAAC addresses
# from DHCPv4 leases.
#dhcp-range=1234::, ra-stateless, ra-names
# Do router advertisements for all subnets where we're doing DHCPv6
# Unless overridden by ra-stateless, ra-names, et al, the router
# advertisements will have the M and O bits set, so that the clients
# get addresses and configuration from DHCPv6, and the A bit reset, so the
# clients don't use SLAAC addresses.
#enable-ra
# Supply parameters for specified hosts using DHCP. There are lots
# of valid alternatives, so we will give examples of each. Note that
# IP addresses DO NOT have to be in the range given above, they just
# need to be on the same network. The order of the parameters in these
# do not matter, it's permissible to give name, address and MAC in any
# order.
# Always allocate the host with Ethernet address 11:22:33:44:55:66
# The IP address 192.168.0.60
#dhcp-host=11:22:33:44:55:66,192.168.0.60
# Always set the name of the host with hardware address
# 11:22:33:44:55:66 to be "fred"
#dhcp-host=11:22:33:44:55:66,fred
# Always give the host with Ethernet address 11:22:33:44:55:66
# the name fred and IP address 192.168.0.60 and lease time 45 minutes
#dhcp-host=11:22:33:44:55:66,fred,192.168.0.60,45m
# Give a host with Ethernet address 11:22:33:44:55:66 or
# 12:34:56:78:90:12 the IP address 192.168.0.60. Dnsmasq will assume
# that these two Ethernet interfaces will never be in use at the same
# time, and give the IP address to the second, even if it is already
# in use by the first. Useful for laptops with wired and wireless
# addresses.
#dhcp-host=11:22:33:44:55:66,12:34:56:78:90:12,192.168.0.60
# Give the machine which says its name is "bert" IP address
# 192.168.0.70 and an infinite lease
#dhcp-host=bert,192.168.0.70,infinite
# Always give the host with client identifier 01:02:02:04
# the IP address 192.168.0.60
#dhcp-host=id:01:02:02:04,192.168.0.60
# Always give the host with client identifier "marjorie"
# the IP address 192.168.0.60
#dhcp-host=id:marjorie,192.168.0.60
# Enable the address given for "judge" in /etc/hosts
# to be given to a machine presenting the name "judge" when
# it asks for a DHCP lease.
#dhcp-host=judge
# Never offer DHCP service to a machine whose Ethernet
# address is 11:22:33:44:55:66
#dhcp-host=11:22:33:44:55:66,ignore
# Ignore any client-id presented by the machine with Ethernet
# address 11:22:33:44:55:66. This is useful to prevent a machine
# being treated differently when running under different OS's or
# between PXE boot and OS boot.
#dhcp-host=11:22:33:44:55:66,id:*
# Send extra options which are tagged as "red" to
# the machine with Ethernet address 11:22:33:44:55:66
#dhcp-host=11:22:33:44:55:66,set:red
# Send extra options which are tagged as "red" to
# any machine with Ethernet address starting 11:22:33:
#dhcp-host=11:22:33:*:*:*,set:red
# Give a fixed IPv6 address and name to client with
# DUID 00:01:00:01:16:d2:83:fc:92:d4:19:e2:d8:b2
# Note the MAC addresses CANNOT be used to identify DHCPv6 clients.
# Note also that the [] around the IPv6 address are obligatory.
#dhcp-host=id:00:01:00:01:16:d2:83:fc:92:d4:19:e2:d8:b2, fred, [1234::5]
# Ignore any clients which are not specified in dhcp-host lines
# or /etc/ethers. Equivalent to ISC "deny unknown-clients".
# This relies on the special "known" tag which is set when
# a host is matched.
#dhcp-ignore=tag:!known
# Send extra options which are tagged as "red" to any machine whose
# DHCP vendorclass string includes the substring "Linux"
#dhcp-vendorclass=set:red,Linux
# Send extra options which are tagged as "red" to any machine one
# of whose DHCP userclass strings includes the substring "accounts"
#dhcp-userclass=set:red,accounts
# Send extra options which are tagged as "red" to any machine whose
# MAC address matches the pattern.
#dhcp-mac=set:red,00:60:8C:*:*:*
# If this line is uncommented, dnsmasq will read /etc/ethers and act
# on the ethernet-address/IP pairs found there just as if they had
# been given as --dhcp-host options. Useful if you keep
# MAC-address/host mappings there for other purposes.
#read-ethers
# Send options to hosts which ask for a DHCP lease.
# See RFC 2132 for details of available options.
# Common options can be given to dnsmasq by name:
# run "dnsmasq --help dhcp" to get a list.
# Note that all the common settings, such as netmask and
# broadcast address, DNS server and default route, are given
# sane defaults by dnsmasq. You very likely will not need
# any dhcp-options. If you use Windows clients and Samba, there
# are some options which are recommended, they are detailed at the
# end of this section.
# Override the default route supplied by dnsmasq, which assumes the
# router is the same machine as the one running dnsmasq.
#dhcp-option=3,1.2.3.4
# Do the same thing, but using the option name
#dhcp-option=option:router,1.2.3.4
# Override the default route supplied by dnsmasq and send no default
# route at all. Note that this only works for the options sent by
# default (1, 3, 6, 12, 28) the same line will send a zero-length option
# for all other option numbers.
#dhcp-option=3
# Set the NTP time server addresses to 192.168.0.4 and 10.10.0.5
#dhcp-option=option:ntp-server,192.168.0.4,10.10.0.5
# Send DHCPv6 option. Note [] around IPv6 addresses.
#dhcp-option=option6:dns-server,[1234::77],[1234::88]
# Send DHCPv6 option for nameservers as the machine running
# dnsmasq and another.
#dhcp-option=option6:dns-server,[::],[1234::88]
# Ask client to poll for option changes every six hours. (RFC4242)
#dhcp-option=option6:information-refresh-time,6h
# Set the NTP time server address to be the same machine as
# is running dnsmasq
#dhcp-option=42,0.0.0.0
# Set the NIS domain name to "welly"
#dhcp-option=40,welly
# Set the default time-to-live to 50
#dhcp-option=23,50
# Set the "all subnets are local" flag
#dhcp-option=27,1
# Send the etherboot magic flag and then etherboot options (a string).
#dhcp-option=128,e4:45:74:68:00:00
#dhcp-option=129,NIC=eepro100
# Specify an option which will only be sent to the "red" network
# (see dhcp-range for the declaration of the "red" network)
# Note that the tag: part must precede the option: part.
#dhcp-option = tag:red, option:ntp-server, 192.168.1.1
# The following DHCP options set up dnsmasq in the same way as is specified
# for the ISC dhcpcd in
# http://www.samba.org/samba/ftp/docs/textdocs/DHCP-Server-Configuration.txt
# adapted for a typical dnsmasq installation where the host running
# dnsmasq is also the host running samba.
# you may want to uncomment some or all of them if you use
# Windows clients and Samba.
#dhcp-option=19,0 # option ip-forwarding off
#dhcp-option=44,0.0.0.0 # set netbios-over-TCP/IP nameserver(s) aka WINS server(s)
#dhcp-option=45,0.0.0.0 # netbios datagram distribution server
#dhcp-option=46,8 # netbios node type
# Send an empty WPAD option. This may be REQUIRED to get windows 7 to behave.
#dhcp-option=252,"\n"
# Send RFC-3397 DNS domain search DHCP option. WARNING: Your DHCP client
# probably doesn't support this......
#dhcp-option=option:domain-search,eng.apple.com,marketing.apple.com
# Send RFC-3442 classless static routes (note the netmask encoding)
#dhcp-option=121,192.168.1.0/24,1.2.3.4,10.0.0.0/8,5.6.7.8
# Send vendor-class specific options encapsulated in DHCP option 43.
# The meaning of the options is defined by the vendor-class so
# options are sent only when the client supplied vendor class
# matches the class given here. (A substring match is OK, so "MSFT"
# matches "MSFT" and "MSFT 5.0"). This example sets the
# mtftp address to 0.0.0.0 for PXEClients.
#dhcp-option=vendor:PXEClient,1,0.0.0.0
# Send microsoft-specific option to tell windows to release the DHCP lease
# when it shuts down. Note the "i" flag, to tell dnsmasq to send the
# value as a four-byte integer - that's what microsoft wants. See
# http://technet2.microsoft.com/WindowsServer/en/library/a70f1bb7-d2d4-49f0-96d6-4b7414ecfaae1033.mspx?mfr=true
#dhcp-option=vendor:MSFT,2,1i
# Send the Encapsulated-vendor-class ID needed by some configurations of
# Etherboot to allow it to recognize the DHCP server.
#dhcp-option=vendor:Etherboot,60,"Etherboot"
# Send options to PXELinux. Note that we need to send the options even
# though they don't appear in the parameter request list, so we need
# to use dhcp-option-force here.
# See http://syslinux.zytor.com/pxe.php#special for details.
# Magic number - needed before anything else is recognized
#dhcp-option-force=208,f1:00:74:7e
# Configuration file name
#dhcp-option-force=209,configs/common
# Path prefix
#dhcp-option-force=210,/tftpboot/pxelinux/files/
# Reboot time. (Note 'i' to send 32-bit value)
#dhcp-option-force=211,30i
# Set the boot filename for netboot/PXE. You will only need
# this if you want to boot machines over the network and you will need
# a TFTP server; either dnsmasq's built in TFTP server or an
# external one. (See below for how to enable the TFTP server.)
#dhcp-boot=pxelinux.0
# The same as above, but use a custom tftp-server instead of the machine running dnsmasq
#dhcp-boot=pxelinux,server.name,192.168.1.100
# Boot for Etherboot gPXE. The idea is to send two different
# filenames, the first loads gPXE, and the second tells gPXE what to
# load. The dhcp-match sets the gpxe tag for requests from gPXE.
#dhcp-match=set:gpxe,175 # gPXE sends a 175 option.
#dhcp-boot=tag:!gpxe,undionly.kpxe
#dhcp-boot=mybootimage
# Encapsulated options for Etherboot gPXE. All the options are
# encapsulated within option 175
#dhcp-option=encap:175, 1, 5b # priority code
#dhcp-option=encap:175, 176, 1b # no-proxydhcp
#dhcp-option=encap:175, 177, string # bus-id
#dhcp-option=encap:175, 189, 1b # BIOS drive code
#dhcp-option=encap:175, 190, user # iSCSI username
#dhcp-option=encap:175, 191, pass # iSCSI password
# Test for the architecture of a netboot client. PXE clients are
# supposed to send their architecture as option 93. (See RFC 4578)
#dhcp-match=peecees, option:client-arch, 0 #x86-32
#dhcp-match=itanics, option:client-arch, 2 #IA64
#dhcp-match=hammers, option:client-arch, 6 #x86-64
#dhcp-match=mactels, option:client-arch, 7 #EFI x86-64
# Do real PXE, rather than just booting a single file, this is an
# alternative to dhcp-boot.
#pxe-prompt="What system shall I netboot?"
# or with timeout before first available action is taken:
#pxe-prompt="Press F8 for menu.", 60
# Available boot services. for PXE.
#pxe-service=x86PC, "Boot from local disk"
# Loads <tftp-root>/pxelinux.0 from dnsmasq TFTP server.
#pxe-service=x86PC, "Install Linux", pxelinux
# Loads <tftp-root>/pxelinux.0 from TFTP server at 1.2.3.4.
# Beware this fails on old PXE ROMS.
#pxe-service=x86PC, "Install Linux", pxelinux, 1.2.3.4
# Use a boot server on the network, found by multicast or broadcast.
#pxe-service=x86PC, "Install windows from RIS server", 1
# Use bootserver at a known IP address.
#pxe-service=x86PC, "Install windows from RIS server", 1, 1.2.3.4
# If you have multicast-FTP available,
# information for that can be passed in a similar way using options 1
# to 5. See page 19 of
# http://download.intel.com/design/archives/wfm/downloads/pxespec.pdf
# Enable dnsmasq's built-in TFTP server
#enable-tftp
# Set the root directory for files available via TFTP.
#tftp-root=/var/ftpd
# Make the TFTP server more secure: with this set, only files owned by
# the user dnsmasq is running as will be sent over the net.
#tftp-secure
# This option stops dnsmasq from negotiating a larger blocksize for TFTP
# transfers. It will slow things down, but may rescue some broken TFTP
# clients.
#tftp-no-blocksize
# Set the boot file name only when the "red" tag is set.
#dhcp-boot=tag:red,pxelinux.red-net
# An example of dhcp-boot with an external TFTP server: the name and IP
# address of the server are given after the filename.
# Can fail with old PXE ROMS. Overridden by --pxe-service.
#dhcp-boot=/var/ftpd/pxelinux.0,boothost,192.168.0.3
# If there are multiple external tftp servers having a same name
# (using /etc/hosts) then that name can be specified as the
# tftp_servername (the third option to dhcp-boot) and in that
# case dnsmasq resolves this name and returns the resultant IP
# addresses in round robin fashion. This facility can be used to
# load balance the tftp load among a set of servers.
#dhcp-boot=/var/ftpd/pxelinux.0,boothost,tftp_server_name
# Set the limit on DHCP leases, the default is 150
#dhcp-lease-max=150
# The DHCP server needs somewhere on disk to keep its lease database.
# This defaults to a sane location, but if you want to change it, use
# the line below.
#dhcp-leasefile=/var/lib/misc/dnsmasq.leases
# Set the DHCP server to authoritative mode. In this mode it will barge in
# and take over the lease for any client which broadcasts on the network,
# whether it has a record of the lease or not. This avoids long timeouts
# when a machine wakes up on a new network. DO NOT enable this if there's
# the slightest chance that you might end up accidentally configuring a DHCP
# server for your campus/company. The ISC server uses
# the same option, and this URL provides more information:
# http://www.isc.org/files/auth.html
#dhcp-authoritative
# Run an executable when a DHCP lease is created or destroyed.
# The arguments sent to the script are "add" or "del",
# then the MAC address, the IP address and finally the hostname
# if there is one.
#dhcp-script=/bin/echo
# Set the cachesize here.
#cache-size=150
# If you want to disable negative caching, uncomment this.
#no-negcache
# Normally responses which come from /etc/hosts and the DHCP lease
# file have Time-To-Live set as zero, which conventionally means
# do not cache further. If you are happy to trade lower load on the
# server for potentially stale data, you can set a time-to-live (in
# seconds) here.
#local-ttl=
# If you want dnsmasq to detect attempts by Verisign to send queries
# to unregistered .com and .net hosts to its sitefinder service and
# have dnsmasq instead return the correct NXDOMAIN response, uncomment
# this line. You can add similar lines to do the same for other
# registries which have implemented wildcard A records.
#bogus-nxdomain=64.94.110.11
# If you want to fix up DNS results from upstream servers, use the
# alias option. This only works for IPv4.
# This alias makes a result of 1.2.3.4 appear as 5.6.7.8
#alias=1.2.3.4,5.6.7.8
# and this maps 1.2.3.x to 5.6.7.x
#alias=1.2.3.0,5.6.7.0,255.255.255.0
# and this maps 192.168.0.10->192.168.0.40 to 10.0.0.10->10.0.0.40
#alias=192.168.0.10-192.168.0.40,10.0.0.0,255.255.255.0
# Change these lines if you want dnsmasq to serve MX records.
# Return an MX record named "maildomain.com" with target
# servermachine.com and preference 50
#mx-host=maildomain.com,servermachine.com,50
# Set the default target for MX records created using the localmx option.
#mx-target=servermachine.com
# Return an MX record pointing to the mx-target for all local
# machines.
#localmx
# Return an MX record pointing to itself for all local machines.
#selfmx
# Change the following lines if you want dnsmasq to serve SRV
# records. These are useful if you want to serve ldap requests for
# Active Directory and other windows-originated DNS requests.
# See RFC 2782.
# You may add multiple srv-host lines.
# The fields are <name>,<target>,<port>,<priority>,<weight>
# If the domain part is missing from the name (so that it just has the
# service and protocol sections) then the domain given by the domain=
# config option is used. (Note that expand-hosts does not need to be
# set for this to work.)
# A SRV record sending LDAP for the example.com domain to
# ldapserver.example.com port 389
#srv-host=_ldap._tcp.example.com,ldapserver.example.com,389
# A SRV record sending LDAP for the example.com domain to
# ldapserver.example.com port 389 (using domain=)
#domain=example.com
#srv-host=_ldap._tcp,ldapserver.example.com,389
# Two SRV records for LDAP, each with different priorities
#srv-host=_ldap._tcp.example.com,ldapserver.example.com,389,1
#srv-host=_ldap._tcp.example.com,ldapserver.example.com,389,2
# A SRV record indicating that there is no LDAP server for the domain
# example.com
#srv-host=_ldap._tcp.example.com
# The following line shows how to make dnsmasq serve an arbitrary PTR
# record. This is useful for DNS-SD. (Note that the
# domain-name expansion done for SRV records _does_not
# occur for PTR records.)
#ptr-record=_http._tcp.dns-sd-services,"New Employee Page._http._tcp.dns-sd-services"
# Change the following lines to enable dnsmasq to serve TXT records.
# These are used for things like SPF and zeroconf. (Note that the
# domain-name expansion done for SRV records _does_not
# occur for TXT records.)
#Example SPF.
#txt-record=example.com,"v=spf1 a -all"
#Example zeroconf
#txt-record=_http._tcp.example.com,name=value,paper=A4
# Provide an alias for a "local" DNS name. Note that this _only_ works
# for targets which are names from DHCP or /etc/hosts. Give host
# "bert" another name, bertrand
#cname=bertand,bert
# For debugging purposes, log each DNS query as it passes through
# dnsmasq.
#log-queries
# Log lots of extra information about DHCP transactions.
#log-dhcp
# Include another lot of configuration options.
#conf-file=/etc/dnsmasq.more.conf
#conf-dir=/etc/dnsmasq.d
# Include all the files in a directory except those ending in .bak
#conf-dir=/etc/dnsmasq.d,.bak
# Include all files in a directory which end in .conf
#conf-dir=/etc/dnsmasq.d/*.conf

@ -1,73 +0,0 @@
# Pi-hole: A black hole for Internet advertisements
# (c) 2017 Pi-hole, LLC (https://pi-hole.net)
# Network-wide ad blocking via your own hardware.
#
# Lighttpd config for Pi-hole
#
# This file is copyright under the latest version of the EUPL.
# Please see LICENSE file for your rights under this license.
###################################################################################################
# IF THIS HEADER EXISTS, THE FILE WILL BE OVERWRITTEN BY PI-HOLE'S UPDATE PROCEDURE. #
# ANY CHANGES MADE TO THIS FILE WILL BE LOST ON THE NEXT UPDATE UNLESS YOU REMOVE THIS HEADER #
# #
# ENSURE THAT YOU DO NOT REMOVE THE REQUIRED LINE: #
# #
# include "/etc/lighttpd/conf-enabled/*.conf" #
# #
###################################################################################################
server.modules = (
"mod_access",
"mod_auth",
"mod_expire",
"mod_redirect",
"mod_setenv",
"mod_rewrite"
)
server.document-root = "/var/www/html"
server.upload-dirs = ( "/var/cache/lighttpd/uploads" )
server.errorlog = "/var/log/lighttpd/error-pihole.log"
server.pid-file = "/run/lighttpd.pid"
server.username = "www-data"
server.groupname = "www-data"
# For lighttpd version 1.4.46 or above, the port can be overwritten in `/etc/lighttpd/external.conf` using the := operator
# e.g. server.port := 8000
server.port = 80
# Allow streaming response
# reference: https://redmine.lighttpd.net/projects/lighttpd/wiki/Server_stream-response-bodyDetails
server.stream-response-body = 1
#ssl.read-ahead = "disable"
index-file.names = ( "index.php", "index.html", "index.lighttpd.html" )
url.access-deny = ( "~", ".inc", ".md", ".yml", ".ini" )
static-file.exclude-extensions = ( ".php", ".pl", ".fcgi" )
mimetype.assign = (
".ico" => "image/x-icon",
".jpeg" => "image/jpeg",
".jpg" => "image/jpeg",
".png" => "image/png",
".svg" => "image/svg+xml",
".css" => "text/css; charset=utf-8",
".html" => "text/html; charset=utf-8",
".js" => "text/javascript; charset=utf-8",
".json" => "application/json; charset=utf-8",
".map" => "application/json; charset=utf-8",
".txt" => "text/plain; charset=utf-8",
".eot" => "application/vnd.ms-fontobject",
".otf" => "font/otf",
".ttc" => "font/collection",
".ttf" => "font/ttf",
".woff" => "font/woff",
".woff2" => "font/woff2"
)
# Add user chosen options held in (optional) external file
include "external*.conf"
# default listening port for IPv6 falls back to the IPv4 port
include_shell "/usr/share/lighttpd/use-ipv6.pl " + server.port
include "/etc/lighttpd/conf-enabled/*.conf"

@ -1,87 +0,0 @@
# Pi-hole: A black hole for Internet advertisements
# (c) 2017 Pi-hole, LLC (https://pi-hole.net)
# Network-wide ad blocking via your own hardware.
#
# Lighttpd config for Pi-hole
#
# This file is copyright under the latest version of the EUPL.
# Please see LICENSE file for your rights under this license.
###################################################################################################
# IF THIS HEADER EXISTS, THE FILE WILL BE OVERWRITTEN BY PI-HOLE'S UPDATE PROCEDURE. #
# ANY CHANGES MADE TO THIS FILE WILL BE LOST ON THE NEXT UPDATE UNLESS YOU REMOVE THIS HEADER #
# #
# ENSURE THAT YOU DO NOT REMOVE THE REQUIRED LINE: #
# #
# include "/etc/lighttpd/conf.d/pihole-admin.conf" #
# #
###################################################################################################
server.modules = (
"mod_access",
"mod_auth",
"mod_expire",
"mod_fastcgi",
"mod_accesslog",
"mod_redirect",
"mod_setenv",
"mod_rewrite"
)
server.document-root = "/var/www/html"
server.upload-dirs = ( "/var/cache/lighttpd/uploads" )
server.errorlog = "/var/log/lighttpd/error-pihole.log"
server.pid-file = "/run/lighttpd.pid"
server.username = "lighttpd"
server.groupname = "lighttpd"
# For lighttpd version 1.4.46 or above, the port can be overwritten in `/etc/lighttpd/external.conf` using the := operator
# e.g. server.port := 8000
server.port = 80
# Allow streaming response
# reference: https://redmine.lighttpd.net/projects/lighttpd/wiki/Server_stream-response-bodyDetails
server.stream-response-body = 1
#ssl.read-ahead = "disable"
index-file.names = ( "index.php", "index.html", "index.lighttpd.html" )
url.access-deny = ( "~", ".inc", ".md", ".yml", ".ini" )
static-file.exclude-extensions = ( ".php", ".pl", ".fcgi" )
mimetype.assign = (
".ico" => "image/x-icon",
".jpeg" => "image/jpeg",
".jpg" => "image/jpeg",
".png" => "image/png",
".svg" => "image/svg+xml",
".css" => "text/css; charset=utf-8",
".html" => "text/html; charset=utf-8",
".js" => "text/javascript; charset=utf-8",
".json" => "application/json; charset=utf-8",
".map" => "application/json; charset=utf-8",
".txt" => "text/plain; charset=utf-8",
".eot" => "application/vnd.ms-fontobject",
".otf" => "font/otf",
".ttc" => "font/collection",
".ttf" => "font/ttf",
".woff" => "font/woff",
".woff2" => "font/woff2"
)
# Add user chosen options held in (optional) external file
include "external*.conf"
# default listening port for IPv6 falls back to the IPv4 port
#include_shell "/usr/share/lighttpd/use-ipv6.pl " + server.port
#include_shell "/usr/share/lighttpd/create-mime.assign.pl"
#include_shell "/usr/share/lighttpd/include-conf-enabled.pl"
fastcgi.server = (
".php" => (
"localhost" => (
"socket" => "/tmp/php-fastcgi.socket",
"bin-path" => "/usr/bin/php-cgi"
)
)
)
include "/etc/lighttpd/conf.d/pihole-admin.conf"

@ -1,82 +0,0 @@
# Pi-hole: A black hole for Internet advertisements
# (c) 2017 Pi-hole, LLC (https://pi-hole.net)
# Network-wide ad blocking via your own hardware.
#
# Lighttpd config for Pi-hole
#
# This file is copyright under the latest version of the EUPL.
# Please see LICENSE file for your rights under this license.
###############################################################################
# FILE AUTOMATICALLY OVERWRITTEN BY PI-HOLE INSTALL/UPDATE PROCEDURE. #
# ANY CHANGES MADE TO THIS FILE AFTER INSTALL WILL BE LOST ON THE NEXT UPDATE #
###############################################################################
server.errorlog := "/var/log/lighttpd/error-pihole.log"
$HTTP["url"] =~ "^/admin/" {
server.document-root = "/var/www/html"
server.stream-response-body = 1
accesslog.filename = "/var/log/lighttpd/access-pihole.log"
accesslog.format = "%{%s}t|%h|%V|%r|%s|%b"
fastcgi.server = (
".php" => (
"localhost" => (
"socket" => "/run/lighttpd/pihole-php-fastcgi.socket",
"bin-path" => "/usr/bin/php-cgi",
"min-procs" => 1,
"max-procs" => 1,
"bin-environment" => (
"PHP_FCGI_CHILDREN" => "4",
"PHP_FCGI_MAX_REQUESTS" => "10000",
),
"bin-copy-environment" => (
"PATH", "SHELL", "USER"
),
"broken-scriptfilename" => "enable",
)
)
)
# X-Pi-hole is a response header for debugging using curl -I
# X-Frame-Options prevents clickjacking attacks and helps ensure your content is not embedded into other sites via < frame >, < iframe > or < object >.
# X-XSS-Protection sets the configuration for the cross-site scripting filters built into most browsers. This is important because it tells the browser to block the response if a malicious script has been inserted from a user input. (deprecated; disabled)
# X-Content-Type-Options stops a browser from trying to MIME-sniff the content type and forces it to stick with the declared content-type. This is important because the browser will only load external resources if their content-type matches what is expected, and not malicious hidden code.
# Content-Security-Policy tells the browser where resources are allowed to be loaded from and if it is allowed to parse/run inline styles or JavaScript. This is important because it prevents content injection attacks, such as cross-site scripting (XSS).
# X-Permitted-Cross-Domain-Policies is an XML document that grants a web client, such as Adobe Flash Player or Adobe Acrobat (though not necessarily limited to these), permission to handle data across domains.
# Referrer-Policy allows control/restriction of the amount of information present in the referral header for links away from your page—the URL path or even if the header is sent at all.
setenv.add-response-header = (
"X-Pi-hole" => "The Pi-hole Web interface is working!",
"X-Frame-Options" => "DENY",
"X-XSS-Protection" => "0",
"X-Content-Type-Options" => "nosniff",
"Content-Security-Policy" => "default-src 'self' 'unsafe-inline';",
"X-Permitted-Cross-Domain-Policies" => "none",
"Referrer-Policy" => "same-origin"
)
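# Illustrative check (assumes the interface is reachable as http://pi.hole/admin/):
#   curl -I http://pi.hole/admin/
# should list the headers set above, including
#   X-Pi-hole: The Pi-hole Web interface is working!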
# Block . files from being served, such as .git, .github, .gitignore
$HTTP["url"] =~ "^/admin/\." {
url.access-deny = ("")
}
# allow teleporter and API qr code iframe on settings page
$HTTP["url"] =~ "/(teleporter|api_token)\.php$" {
$HTTP["referer"] =~ "/admin/settings\.php" {
setenv.set-response-header = ( "X-Frame-Options" => "SAMEORIGIN" )
}
}
}
else $HTTP["url"] == "/admin" {
url.redirect = ("" => "/admin/")
}
$HTTP["host"] == "pi.hole" {
$HTTP["url"] == "/" {
url.redirect = ("" => "/admin/")
}
}
# (keep this on one line for basic-install.sh filtering during install)
server.modules += ( "mod_access", "mod_accesslog", "mod_redirect", "mod_fastcgi", "mod_setenv" )

File diff suppressed because it is too large

@ -46,10 +46,6 @@ package_manager_detect
# Uninstall packages used by the Pi-hole # Uninstall packages used by the Pi-hole
DEPS=("${INSTALLER_DEPS[@]}" "${PIHOLE_DEPS[@]}" "${OS_CHECK_DEPS[@]}") DEPS=("${INSTALLER_DEPS[@]}" "${PIHOLE_DEPS[@]}" "${OS_CHECK_DEPS[@]}")
if [[ "${INSTALL_WEB_SERVER}" == true ]]; then
# Install the Web dependencies
DEPS+=("${PIHOLE_WEB_DEPS[@]}")
fi
# Compatibility # Compatibility
if [ -x "$(command -v apt-get)" ]; then if [ -x "$(command -v apt-get)" ]; then

@ -13,10 +13,17 @@
export LC_ALL=C export LC_ALL=C
coltable="/opt/pihole/COL_TABLE" PI_HOLE_SCRIPT_DIR="/opt/pihole"
source "${coltable}" # Source utils.sh for GetFTLConfigValue
utilsfile="${PI_HOLE_SCRIPT_DIR}/utils.sh"
# shellcheck disable=SC1090
. "${utilsfile}"
coltable="${PI_HOLE_SCRIPT_DIR}/COL_TABLE"
# shellcheck disable=SC1090
. "${coltable}"
# shellcheck disable=SC1091 # shellcheck disable=SC1091
source "/etc/.pihole/advanced/Scripts/database_migration/gravity-db.sh" . "/etc/.pihole/advanced/Scripts/database_migration/gravity-db.sh"
basename="pihole" basename="pihole"
PIHOLE_COMMAND="/usr/local/bin/${basename}" PIHOLE_COMMAND="/usr/local/bin/${basename}"
@ -29,62 +36,32 @@ blacklistFile="${piholeDir}/blacklist.txt"
regexFile="${piholeDir}/regex.list" regexFile="${piholeDir}/regex.list"
adListFile="${piholeDir}/adlists.list" adListFile="${piholeDir}/adlists.list"
localList="${piholeDir}/local.list"
VPNList="/etc/openvpn/ipp.txt"
piholeGitDir="/etc/.pihole" piholeGitDir="/etc/.pihole"
gravityDBfile_default="${piholeDir}/gravity.db" GRAVITYDB=$(getFTLConfigValue files.gravity)
# GRAVITYDB may be overwritten by source pihole-FTL.conf below GRAVITY_TMPDIR=$(getFTLConfigValue files.gravity_tmp)
GRAVITYDB="${gravityDBfile_default}"
gravityDBschema="${piholeGitDir}/advanced/Templates/gravity.db.sql" gravityDBschema="${piholeGitDir}/advanced/Templates/gravity.db.sql"
gravityDBcopy="${piholeGitDir}/advanced/Templates/gravity_copy.sql" gravityDBcopy="${piholeGitDir}/advanced/Templates/gravity_copy.sql"
domainsExtension="domains" domainsExtension="domains"
curl_connect_timeout=10 curl_connect_timeout=10
# Source setupVars from install script # Check gravity temp directory
setupVars="${piholeDir}/setupVars.conf"
if [[ -f "${setupVars}" ]];then
source "${setupVars}"
else
echo -e " ${COL_LIGHT_RED}Installation Failure: ${setupVars} does not exist! ${COL_NC}
Please run 'pihole -r', and choose the 'reconfigure' option to fix."
exit 1
fi
# Set up tmp dir variable in case it's not configured
: "${GRAVITY_TMPDIR:=/tmp}"
if [ ! -d "${GRAVITY_TMPDIR}" ] || [ ! -w "${GRAVITY_TMPDIR}" ]; then if [ ! -d "${GRAVITY_TMPDIR}" ] || [ ! -w "${GRAVITY_TMPDIR}" ]; then
echo -e " ${COL_LIGHT_RED}Gravity temporary directory does not exist or is not a writeable directory, falling back to /tmp. ${COL_NC}" echo -e " ${COL_LIGHT_RED}Gravity temporary directory does not exist or is not a writeable directory, falling back to /tmp. ${COL_NC}"
GRAVITY_TMPDIR="/tmp" GRAVITY_TMPDIR="/tmp"
fi fi
# Source pihole-FTL from install script
pihole_FTL="${piholeDir}/pihole-FTL.conf"
if [[ -f "${pihole_FTL}" ]]; then
source "${pihole_FTL}"
fi
# Set this only after sourcing pihole-FTL.conf as the gravity database path may # Set this only after sourcing pihole-FTL.conf as the gravity database path may
# have changed # have changed
gravityDBfile="${GRAVITYDB}" gravityDBfile="${GRAVITYDB}"
gravityDBfile_default="/etc/pihole/gravity.db"
gravityTEMPfile="${GRAVITYDB}_temp" gravityTEMPfile="${GRAVITYDB}_temp"
gravityDIR="$(dirname -- "${gravityDBfile}")" gravityDIR="$(dirname -- "${gravityDBfile}")"
gravityOLDfile="${gravityDIR}/gravity_old.db" gravityOLDfile="${gravityDIR}/gravity_old.db"
if [[ -z "${BLOCKINGMODE}" ]] ; then
BLOCKINGMODE="NULL"
fi
# Determine if superseded pihole.conf exists
if [[ -r "${piholeDir}/pihole.conf" ]]; then
echo -e " ${COL_LIGHT_RED}Ignoring overrides specified within pihole.conf! ${COL_NC}"
fi
# Generate new SQLite3 file from schema template # Generate new SQLite3 file from schema template
generate_gravity_database() { generate_gravity_database() {
if ! pihole-FTL sqlite3 -ni "${gravityDBfile}" < "${gravityDBschema}"; then if ! pihole-FTL sqlite3 -ni "${gravityDBfile}" <"${gravityDBschema}"; then
echo -e " ${CROSS} Unable to create ${gravityDBfile}" echo -e " ${CROSS} Unable to create ${gravityDBfile}"
return 1 return 1
fi fi
@ -92,14 +69,14 @@ generate_gravity_database() {
chmod g+w "${piholeDir}" "${gravityDBfile}" chmod g+w "${piholeDir}" "${gravityDBfile}"
} }
# Copy data from old to new database file and swap them # Build gravity tree
gravity_swap_databases() { gravity_build_tree() {
local str copyGravity oldAvail local str
str="Building tree" str="Building tree"
echo -ne " ${INFO} ${str}..." echo -ne " ${INFO} ${str}..."
# The index is intentionally not UNIQUE as poor quality adlists may contain domains more than once # The index is intentionally not UNIQUE as poor quality adlists may contain domains more than once
output=$( { pihole-FTL sqlite3 -ni "${gravityTEMPfile}" "CREATE INDEX idx_gravity ON gravity (domain, adlist_id);"; } 2>&1 ) output=$({ pihole-FTL sqlite3 -ni "${gravityTEMPfile}" "CREATE INDEX idx_gravity ON gravity (domain, adlist_id);"; } 2>&1)
status="$?" status="$?"
if [[ "${status}" -ne 0 ]]; then if [[ "${status}" -ne 0 ]]; then
@ -107,7 +84,10 @@ gravity_swap_databases() {
return 1 return 1
fi fi
echo -e "${OVER} ${TICK} ${str}" echo -e "${OVER} ${TICK} ${str}"
}
# Copy data from old to new database file and swap them
gravity_swap_databases() {
str="Swapping databases" str="Swapping databases"
echo -ne " ${INFO} ${str}..." echo -ne " ${INFO} ${str}..."
@ -115,7 +95,7 @@ gravity_swap_databases() {
# Number of available blocks on disk # Number of available blocks on disk
availableBlocks=$(stat -f --format "%a" "${gravityDIR}") availableBlocks=$(stat -f --format "%a" "${gravityDIR}")
# Number of blocks, used by gravity.db # Number of blocks, used by gravity.db
gravityBlocks=$(stat --format "%b" ${gravityDBfile}) gravityBlocks=$(stat --format "%b" "${gravityDBfile}")
# Only keep the old database if available disk space is at least twice the size of the existing gravity.db. # Only keep the old database if available disk space is at least twice the size of the existing gravity.db.
# Better be safe than sorry... # Better be safe than sorry...
oldAvail=false oldAvail=false
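# Sketch of the comparison this sets up (an assumption based on the comment above, not the
# verbatim implementation): keep a copy of the old database only when free space is sufficient, e.g.
#   if [ "${availableBlocks}" -gt "$((gravityBlocks * 2))" ] && [ -f "${gravityDBfile}" ]; then
#       oldAvail=true
#       cp "${gravityDBfile}" "${gravityOLDfile}"
#   fi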
@ -135,11 +115,11 @@ gravity_swap_databases() {
# Update timestamp when the gravity table was last updated successfully # Update timestamp when the gravity table was last updated successfully
update_gravity_timestamp() { update_gravity_timestamp() {
output=$( { printf ".timeout 30000\\nINSERT OR REPLACE INTO info (property,value) values ('updated',cast(strftime('%%s', 'now') as int));" | pihole-FTL sqlite3 -ni "${gravityDBfile}"; } 2>&1 ) output=$({ printf ".timeout 30000\\nINSERT OR REPLACE INTO info (property,value) values ('updated',cast(strftime('%%s', 'now') as int));" | pihole-FTL sqlite3 -ni "${gravityTEMPfile}"; } 2>&1)
status="$?" status="$?"
if [[ "${status}" -ne 0 ]]; then if [[ "${status}" -ne 0 ]]; then
echo -e "\\n ${CROSS} Unable to update gravity timestamp in database ${gravityDBfile}\\n ${output}" echo -e "\\n ${CROSS} Unable to update gravity timestamp in database ${gravityTEMPfile}\\n ${output}"
return 1 return 1
fi fi
return 0 return 0
@ -157,6 +137,7 @@ database_table_from_file() {
# implementations of mktemp support it, e.g. on Alpine # implementations of mktemp support it, e.g. on Alpine
tmpFile="$(mktemp -p "${GRAVITY_TMPDIR}")" tmpFile="$(mktemp -p "${GRAVITY_TMPDIR}")"
mv "${tmpFile}" "${tmpFile%.*}.gravity" mv "${tmpFile}" "${tmpFile%.*}.gravity"
tmpFile="${tmpFile%.*}.gravity"
local timestamp local timestamp
timestamp="$(date --utc +'%s')" timestamp="$(date --utc +'%s')"
@ -188,19 +169,18 @@ database_table_from_file() {
# Loop over all domains in ${src} file # Loop over all domains in ${src} file
# Read file line by line # Read file line by line
grep -v '^ *#' < "${src}" | while IFS= read -r domain grep -v '^ *#' <"${src}" | while IFS= read -r domain; do
do
# Only add non-empty lines # Only add non-empty lines
if [[ -n "${domain}" ]]; then if [[ -n "${domain}" ]]; then
if [[ "${table}" == "domain_audit" ]]; then if [[ "${table}" == "domain_audit" ]]; then
# domain_audit table format (no enable or modified fields) # domain_audit table format (no enable or modified fields)
echo "${rowid},\"${domain}\",${timestamp}" >> "${tmpFile}" echo "${rowid},\"${domain}\",${timestamp}" >>"${tmpFile}"
elif [[ "${table}" == "adlist" ]]; then elif [[ "${table}" == "adlist" ]]; then
# Adlist table format # Adlist table format
echo "${rowid},\"${domain}\",1,${timestamp},${timestamp},\"Migrated from ${src}\",,0,0,0" >> "${tmpFile}" echo "${rowid},\"${domain}\",1,${timestamp},${timestamp},\"Migrated from ${src}\",,0,0,0,0,0" >>"${tmpFile}"
else else
# White-, black-, and regexlist table format # White-, black-, and regexlist table format
echo "${rowid},${list_type},\"${domain}\",1,${timestamp},${timestamp},\"Migrated from ${src}\"" >> "${tmpFile}" echo "${rowid},${list_type},\"${domain}\",1,${timestamp},${timestamp},\"Migrated from ${src}\"" >>"${tmpFile}"
fi fi
rowid+=1 rowid+=1
fi fi
@ -209,7 +189,7 @@ database_table_from_file() {
# Store domains in database table specified by ${table} # Store domains in database table specified by ${table}
# Use printf as .mode and .import need to be on separate lines # Use printf as .mode and .import need to be on separate lines
# see https://unix.stackexchange.com/a/445615/83260 # see https://unix.stackexchange.com/a/445615/83260
output=$( { printf ".timeout 30000\\n.mode csv\\n.import \"%s\" %s\\n" "${tmpFile}" "${table}" | pihole-FTL sqlite3 -ni "${gravityDBfile}"; } 2>&1 ) output=$({ printf ".timeout 30000\\n.mode csv\\n.import \"%s\" %s\\n" "${tmpFile}" "${table}" | pihole-FTL sqlite3 -ni "${gravityDBfile}"; } 2>&1)
status="$?" status="$?"
if [[ "${status}" -ne 0 ]]; then if [[ "${status}" -ne 0 ]]; then
@ -219,17 +199,17 @@ database_table_from_file() {
# Move source file to backup directory, create directory if not existing # Move source file to backup directory, create directory if not existing
mkdir -p "${backup_path}" mkdir -p "${backup_path}"
mv "${src}" "${backup_file}" 2> /dev/null || \ mv "${src}" "${backup_file}" 2>/dev/null ||
echo -e " ${CROSS} Unable to backup ${src} to ${backup_path}" echo -e " ${CROSS} Unable to backup ${src} to ${backup_path}"
# Delete tmpFile # Delete tmpFile
rm "${tmpFile}" > /dev/null 2>&1 || \ rm "${tmpFile}" >/dev/null 2>&1 ||
echo -e " ${CROSS} Unable to remove ${tmpFile}" echo -e " ${CROSS} Unable to remove ${tmpFile}"
} }
# Check if a column with name ${2} exists in gravity table with name ${1} # Check if a column with name ${2} exists in gravity table with name ${1}
gravity_column_exists() { gravity_column_exists() {
output=$( { printf ".timeout 30000\\nSELECT EXISTS(SELECT * FROM pragma_table_info('%s') WHERE name='%s');\\n" "${1}" "${2}" | pihole-FTL sqlite3 -ni "${gravityTEMPfile}"; } 2>&1 ) output=$({ printf ".timeout 30000\\nSELECT EXISTS(SELECT * FROM pragma_table_info('%s') WHERE name='%s');\\n" "${1}" "${2}" | pihole-FTL sqlite3 -ni "${gravityTEMPfile}"; } 2>&1)
if [[ "${output}" == "1" ]]; then if [[ "${output}" == "1" ]]; then
return 0 # Bash 0 is success return 0 # Bash 0 is success
fi fi
@ -241,10 +221,10 @@ gravity_column_exists() {
database_adlist_number() { database_adlist_number() {
# Only try to set number of domains when this field exists in the gravity database # Only try to set number of domains when this field exists in the gravity database
if ! gravity_column_exists "adlist" "number"; then if ! gravity_column_exists "adlist" "number"; then
return; return
fi fi
output=$( { printf ".timeout 30000\\nUPDATE adlist SET number = %i, invalid_domains = %i WHERE id = %i;\\n" "${2}" "${3}" "${1}" | pihole-FTL sqlite3 -ni "${gravityTEMPfile}"; } 2>&1 ) output=$({ printf ".timeout 30000\\nUPDATE adlist SET number = %i, invalid_domains = %i WHERE id = %i;\\n" "${2}" "${3}" "${1}" | pihole-FTL sqlite3 -ni "${gravityTEMPfile}"; } 2>&1)
status="$?" status="$?"
if [[ "${status}" -ne 0 ]]; then if [[ "${status}" -ne 0 ]]; then
@ -257,10 +237,10 @@ database_adlist_number() {
database_adlist_status() { database_adlist_status() {
# Only try to set the status when this field exists in the gravity database # Only try to set the status when this field exists in the gravity database
if ! gravity_column_exists "adlist" "status"; then if ! gravity_column_exists "adlist" "status"; then
return; return
fi fi
output=$( { printf ".timeout 30000\\nUPDATE adlist SET status = %i WHERE id = %i;\\n" "${2}" "${1}" | pihole-FTL sqlite3 -ni "${gravityTEMPfile}"; } 2>&1 ) output=$({ printf ".timeout 30000\\nUPDATE adlist SET status = %i WHERE id = %i;\\n" "${2}" "${1}" | pihole-FTL sqlite3 -ni "${gravityTEMPfile}"; } 2>&1)
status="$?" status="$?"
if [[ "${status}" -ne 0 ]]; then if [[ "${status}" -ne 0 ]]; then
@ -314,15 +294,10 @@ migrate_to_database() {
# Determine if DNS resolution is available before proceeding # Determine if DNS resolution is available before proceeding
gravity_CheckDNSResolutionAvailable() { gravity_CheckDNSResolutionAvailable() {
local lookupDomain="pi.hole" local lookupDomain="raw.githubusercontent.com"
# Determine if $localList does not exist, and ensure it is not empty
if [[ ! -e "${localList}" ]] || [[ -s "${localList}" ]]; then
lookupDomain="raw.githubusercontent.com"
fi
# Determine if $lookupDomain is resolvable # Determine if $lookupDomain is resolvable
if timeout 4 getent hosts "${lookupDomain}" &> /dev/null; then if timeout 4 getent hosts "${lookupDomain}" &>/dev/null; then
# Print confirmation of resolvability if it had previously failed # Print confirmation of resolvability if it had previously failed
if [[ -n "${secs:-}" ]]; then if [[ -n "${secs:-}" ]]; then
echo -e "${OVER} ${TICK} DNS resolution is now available\\n" echo -e "${OVER} ${TICK} DNS resolution is now available\\n"
@ -336,7 +311,7 @@ gravity_CheckDNSResolutionAvailable() {
# If the /etc/resolv.conf contains resolvers other than 127.0.0.1 then the local dnsmasq will not be queried and pi.hole is NXDOMAIN. # If the /etc/resolv.conf contains resolvers other than 127.0.0.1 then the local dnsmasq will not be queried and pi.hole is NXDOMAIN.
# This means that even though name resolution is working, the getent hosts check fails and the holddown timer keeps ticking and eventually fails # This means that even though name resolution is working, the getent hosts check fails and the holddown timer keeps ticking and eventually fails
# So we check the output of the last command and if it failed, attempt to use dig +short as a fallback # So we check the output of the last command and if it failed, attempt to use dig +short as a fallback
if timeout 4 dig +short "${lookupDomain}" &> /dev/null; then if timeout 4 dig +short "${lookupDomain}" &>/dev/null; then
if [[ -n "${secs:-}" ]]; then if [[ -n "${secs:-}" ]]; then
echo -e "${OVER} ${TICK} DNS resolution is now available\\n" echo -e "${OVER} ${TICK} DNS resolution is now available\\n"
fi fi
@ -347,7 +322,7 @@ gravity_CheckDNSResolutionAvailable() {
fi fi
# Determine error output message # Determine error output message
if pgrep pihole-FTL &> /dev/null; then if pgrep pihole-FTL &>/dev/null; then
echo -e " ${CROSS} DNS resolution is currently unavailable" echo -e " ${CROSS} DNS resolution is currently unavailable"
else else
echo -e " ${CROSS} DNS service is not running" echo -e " ${CROSS} DNS service is not running"
@ -357,7 +332,7 @@ gravity_CheckDNSResolutionAvailable() {
# Ensure DNS server is given time to be resolvable # Ensure DNS server is given time to be resolvable
secs="120" secs="120"
echo -ne " ${INFO} Time until retry: ${secs}" echo -ne " ${INFO} Time until retry: ${secs}"
until timeout 1 getent hosts "${lookupDomain}" &> /dev/null; do until timeout 1 getent hosts "${lookupDomain}" &>/dev/null; do
[[ "${secs:-}" -eq 0 ]] && break [[ "${secs:-}" -eq 0 ]] && break
echo -ne "${OVER} ${INFO} Time until retry: ${secs}" echo -ne "${OVER} ${INFO} Time until retry: ${secs}"
: $((secs--)) : $((secs--))
@ -378,18 +353,19 @@ gravity_DownloadBlocklists() {
# Retrieve source URLs from gravity database # Retrieve source URLs from gravity database
# We source only enabled adlists, SQLite3 stores boolean values as 0 (false) or 1 (true) # We source only enabled adlists, SQLite3 stores boolean values as 0 (false) or 1 (true)
mapfile -t sources <<< "$(pihole-FTL sqlite3 -ni "${gravityDBfile}" "SELECT address FROM vw_adlist;" 2> /dev/null)" mapfile -t sources <<<"$(pihole-FTL sqlite3 -ni "${gravityDBfile}" "SELECT address FROM vw_adlist;" 2>/dev/null)"
mapfile -t sourceIDs <<< "$(pihole-FTL sqlite3 -ni "${gravityDBfile}" "SELECT id FROM vw_adlist;" 2> /dev/null)" mapfile -t sourceIDs <<<"$(pihole-FTL sqlite3 -ni "${gravityDBfile}" "SELECT id FROM vw_adlist;" 2>/dev/null)"
mapfile -t sourceTypes <<<"$(pihole-FTL sqlite3 -ni "${gravityDBfile}" "SELECT type FROM vw_adlist;" 2>/dev/null)"
# Parse source domains from $sources # Parse source domains from $sources
mapfile -t sourceDomains <<< "$( mapfile -t sourceDomains <<<"$(
# Logic: Split by folder/port # Logic: Split by folder/port
awk -F '[/:]' '{ awk -F '[/:]' '{
# Remove URL protocol & optional username:password@ # Remove URL protocol & optional username:password@
gsub(/(.*:\/\/|.*:.*@)/, "", $0) gsub(/(.*:\/\/|.*:.*@)/, "", $0)
if(length($1)>0){print $1} if(length($1)>0){print $1}
else {print "local"} else {print "local"}
}' <<< "$(printf '%s\n' "${sources[@]}")" 2> /dev/null }' <<<"$(printf '%s\n' "${sources[@]}")" 2>/dev/null
)" )"
local str="Pulling blocklist source list into range" local str="Pulling blocklist source list into range"
@ -401,14 +377,14 @@ gravity_DownloadBlocklists() {
unset sources unset sources
fi fi
local url domain str target compression local url domain str target compression adlist_type
echo "" echo ""
# Prepare new gravity database # Prepare new gravity database
str="Preparing new gravity database" str="Preparing new gravity database"
echo -ne " ${INFO} ${str}..." echo -ne " ${INFO} ${str}..."
rm "${gravityTEMPfile}" > /dev/null 2>&1 rm "${gravityTEMPfile}" >/dev/null 2>&1
output=$( { pihole-FTL sqlite3 -ni "${gravityTEMPfile}" < "${gravityDBschema}"; } 2>&1 ) output=$({ pihole-FTL sqlite3 -ni "${gravityTEMPfile}" <"${gravityDBschema}"; } 2>&1)
status="$?" status="$?"
if [[ "${status}" -ne 0 ]]; then if [[ "${status}" -ne 0 ]]; then
@ -428,7 +404,7 @@ gravity_DownloadBlocklists() {
copyGravity="${copyGravity//"${gravityDBfile_default}"/"${gravityDBfile}"}" copyGravity="${copyGravity//"${gravityDBfile_default}"/"${gravityDBfile}"}"
fi fi
output=$( { pihole-FTL sqlite3 -ni "${gravityTEMPfile}" <<< "${copyGravity}"; } 2>&1 ) output=$({ pihole-FTL sqlite3 -ni "${gravityTEMPfile}" <<<"${copyGravity}"; } 2>&1)
status="$?" status="$?"
if [[ "${status}" -ne 0 ]]; then if [[ "${status}" -ne 0 ]]; then
@ -452,6 +428,15 @@ gravity_DownloadBlocklists() {
url="${sources[$i]}" url="${sources[$i]}"
domain="${sourceDomains[$i]}" domain="${sourceDomains[$i]}"
id="${sourceIDs[$i]}" id="${sourceIDs[$i]}"
if [[ "${sourceTypes[$i]}" -eq "0" ]]; then
# Gravity list
str="blocklist"
adlist_type="gravity"
else
# AntiGravity list
str="allowlist"
adlist_type="antigravity"
fi
# Save the file as list.#.domain # Save the file as list.#.domain
saveLocation="${piholeDir}/list.${id}.${domain}.${domainsExtension}" saveLocation="${piholeDir}/list.${id}.${domain}.${domainsExtension}"
@ -464,12 +449,12 @@ gravity_DownloadBlocklists() {
# this will remove first @ that is after schema and before domain # this will remove first @ that is after schema and before domain
# \1 is optional schema, \2 is userinfo # \1 is optional schema, \2 is userinfo
check_url="$( sed -re 's#([^:/]*://)?([^/]+)@#\1\2#' <<< "$url" )" check_url="$(sed -re 's#([^:/]*://)?([^/]+)@#\1\2#' <<<"$url")"
if [[ "${check_url}" =~ ${regex} ]]; then if [[ "${check_url}" =~ ${regex} ]]; then
echo -e " ${CROSS} Invalid Target" echo -e " ${CROSS} Invalid Target"
else else
gravity_DownloadBlocklistFromUrl "${url}" "${sourceIDs[$i]}" "${saveLocation}" "${target}" "${compression}" gravity_DownloadBlocklistFromUrl "${url}" "${sourceIDs[$i]}" "${saveLocation}" "${target}" "${compression}" "${adlist_type}" "${domain}"
fi fi
echo "" echo ""
done done
@ -484,7 +469,7 @@ compareLists() {
if [[ -s "${target}.sha1" ]]; then if [[ -s "${target}.sha1" ]]; then
if ! sha1sum --check --status --strict "${target}.sha1"; then if ! sha1sum --check --status --strict "${target}.sha1"; then
# The list changed upstream, we need to update the checksum # The list changed upstream, we need to update the checksum
sha1sum "${target}" > "${target}.sha1" sha1sum "${target}" >"${target}.sha1"
echo " ${INFO} List has been updated" echo " ${INFO} List has been updated"
database_adlist_status "${adlistID}" "1" database_adlist_status "${adlistID}" "1"
else else
@ -493,7 +478,7 @@ compareLists() {
fi fi
else else
# No checksum available, create one for comparing on the next run # No checksum available, create one for comparing on the next run
sha1sum "${target}" > "${target}.sha1" sha1sum "${target}" >"${target}.sha1"
# We assume here it was changed upstream # We assume here it was changed upstream
database_adlist_status "${adlistID}" "1" database_adlist_status "${adlistID}" "1"
fi fi
@ -501,13 +486,14 @@ compareLists() {
# Download specified URL and perform checks on HTTP status and file content # Download specified URL and perform checks on HTTP status and file content
gravity_DownloadBlocklistFromUrl() { gravity_DownloadBlocklistFromUrl() {
local url="${1}" adlistID="${2}" saveLocation="${3}" target="${4}" compression="${5}" local url="${1}" adlistID="${2}" saveLocation="${3}" target="${4}" compression="${5}" gravity_type="${6}" domain="${7}"
local heisenbergCompensator="" listCurlBuffer str httpCode success="" ip cmd_ext local heisenbergCompensator="" listCurlBuffer str httpCode success="" ip cmd_ext
# Create temp file to store content on disk instead of RAM # Create temp file to store content on disk instead of RAM
# We don't use '--suffix' here because not all implementations of mktemp support it, e.g. on Alpine # We don't use '--suffix' here because not all implementations of mktemp support it, e.g. on Alpine
listCurlBuffer="$(mktemp -p "${GRAVITY_TMPDIR}")" listCurlBuffer="$(mktemp -p "${GRAVITY_TMPDIR}")"
mv "${listCurlBuffer}" "${listCurlBuffer%.*}.phgpb" mv "${listCurlBuffer}" "${listCurlBuffer%.*}.phgpb"
listCurlBuffer="${listCurlBuffer%.*}.phgpb"
# Determine if $saveLocation has read permission # Determine if $saveLocation has read permission
if [[ -r "${saveLocation}" && $url != "file"* ]]; then if [[ -r "${saveLocation}" && $url != "file"* ]]; then
@ -520,74 +506,128 @@ gravity_DownloadBlocklistFromUrl() {
str="Status:" str="Status:"
echo -ne " ${INFO} ${str} Pending..." echo -ne " ${INFO} ${str} Pending..."
blocked=false blocked=false
case $BLOCKINGMODE in case $(getFTLConfigValue dns.blocking.mode) in
"IP-NODATA-AAAA"|"IP") "IP-NODATA-AAAA" | "IP")
# Get IP address of this domain # Get IP address of this domain
ip="$(dig "${domain}" +short)" ip="$(dig "${domain}" +short)"
# Check if this IP matches any IP of the system # Check if this IP matches any IP of the system
if [[ -n "${ip}" && $(grep -Ec "inet(|6) ${ip}" <<< "$(ip a)") -gt 0 ]]; then if [[ -n "${ip}" && $(grep -Ec "inet(|6) ${ip}" <<<"$(ip a)") -gt 0 ]]; then
blocked=true blocked=true
fi;; fi
"NXDOMAIN") ;;
if [[ $(dig "${domain}" | grep "NXDOMAIN" -c) -ge 1 ]]; then "NXDOMAIN")
blocked=true if [[ $(dig "${domain}" | grep "NXDOMAIN" -c) -ge 1 ]]; then
fi;; blocked=true
"NODATA") fi
if [[ $(dig "${domain}" | grep "NOERROR" -c) -ge 1 ]] && [[ -z $(dig +short "${domain}") ]]; then ;;
blocked=true "NODATA")
fi;; if [[ $(dig "${domain}" | grep "NOERROR" -c) -ge 1 ]] && [[ -z $(dig +short "${domain}") ]]; then
"NULL"|*) blocked=true
if [[ $(dig "${domain}" +short | grep "0.0.0.0" -c) -ge 1 ]]; then fi
blocked=true ;;
fi;; "NULL" | *)
if [[ $(dig "${domain}" +short | grep "0.0.0.0" -c) -ge 1 ]]; then
blocked=true
fi
;;
esac esac
if [[ "${blocked}" == true ]]; then # Check if this domain is blocked by Pi-hole but only if the domain is not a
printf -v ip_addr "%s" "${PIHOLE_DNS_1%#*}" # local file or empty
if [[ ${PIHOLE_DNS_1} != *"#"* ]]; then if [[ $url != "file"* ]] && [[ -n "${domain}" ]]; then
port=53 case $(getFTLConfigValue dns.blocking.mode) in
else "IP-NODATA-AAAA"|"IP")
printf -v port "%s" "${PIHOLE_DNS_1#*#}" # Get IP address of this domain
ip="$(dig "${domain}" +short)"
# Check if this IP matches any IP of the system
if [[ -n "${ip}" && $(grep -Ec "inet(|6) ${ip}" <<< "$(ip a)") -gt 0 ]]; then
blocked=true
fi;;
"NXDOMAIN")
if [[ $(dig "${domain}" | grep "NXDOMAIN" -c) -ge 1 ]]; then
blocked=true
fi;;
"NODATA")
if [[ $(dig "${domain}" | grep "NOERROR" -c) -ge 1 ]] && [[ -z $(dig +short "${domain}") ]]; then
blocked=true
fi;;
"NULL"|*)
if [[ $(dig "${domain}" +short | grep "0.0.0.0" -c) -ge 1 ]]; then
blocked=true
fi;;
esac
if [[ "${blocked}" == true ]]; then
# Get first defined upstream server
local upstream
upstream="$(getFTLConfigValue dns.upstreams)"
# Isolate first upstream server from a string like
# [ 1.2.3.4#1234, 5.6.7.8#5678, ... ]
upstream="${upstream%%,*}"
upstream="${upstream##*[}"
upstream="${upstream%%]*}"
# Trim leading and trailing spaces and tabs
upstream="${upstream#"${upstream%%[![:space:]]*}"}"
upstream="${upstream%"${upstream##*[![:space:]]}"}"
# Get IP address and port of this upstream server
local ip_addr port
printf -v ip_addr "%s" "${upstream%#*}"
if [[ ${upstream} != *"#"* ]]; then
port=53
else
printf -v port "%s" "${upstream#*#}"
fi
ip=$(dig "@${ip_addr}" -p "${port}" +short "${domain}" | tail -1)
if [[ $(echo "${url}" | awk -F '://' '{print $1}') = "https" ]]; then
port=443
else
port=80
fi
echo -e "${OVER} ${CROSS} ${str} ${domain} is blocked by one of your lists. Using DNS server ${upstream} instead";
echo -ne " ${INFO} ${str} Pending..."
cmd_ext="--resolve $domain:$port:$ip"
fi fi
ip=$(dig "@${ip_addr}" -p "${port}" +short "${domain}" | tail -1)
if [[ $(echo "${url}" | awk -F '://' '{print $1}') = "https" ]]; then
port=443;
else port=80
fi
bad_list=$(pihole -q -adlist "${domain}" | head -n1 | awk -F 'Match found in ' '{print $2}')
echo -e "${OVER} ${CROSS} ${str} ${domain} is blocked by ${bad_list%:}. Using DNS on ${PIHOLE_DNS_1} to download ${url}";
echo -ne " ${INFO} ${str} Pending..."
cmd_ext="--resolve $domain:$port:$ip"
fi fi
# shellcheck disable=SC2086 # shellcheck disable=SC2086
httpCode=$(curl --connect-timeout ${curl_connect_timeout} -s -L ${compression} ${cmd_ext} ${heisenbergCompensator} -w "%{http_code}" "${url}" -o "${listCurlBuffer}" 2> /dev/null) httpCode=$(curl --connect-timeout ${curl_connect_timeout} -s -L ${compression} ${cmd_ext} ${heisenbergCompensator} -w "%{http_code}" "${url}" -o "${listCurlBuffer}" 2>/dev/null)
case $url in case $url in
# Did we "download" a local file? # Did we "download" a local file?
"file"*) "file"*)
if [[ -s "${listCurlBuffer}" ]]; then if [[ -s "${listCurlBuffer}" ]]; then
echo -e "${OVER} ${TICK} ${str} Retrieval successful"; success=true echo -e "${OVER} ${TICK} ${str} Retrieval successful"
else success=true
echo -e "${OVER} ${CROSS} ${str} Not found / empty list" else
fi;; echo -e "${OVER} ${CROSS} ${str} Not found / empty list"
# Did we "download" a remote file? fi
*) ;;
# Determine "Status:" output based on HTTP response # Did we "download" a remote file?
case "${httpCode}" in *)
"200") echo -e "${OVER} ${TICK} ${str} Retrieval successful"; success=true;; # Determine "Status:" output based on HTTP response
"304") echo -e "${OVER} ${TICK} ${str} No changes detected"; success=true;; case "${httpCode}" in
"000") echo -e "${OVER} ${CROSS} ${str} Connection Refused";; "200")
"403") echo -e "${OVER} ${CROSS} ${str} Forbidden";; echo -e "${OVER} ${TICK} ${str} Retrieval successful"
"404") echo -e "${OVER} ${CROSS} ${str} Not found";; success=true
"408") echo -e "${OVER} ${CROSS} ${str} Time-out";; ;;
"451") echo -e "${OVER} ${CROSS} ${str} Unavailable For Legal Reasons";; "304")
"500") echo -e "${OVER} ${CROSS} ${str} Internal Server Error";; echo -e "${OVER} ${TICK} ${str} No changes detected"
"504") echo -e "${OVER} ${CROSS} ${str} Connection Timed Out (Gateway)";; success=true
"521") echo -e "${OVER} ${CROSS} ${str} Web Server Is Down (Cloudflare)";; ;;
"522") echo -e "${OVER} ${CROSS} ${str} Connection Timed Out (Cloudflare)";; "000") echo -e "${OVER} ${CROSS} ${str} Connection Refused" ;;
* ) echo -e "${OVER} ${CROSS} ${str} ${url} (${httpCode})";; "403") echo -e "${OVER} ${CROSS} ${str} Forbidden" ;;
esac;; "404") echo -e "${OVER} ${CROSS} ${str} Not found" ;;
"408") echo -e "${OVER} ${CROSS} ${str} Time-out" ;;
"451") echo -e "${OVER} ${CROSS} ${str} Unavailable For Legal Reasons" ;;
"500") echo -e "${OVER} ${CROSS} ${str} Internal Server Error" ;;
"504") echo -e "${OVER} ${CROSS} ${str} Connection Timed Out (Gateway)" ;;
"521") echo -e "${OVER} ${CROSS} ${str} Web Server Is Down (Cloudflare)" ;;
"522") echo -e "${OVER} ${CROSS} ${str} Connection Timed Out (Cloudflare)" ;;
*) echo -e "${OVER} ${CROSS} ${str} ${url} (${httpCode})" ;;
esac
;;
esac esac
local done="false" local done="false"
@ -595,7 +635,7 @@ gravity_DownloadBlocklistFromUrl() {
if [[ "${success}" == true ]]; then if [[ "${success}" == true ]]; then
if [[ "${httpCode}" == "304" ]]; then if [[ "${httpCode}" == "304" ]]; then
# Add domains to database table file # Add domains to database table file
pihole-FTL gravity parseList "${saveLocation}" "${gravityTEMPfile}" "${adlistID}" pihole-FTL "${gravity_type}" parseList "${saveLocation}" "${gravityTEMPfile}" "${adlistID}"
database_adlist_status "${adlistID}" "2" database_adlist_status "${adlistID}" "2"
done="true" done="true"
# Check if $listCurlBuffer is a non-zero length file # Check if $listCurlBuffer is a non-zero length file
@ -605,7 +645,7 @@ gravity_DownloadBlocklistFromUrl() {
# Remove curl buffer file after its use # Remove curl buffer file after its use
rm "${listCurlBuffer}" rm "${listCurlBuffer}"
# Add domains to database table file # Add domains to database table file
pihole-FTL gravity parseList "${saveLocation}" "${gravityTEMPfile}" "${adlistID}" pihole-FTL "${gravity_type}" parseList "${saveLocation}" "${gravityTEMPfile}" "${adlistID}"
# Compare lists, are they identical? # Compare lists, are they identical?
compareLists "${adlistID}" "${saveLocation}" compareLists "${adlistID}" "${saveLocation}"
done="true" done="true"
@ -621,7 +661,7 @@ gravity_DownloadBlocklistFromUrl() {
if [[ -r "${saveLocation}" ]]; then if [[ -r "${saveLocation}" ]]; then
echo -e " ${CROSS} List download failed: ${COL_LIGHT_GREEN}using previously cached list${COL_NC}" echo -e " ${CROSS} List download failed: ${COL_LIGHT_GREEN}using previously cached list${COL_NC}"
# Add domains to database table file # Add domains to database table file
pihole-FTL gravity parseList "${saveLocation}" "${gravityTEMPfile}" "${adlistID}" pihole-FTL "${gravity_type}" parseList "${saveLocation}" "${gravityTEMPfile}" "${adlistID}"
database_adlist_status "${adlistID}" "3" database_adlist_status "${adlistID}" "3"
else else
echo -e " ${CROSS} List download failed: ${COL_LIGHT_RED}no cached list available${COL_NC}" echo -e " ${CROSS} List download failed: ${COL_LIGHT_RED}no cached list available${COL_NC}"
@ -641,7 +681,7 @@ gravity_ParseFileIntoDomains() {
# This helps with that and makes it easier to read # This helps with that and makes it easier to read
# It also helps with debugging so each stage of the script can be researched more in depth # It also helps with debugging so each stage of the script can be researched more in depth
# 1) Convert all characters to lowercase # 1) Convert all characters to lowercase
tr '[:upper:]' '[:lower:]' < "${src}" > "${destination}" tr '[:upper:]' '[:lower:]' <"${src}" >"${destination}"
# 2) Remove carriage returns # 2) Remove carriage returns
# 3) Remove lines starting with ! (ABP Comments) # 3) Remove lines starting with ! (ABP Comments)
@ -651,7 +691,7 @@ gravity_ParseFileIntoDomains() {
# 7) Remove leading tabs, spaces, etc. (Also removes leading IP addresses) # 7) Remove leading tabs, spaces, etc. (Also removes leading IP addresses)
# 8) Remove empty lines # 8) Remove empty lines
sed -i -r \ sed -i -r \
-e 's/\r$//' \ -e 's/\r$//' \
-e 's/\s*!.*//g' \ -e 's/\s*!.*//g' \
-e 's/\s*\[.*//g' \ -e 's/\s*\[.*//g' \
@ -668,12 +708,12 @@ gravity_Table_Count() {
local table="${1}" local table="${1}"
local str="${2}" local str="${2}"
local num local num
num="$(pihole-FTL sqlite3 -ni "${gravityDBfile}" "SELECT COUNT(*) FROM ${table};")" num="$(pihole-FTL sqlite3 -ni "${gravityTEMPfile}" "SELECT COUNT(*) FROM ${table};")"
if [[ "${table}" == "gravity" ]]; then if [[ "${table}" == "gravity" ]]; then
local unique local unique
unique="$(pihole-FTL sqlite3 -ni "${gravityDBfile}" "SELECT COUNT(*) FROM (SELECT DISTINCT domain FROM ${table});")" unique="$(pihole-FTL sqlite3 -ni "${gravityTEMPfile}" "SELECT COUNT(*) FROM (SELECT DISTINCT domain FROM ${table});")"
echo -e " ${INFO} Number of ${str}: ${num} (${COL_BOLD}${unique} unique domains${COL_NC})" echo -e " ${INFO} Number of ${str}: ${num} (${COL_BOLD}${unique} unique domains${COL_NC})"
pihole-FTL sqlite3 -ni "${gravityDBfile}" "INSERT OR REPLACE INTO info (property,value) VALUES ('gravity_count',${unique});" pihole-FTL sqlite3 -ni "${gravityTEMPfile}" "INSERT OR REPLACE INTO info (property,value) VALUES ('gravity_count',${unique});"
else else
echo -e " ${INFO} Number of ${str}: ${num}" echo -e " ${INFO} Number of ${str}: ${num}"
fi fi
@ -684,22 +724,10 @@ gravity_ShowCount() {
# Here we use the table "gravity" instead of the view "vw_gravity" for speed. # Here we use the table "gravity" instead of the view "vw_gravity" for speed.
# It's safe to replace it here, because right after a gravity run both will show the exactly same number of domains. # It's safe to replace it here, because right after a gravity run both will show the exactly same number of domains.
gravity_Table_Count "gravity" "gravity domains" "" gravity_Table_Count "gravity" "gravity domains" ""
gravity_Table_Count "vw_blacklist" "exact blacklisted domains" gravity_Table_Count "vw_blacklist" "exact denied domains"
gravity_Table_Count "vw_regex_blacklist" "regex blacklist filters" gravity_Table_Count "vw_regex_blacklist" "regex denied filters"
gravity_Table_Count "vw_whitelist" "exact whitelisted domains" gravity_Table_Count "vw_whitelist" "exact allowed domains"
gravity_Table_Count "vw_regex_whitelist" "regex whitelist filters" gravity_Table_Count "vw_regex_whitelist" "regex allowed filters"
}
# Create "localhost" entries into hosts format
gravity_generateLocalList() {
# Empty $localList if it already exists, otherwise, create it
echo "### Do not modify this file, it will be overwritten by pihole -g" > "${localList}"
chmod 644 "${localList}"
# Add additional LAN hosts provided by OpenVPN (if available)
if [[ -f "${VPNList}" ]]; then
awk -F, '{printf $2"\t"$1".vpn\n"}' "${VPNList}" >> "${localList}"
fi
} }
# Trap Ctrl-C # Trap Ctrl-C
@ -715,12 +743,12 @@ gravity_Cleanup() {
echo -ne " ${INFO} ${str}..." echo -ne " ${INFO} ${str}..."
# Delete tmp content generated by Gravity # Delete tmp content generated by Gravity
rm ${piholeDir}/pihole.*.txt 2> /dev/null rm ${piholeDir}/pihole.*.txt 2>/dev/null
rm ${piholeDir}/*.tmp 2> /dev/null rm ${piholeDir}/*.tmp 2>/dev/null
# listCurlBuffer location # listCurlBuffer location
rm "${GRAVITY_TMPDIR}"/*.phgpb 2> /dev/null rm "${GRAVITY_TMPDIR}"/*.phgpb 2>/dev/null
# invalid_domains location # invalid_domains location
rm "${GRAVITY_TMPDIR}"/*.ph-non-domains 2> /dev/null rm "${GRAVITY_TMPDIR}"/*.ph-non-domains 2>/dev/null
# Ensure this function only runs when gravity_SetDownloadOptions() has completed # Ensure this function only runs when gravity_SetDownloadOptions() has completed
if [[ "${gravity_Blackbody:-}" == true ]]; then if [[ "${gravity_Blackbody:-}" == true ]]; then
@ -728,7 +756,7 @@ gravity_Cleanup() {
for file in "${piholeDir}"/*."${domainsExtension}"; do for file in "${piholeDir}"/*."${domainsExtension}"; do
# If list is not in active array, then remove it # If list is not in active array, then remove it
if [[ ! "${activeDomains[*]}" == *"${file}"* ]]; then if [[ ! "${activeDomains[*]}" == *"${file}"* ]]; then
rm -f "${file}" 2> /dev/null || \ rm -f "${file}" 2>/dev/null ||
echo -e " ${CROSS} Failed to remove ${file##*/}" echo -e " ${CROSS} Failed to remove ${file##*/}"
fi fi
done done
@ -736,11 +764,11 @@ gravity_Cleanup() {
echo -e "${OVER} ${TICK} ${str}" echo -e "${OVER} ${TICK} ${str}"
# Only restart DNS service if offline # # Only restart DNS service if offline
if ! pgrep pihole-FTL &> /dev/null; then # if ! pgrep pihole-FTL &> /dev/null; then
"${PIHOLE_COMMAND}" restartdns # "${PIHOLE_COMMAND}" restartdns
dnsWasOffline=true # dnsWasOffline=true
fi # fi
# Print Pi-hole status if an error occurred # Print Pi-hole status if an error occurred
if [[ -n "${error}" ]]; then if [[ -n "${error}" ]]; then
@ -770,17 +798,17 @@ database_recovery() {
fi fi
else else
echo -e "${OVER} ${CROSS} ${str} - errors found:" echo -e "${OVER} ${CROSS} ${str} - errors found:"
while IFS= read -r line ; do echo " - $line"; done <<< "$result" while IFS= read -r line; do echo " - $line"; done <<<"$result"
fi fi
else else
echo -e "${OVER} ${CROSS} ${str} - errors found:" echo -e "${OVER} ${CROSS} ${str} - errors found:"
while IFS= read -r line ; do echo " - $line"; done <<< "$result" while IFS= read -r line; do echo " - $line"; done <<<"$result"
fi fi
str="Trying to recover existing gravity database" str="Trying to recover existing gravity database"
echo -ne " ${INFO} ${str}..." echo -ne " ${INFO} ${str}..."
# We have to remove any possibly existing recovery database or this will fail # We have to remove any possibly existing recovery database or this will fail
rm -f "${gravityDBfile}.recovered" > /dev/null 2>&1 rm -f "${gravityDBfile}.recovered" >/dev/null 2>&1
if result="$(pihole-FTL sqlite3 -ni "${gravityDBfile}" ".recover" | pihole-FTL sqlite3 -ni "${gravityDBfile}.recovered" 2>&1)"; then if result="$(pihole-FTL sqlite3 -ni "${gravityDBfile}" ".recover" | pihole-FTL sqlite3 -ni "${gravityDBfile}.recovered" 2>&1)"; then
echo -e "${OVER} ${TICK} ${str} - success" echo -e "${OVER} ${TICK} ${str} - success"
mv "${gravityDBfile}" "${gravityDBfile}.old" mv "${gravityDBfile}" "${gravityDBfile}.old"
@ -789,7 +817,7 @@ database_recovery() {
echo -ne " ${INFO} The old ${gravityDBfile} has been moved to ${gravityDBfile}.old" echo -ne " ${INFO} The old ${gravityDBfile} has been moved to ${gravityDBfile}.old"
else else
echo -e "${OVER} ${CROSS} ${str} - the following errors happened:" echo -e "${OVER} ${CROSS} ${str} - the following errors happened:"
while IFS= read -r line ; do echo " - $line"; done <<< "$result" while IFS= read -r line; do echo " - $line"; done <<<"$result"
echo -e " ${CROSS} Recovery failed. Try \"pihole -r recreate\" instead." echo -e " ${CROSS} Recovery failed. Try \"pihole -r recreate\" instead."
exit 1 exit 1
fi fi
@ -808,9 +836,10 @@ Options:
repairSelector() { repairSelector() {
case "$1" in case "$1" in
"recover") recover_database=true;; "recover") recover_database=true ;;
"recreate") recreate_database=true;; "recreate") recreate_database=true ;;
*) echo "Usage: pihole -g -r {recover,recreate} *)
echo "Usage: pihole -g -r {recover,recreate}
Attempt to repair gravity database Attempt to repair gravity database
Available options: Available options:
@ -829,15 +858,17 @@ Available options:
and create a new file from scratch. If you still and create a new file from scratch. If you still
have the migration backup created when migrating have the migration backup created when migrating
to Pi-hole v5.0, Pi-hole will import these files." to Pi-hole v5.0, Pi-hole will import these files."
exit 0;; exit 0
;;
esac esac
} }
for var in "$@"; do for var in "$@"; do
case "${var}" in case "${var}" in
"-f" | "--force" ) forceDelete=true;; "-f" | "--force" ) forceDelete=true;;
"-r" | "--repair" ) repairSelector "$3";; "-r" | "--repair" ) repairSelector "$3";;
"-h" | "--help" ) helpFunc;; "-u" | "--upgrade" ) upgrade_gravityDB "${gravityDBfile}" "${piholeDir}"; exit 0;;
"-h" | "--help" ) helpFunc;;
esac esac
done done
@ -853,9 +884,9 @@ if [[ "${recreate_database:-}" == true ]]; then
str="Recreating gravity database from migration backup" str="Recreating gravity database from migration backup"
echo -ne "${INFO} ${str}..." echo -ne "${INFO} ${str}..."
rm "${gravityDBfile}" rm "${gravityDBfile}"
pushd "${piholeDir}" > /dev/null || exit pushd "${piholeDir}" >/dev/null || exit
cp migration_backup/* . cp migration_backup/* .
popd > /dev/null || exit popd >/dev/null || exit
echo -e "${OVER} ${TICK} ${str}" echo -e "${OVER} ${TICK} ${str}"
fi fi
@ -873,7 +904,7 @@ if [[ "${forceDelete:-}" == true ]]; then
str="Deleting existing list cache" str="Deleting existing list cache"
echo -ne "${INFO} ${str}..." echo -ne "${INFO} ${str}..."
rm /etc/pihole/list.* 2> /dev/null || true rm /etc/pihole/list.* 2>/dev/null || true
echo -e "${OVER} ${TICK} ${str}" echo -e "${OVER} ${TICK} ${str}"
fi fi
@ -888,31 +919,30 @@ if ! gravity_DownloadBlocklists; then
exit 1 exit 1
fi fi
# Create local.list
gravity_generateLocalList
# Migrate rest of the data from old to new database
if ! gravity_swap_databases; then
echo -e " ${CROSS} Unable to create database. Please contact support."
exit 1
fi
# Update gravity timestamp # Update gravity timestamp
update_gravity_timestamp update_gravity_timestamp
# Ensure proper permissions are set for the database # Ensure proper permissions are set for the database
chown pihole:pihole "${gravityDBfile}" chown pihole:pihole "${gravityTEMPfile}"
chmod g+w "${piholeDir}" "${gravityDBfile}" chmod g+w "${piholeDir}" "${gravityTEMPfile}"
# Compute numbers to be displayed # Build the tree
gravity_build_tree
# Compute numbers to be displayed (do this after building the tree to get the
# numbers quickly from the tree instead of having to scan the whole database)
gravity_ShowCount gravity_ShowCount
# Determine if DNS has been restarted by this instance of gravity # Migrate rest of the data from old to new database
if [[ -z "${dnsWasOffline:-}" ]]; then # IMPORTANT: Swapping the databases must be the last step before the cleanup
"${PIHOLE_COMMAND}" restartdns reload if ! gravity_swap_databases; then
echo -e " ${CROSS} Unable to create database. Please contact support."
exit 1
fi fi
gravity_Cleanup gravity_Cleanup
echo "" echo ""
"${PIHOLE_COMMAND}" status echo " ${TICK} Done."
# "${PIHOLE_COMMAND}" status

@ -1,154 +0,0 @@
.TH "Pihole-FTL" "8" "pihole-FTL" "Pi-hole" "November 2020"
.SH "NAME"
pihole-FTL - Pi-hole : The Faster-Than-Light (FTL) Engine
.br
.SH "SYNOPSIS"
\fBservice pihole-FTL \fR(\fBstart\fR|\fBstop\fR|\fBrestart\fR)
.br
\fBpihole-FTL debug\fR
.br
\fBpihole-FTL test\fR
.br
\fBpihole-FTL -v|-vv\fR
.br
\fBpihole-FTL -t\fR
.br
\fBpihole-FTL -b\fR
.br
\fBpihole-FTL -f\fR
.br
\fBpihole-FTL -h\fR
.br
\fBpihole-FTL dnsmasq-test\fR
.br
\fBpihole-FTL regex-test str\fR
.br
\fBpihole-FTL regex-test str rgx\fR
.br
\fBpihole-FTL lua\fR
.br
\fBpihole-FTL luac\fR
.br
\fBpihole-FTL dhcp-discover\fR
.br
\fBpihole-FTL --\fR (\fBoptions\fR)
.br
.SH "DESCRIPTION"
Pi-hole : The Faster-Than-Light (FTL) Engine is a lightweight, purpose-built daemon used to provide statistics needed for the Pi-hole Web Interface, and its API can be easily integrated into your own projects. Although it is an optional component of the Pi-hole ecosystem, it will be installed by default to provide statistics. As the name implies, FTL does its work \fIvery\fR \fIquickly\fR!
.br
Usage
.br
\fBservice pihole-FTL start\fR
.br
Start the pihole-FTL daemon
.br
\fBservice pihole-FTL stop\fR
.br
Stop the pihole-FTL daemon
.br
\fBservice pihole-FTL restart\fR
.br
If the pihole-FTL daemon is running, stop and then start it; otherwise start it.
.br
Command line arguments
.br
\fBdebug\fR
.br
Don't go into daemon mode (stay in foreground) + more verbose logging
.br
\fBtest\fR
.br
Start FTL and process everything, but shut down immediately afterwards
.br
\fB-v, version\fR
.br
Don't start FTL, show only version
.br
\fB-vv\fR
.br
Don't start FTL, show verbose version information of embedded applications
.br
\fB-t, tag\fR
.br
Don't start FTL, show only git tag
.br
\fB-b, branch\fR
.br
Don't start FTL, show only git branch FTL was compiled from
.br
\fB-f, no-daemon\fR
.br
Don't go into background (daemon mode)
.br
\fB-h, help\fR
.br
Don't start FTL, show help
.br
\fBdnsmasq-test\fR
.br
Test resolver config file syntax
.br
\fBregex-test str\fR
.br
Test str against all regular expressions in the database
.br
\fBregex-test str rgx\fR
.br
Test str against regular expression given by rgx
.br
\fBlua\fR
.br
Start the embedded Lua interpreter
.br
\fBluac\fR
.br
Execute the embedded Lua compiler
.br
\fBdhcp-discover\fR
.br
Discover DHCP servers in the local network
.br
\fB--\fR (options)
.br
Pass options to internal dnsmasq resolver
.br
.SH "EXAMPLE"
Command line arguments can be arbitrarily combined, e.g.:
.br
\fBpihole-FTL debug test\fR
.br
Start FTL in the foreground with more verbose logging, process everything and shut down immediately
.br
.SH "SEE ALSO"
\fBpihole\fR(8)
.br
\fBFor FTL's config options please see https://docs.pi-hole.net/ftldns/configfile/\fR
.br
.SH "COLOPHON"
Get sucked into the latest news and community activity by entering Pi-hole's orbit. Information about Pi-hole, and the latest version of the software can be found at https://pi-hole.net
.br

@ -141,20 +141,6 @@ Available commands and options:
(0 = lowest, 3 = highest) (0 = lowest, 3 = highest)
.br .br
\fB-c, chronometer\fR [options]
.br
Calculates stats and displays to an LCD
.br
(Chronometer Options):
.br
-j, --json Output stats as JSON formatted string
.br
-r, --refresh Set update frequency (in seconds)
.br
-e, --exit Output stats and exit without refreshing
.br
\fB-g, updateGravity\fR \fB-g, updateGravity\fR
.br .br
Update the list of ad-serving domains Update the list of ad-serving domains


@ -11,10 +11,9 @@
readonly PI_HOLE_SCRIPT_DIR="/opt/pihole" readonly PI_HOLE_SCRIPT_DIR="/opt/pihole"
# setupVars and PI_HOLE_BIN_DIR are not readonly here because in some functions (checkout), # PI_HOLE_BIN_DIR is not readonly here because in some functions (checkout),
# they might get set again when the installer is sourced. This causes an # they might get set again when the installer is sourced. This causes an
# error due to modifying a readonly variable. # error due to modifying a readonly variable.
setupVars="/etc/pihole/setupVars.conf"
PI_HOLE_BIN_DIR="/usr/local/bin" PI_HOLE_BIN_DIR="/usr/local/bin"
readonly colfile="${PI_HOLE_SCRIPT_DIR}/COL_TABLE" readonly colfile="${PI_HOLE_SCRIPT_DIR}/COL_TABLE"
@ -31,10 +30,36 @@ if [ -f "${versionsfile}" ]; then
source "${versionsfile}" source "${versionsfile}"
fi fi
webpageFunc() { # TODO: We can probably remove the reliance on this function too, just tell people to pihole-FTL --config webserver.api.password "password"
source "${PI_HOLE_SCRIPT_DIR}/webpage.sh" SetWebPassword() {
main "$@" if [ -n "$2" ] ; then
exit 0 readonly PASSWORD="$2"
readonly CONFIRM="${PASSWORD}"
else
# Prevents a bug where, if the user presses Ctrl+C, the terminal would continue to hide the typed text.
# So we reset the terminal via stty if the user does press Ctrl+C
trap '{ echo -e "\nNot changed" ; stty sane ; exit 1; }' INT
read -s -r -p "Enter New Password (Blank for no password): " PASSWORD
echo ""
if [ "${PASSWORD}" == "" ]; then
setFTLConfigValue "webserver.api.password" ""
echo -e " ${TICK} Password Removed"
exit 0
fi
read -s -r -p "Confirm Password: " CONFIRM
echo ""
fi
if [ "${PASSWORD}" == "${CONFIRM}" ] ; then
# pihole-FTL will automatically hash the password
setFTLConfigValue "webserver.api.password" "${PASSWORD}"
echo -e " ${TICK} New password set"
else
echo -e " ${CROSS} Passwords don't match. Your password has not been changed"
exit 1
fi
} }
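# Usage sketch (illustrative, following the help text further below):
#   pihole setpassword 'my secret password'   # set the web interface password directly (single-quote it)
#   pihole setpassword                        # no argument: prompt for the password interactively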
listFunc() { listFunc() {
@ -100,8 +125,7 @@ queryFunc() {
} }
chronometerFunc() { chronometerFunc() {
shift echo "Chronometer is gone, use PADD (https://github.com/pi-hole/PADD)"
"${PI_HOLE_SCRIPT_DIR}"/chronometer.sh "$@"
exit 0 exit 0
} }
@ -116,8 +140,7 @@ uninstallFunc() {
} }
versionFunc() { versionFunc() {
shift exec "${PI_HOLE_SCRIPT_DIR}"/version.sh
exec "${PI_HOLE_SCRIPT_DIR}"/version.sh "$@"
} }
restartDNS() { restartDNS() {
@ -193,11 +216,11 @@ Time:
elif [[ "${1}" == "0" ]]; then elif [[ "${1}" == "0" ]]; then
# Disable Pi-hole # Disable Pi-hole
if grep -cq "BLOCKING_ENABLED=false" "${setupVars}"; then if ! getFTLConfigValue dns.blocking.active; then
echo -e " ${INFO} Blocking already disabled, nothing to do" echo -e " ${INFO} Blocking already disabled, nothing to do"
exit 0 exit 0
fi fi
if [[ $# > 1 ]]; then if [[ $# -gt 1 ]]; then
local error=false local error=false
if [[ "${2}" == *"s" ]]; then if [[ "${2}" == *"s" ]]; then
tt=${2%"s"} tt=${2%"s"}
@ -233,19 +256,19 @@ Time:
fi fi
local str="Pi-hole Disabled" local str="Pi-hole Disabled"
addOrEditKeyValPair "${setupVars}" "BLOCKING_ENABLED" "false" setFTLConfigValue dns.blocking.active false
fi fi
else else
# Enable Pi-hole # Enable Pi-hole
killall -q pihole-reenable killall -q pihole-reenable
if grep -cq "BLOCKING_ENABLED=true" "${setupVars}"; then if getFTLConfigValue dns.blocking.active; then
echo -e " ${INFO} Blocking already enabled, nothing to do" echo -e " ${INFO} Blocking already enabled, nothing to do"
exit 0 exit 0
fi fi
echo -e " ${INFO} Enabling blocking" echo -e " ${INFO} Enabling blocking"
local str="Pi-hole Enabled" local str="Pi-hole Enabled"
addOrEditKeyValPair "${setupVars}" "BLOCKING_ENABLED" "true" setFTLConfigValue dns.blocking.active true
fi fi
restartDNS reload-lists restartDNS reload-lists
@ -267,8 +290,7 @@ Options:
exit 0 exit 0
elif [[ "${1}" == "off" ]]; then elif [[ "${1}" == "off" ]]; then
# Disable logging # Disable logging
removeKey /etc/dnsmasq.d/01-pihole.conf "log-queries" setFTLConfigValue dns.queryLogging false
addOrEditKeyValPair "${setupVars}" "QUERY_LOGGING" "false"
if [[ "${2}" != "noflush" ]]; then if [[ "${2}" != "noflush" ]]; then
# Flush logs # Flush logs
"${PI_HOLE_BIN_DIR}"/pihole -f "${PI_HOLE_BIN_DIR}"/pihole -f
@ -277,8 +299,7 @@ Options:
local str="Logging has been disabled!" local str="Logging has been disabled!"
elif [[ "${1}" == "on" ]]; then elif [[ "${1}" == "on" ]]; then
# Enable logging # Enable logging
addKey /etc/dnsmasq.d/01-pihole.conf "log-queries" setFTLConfigValue dns.queryLogging true
addOrEditKeyValPair "${setupVars}" "QUERY_LOGGING" "true"
echo -e " ${INFO} Enabling logging..." echo -e " ${INFO} Enabling logging..."
local str="Logging has been enabled!" local str="Logging has been enabled!"
else else
@ -323,13 +344,12 @@ analyze_ports() {
statusFunc() { statusFunc() {
# Determine if there is pihole-FTL service is listening # Determine if there is pihole-FTL service is listening
local pid port ftl_api_port ftl_pid_file local pid port ftl_pid_file block_status
ftl_pid_file="$(getFTLPIDFile)" ftl_pid_file="$(getFTLPIDFile)"
pid="$(getFTLPID ${ftl_pid_file})" pid="$(getFTLPID ${ftl_pid_file})"
ftl_api_port="$(getFTLAPIPort)"
if [[ "$pid" -eq "-1" ]]; then if [[ "$pid" -eq "-1" ]]; then
case "${1}" in case "${1}" in
"web") echo "-1";; "web") echo "-1";;
@ -337,8 +357,8 @@ statusFunc() {
esac esac
return 0 return 0
else else
#get the DNS port pihole-FTL is listening on by using FTL's telnet API # get the DNS port pihole-FTL is listening on
port="$(echo ">dns-port >quit" | nc 127.0.0.1 "$ftl_api_port")" port="$(getFTLConfigValue dns.port)"
if [[ "${port}" == "0" ]]; then if [[ "${port}" == "0" ]]; then
case "${1}" in case "${1}" in
"web") echo "-1";; "web") echo "-1";;
@ -354,26 +374,17 @@ statusFunc() {
fi fi
# Determine if Pi-hole's blocking is enabled # Determine if Pi-hole's blocking is enabled
if grep -q "BLOCKING_ENABLED=false" /etc/pihole/setupVars.conf; then block_status=$(getFTLConfigValue dns.blocking.active)
# A config is commented out if [ ${block_status} == "true" ]; then
case "${1}" in
"web") echo 0;;
*) echo -e " ${CROSS} Pi-hole blocking is disabled";;
esac
elif grep -q "BLOCKING_ENABLED=true" /etc/pihole/setupVars.conf; then
# Configs are set
case "${1}" in case "${1}" in
"web") echo "$port";; "web") echo "$port";;
*) echo -e " ${TICK} Pi-hole blocking is enabled";; *) echo -e " ${TICK} Pi-hole blocking is enabled";;
esac esac
else else
# No configs were found
case "${1}" in case "${1}" in
"web") echo -2;; "web") echo 0;;
*) echo -e " ${INFO} Pi-hole blocking will be enabled";; *) echo -e " ${CROSS} Pi-hole blocking is disabled";;
esac esac
# Enable blocking
"${PI_HOLE_BIN_DIR}"/pihole enable
fi fi
exit 0 exit 0
} }
@ -401,26 +412,30 @@ tailFunc() {
} }
piholeCheckoutFunc() { piholeCheckoutFunc() {
if [[ "$2" == "-h" ]] || [[ "$2" == "--help" ]]; then if [ -n "${DOCKER_VERSION}" ]; then
echo "Usage: pihole checkout [repo] [branch] unsupportedFunc
Example: 'pihole checkout master' or 'pihole checkout core dev' else
Switch Pi-hole subsystems to a different GitHub branch if [[ "$2" == "-h" ]] || [[ "$2" == "--help" ]]; then
echo "Usage: pihole checkout [repo] [branch]
Repositories: Example: 'pihole checkout master' or 'pihole checkout core dev'
core [branch] Change the branch of Pi-hole's core subsystem Switch Pi-hole subsystems to a different GitHub branch
web [branch] Change the branch of Web Interface subsystem
ftl [branch] Change the branch of Pi-hole's FTL subsystem Repositories:
core [branch] Change the branch of Pi-hole's core subsystem
Branches: web [branch] Change the branch of Web Interface subsystem
master Update subsystems to the latest stable release ftl [branch] Change the branch of Pi-hole's FTL subsystem
dev Update subsystems to the latest development release
branchname Update subsystems to the specified branchname" Branches:
exit 0 master Update subsystems to the latest stable release
fi dev Update subsystems to the latest development release
branchname Update subsystems to the specified branchname"
exit 0
fi
source "${PI_HOLE_SCRIPT_DIR}"/piholeCheckout.sh source "${PI_HOLE_SCRIPT_DIR}"/piholeCheckout.sh
shift shift
checkout "$@" checkout "$@"
fi
} }
tricorderFunc() { tricorderFunc() {
@ -480,10 +495,9 @@ Debugging Options:
Options: Options:
-a, admin Web interface options setpassword [pwd] Set the password for the web interface
Add '-h' for more info on Web Interface usage Without optional argument, password is read interactively.
-c, chronometer Calculates stats and displays to an LCD When specifying a password directly, enclose it in single quotes.
Add '-h' for more info on chronometer usage
-g, updateGravity Update the list of ad-serving domains -g, updateGravity Update the list of ad-serving domains
-h, --help, help Show this help dialog -h, --help, help Show this help dialog
-l, logging Specify whether the Pi-hole log should be used -l, logging Specify whether the Pi-hole log should be used
@ -493,7 +507,6 @@ Options:
-up, updatePihole Update Pi-hole subsystems -up, updatePihole Update Pi-hole subsystems
Add '--check-only' to exit script before update is performed. Add '--check-only' to exit script before update is performed.
-v, version Show installed versions of Pi-hole, Web Interface & FTL -v, version Show installed versions of Pi-hole, Web Interface & FTL
Add '-h' for more info on version usage
uninstall Uninstall Pi-hole from your system uninstall Uninstall Pi-hole from your system
status Display the running status of Pi-hole subsystems status Display the running status of Pi-hole subsystems
enable Enable Pi-hole subsystems enable Enable Pi-hole subsystems
@ -513,13 +526,13 @@ if [[ $# = 0 ]]; then
fi fi
# functions that do not require sudo power # functions that do not require sudo power
need_root=1
case "${1}" in case "${1}" in
"-h" | "help" | "--help" ) helpFunc;; "-h" | "help" | "--help" ) helpFunc;;
"-v" | "version" ) versionFunc "$@";; "-v" | "version" ) versionFunc;;
"-c" | "chronometer" ) chronometerFunc "$@";; "-c" | "chronometer" ) chronometerFunc "$@";;
"-q" | "query" ) queryFunc "$@";; "-q" | "query" ) queryFunc "$@";;
"status" ) statusFunc "$2";; "status" ) statusFunc "$2";;
"tricorder" ) tricorderFunc;; "tricorder" ) tricorderFunc;;
# we need to add all arguments that require sudo power to not trigger the * argument # we need to add all arguments that require sudo power to not trigger the * argument
@ -532,14 +545,15 @@ case "${1}" in
"-f" | "flush" ) ;; "-f" | "flush" ) ;;
"-up" | "updatePihole" ) ;; "-up" | "updatePihole" ) ;;
"-r" | "reconfigure" ) ;; "-r" | "reconfigure" ) ;;
"-g" | "updateGravity" ) ;;
"-l" | "logging" ) ;; "-l" | "logging" ) ;;
"uninstall" ) ;; "uninstall" ) ;;
"enable" ) ;; "enable" ) ;;
"disable" ) ;; "disable" ) ;;
"-d" | "debug" ) ;; "-d" | "debug" ) ;;
"restartdns" ) ;; "restartdns" ) ;;
"-a" | "admin" ) ;; "-g" | "updateGravity" ) need_root=0;;
"reloaddns" ) need_root=0;;
"setpassword" ) ;;
"checkout" ) ;; "checkout" ) ;;
"updatechecker" ) ;; "updatechecker" ) ;;
"arpflush" ) ;; "arpflush" ) ;;
@ -547,8 +561,8 @@ case "${1}" in
* ) helpFunc;; * ) helpFunc;;
esac esac
# Must be root to use this tool # Must be root to use this tool for most functions
if [[ ! $EUID -eq 0 ]];then if [[ ! $EUID -eq 0 && need_root -eq 1 ]];then
if [[ -x "$(command -v sudo)" ]]; then if [[ -x "$(command -v sudo)" ]]; then
exec sudo bash "$0" "$@" exec sudo bash "$0" "$@"
exit $? exit $?
@ -558,6 +572,23 @@ if [[ ! $EUID -eq 0 ]];then
fi fi
fi fi
# In the case of alpine running in a container, the USER variable appears to be blank
# which prevents the next trap from working correctly. Set it by running whoami
if [[ -z ${USER} ]]; then
USER=$(whoami)
fi
# Can also be user pihole for other functions
if [[ ${USER} != "pihole" && need_root -eq 0 ]];then
if [[ -x "$(command -v sudo)" ]]; then
exec sudo -u pihole bash "$0" "$@"
exit $?
else
echo -e " ${CROSS} sudo is needed to run pihole commands. Please run this script as root or install sudo."
exit 1
fi
fi
# Handle redirecting to specific functions based on arguments # Handle redirecting to specific functions based on arguments
case "${1}" in case "${1}" in
"-w" | "whitelist" ) listFunc "$@";; "-w" | "whitelist" ) listFunc "$@";;
@ -576,7 +607,8 @@ case "${1}" in
"enable" ) piholeEnable 1;; "enable" ) piholeEnable 1;;
"disable" ) piholeEnable 0 "$2";; "disable" ) piholeEnable 0 "$2";;
"restartdns" ) restartDNS "$2";; "restartdns" ) restartDNS "$2";;
"-a" | "admin" ) webpageFunc "$@";; "reloaddns" ) restartDNS "reload";;
"setpassword" ) SetWebPassword "$@";;
"checkout" ) piholeCheckoutFunc "$@";; "checkout" ) piholeCheckoutFunc "$@";;
"updatechecker" ) shift; updateCheckFunc "$@";; "updatechecker" ) shift; updateCheckFunc "$@";;
"arpflush" ) arpFunc "$@";; "arpflush" ) arpFunc "$@";;
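Taken together, the changes to the pihole entry point above split commands into privilege tiers: most subcommands still re-exec themselves as root, while updateGravity and reloaddns drop to the unprivileged pihole user. A minimal, self-contained sketch of that dispatch, with a deliberately shortened command table (the real script defines many more subcommands):

#!/usr/bin/env bash
# Sketch of the need_root dispatch introduced above (not the full command table).
need_root=1
case "${1}" in
    "-g" | "updateGravity" ) need_root=0;;   # runs as the pihole user
    "reloaddns"            ) need_root=0;;   # runs as the pihole user
    * ) ;;                                   # everything else keeps need_root=1
esac

# Tier 1: privileged commands re-exec as root
if [[ ! $EUID -eq 0 && $need_root -eq 1 ]]; then
    if [[ -x "$(command -v sudo)" ]]; then
        exec sudo bash "$0" "$@"
    else
        echo "sudo is needed to run pihole commands as root." >&2
        exit 1
    fi
fi

# USER can be empty in minimal containers (e.g. Alpine); fall back to whoami
if [[ -z ${USER} ]]; then
    USER=$(whoami)
fi

# Tier 2: the remaining commands re-exec as the service user "pihole"
if [[ ${USER} != "pihole" && $need_root -eq 0 ]]; then
    exec sudo -u pihole bash "$0" "$@"
fi

echo "running '${1:-<none>}' as $(whoami)"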
@ -4,13 +4,6 @@ import testinfra.backend.docker
import subprocess import subprocess
from textwrap import dedent from textwrap import dedent
SETUPVARS = {
"PIHOLE_INTERFACE": "eth99",
"PIHOLE_DNS_1": "4.2.2.1",
"PIHOLE_DNS_2": "4.2.2.2",
}
IMAGE = "pytest_pihole:test_container" IMAGE = "pytest_pihole:test_container"
tick_box = "[\x1b[1;32m\u2713\x1b[0m]" tick_box = "[\x1b[1;32m\u2713\x1b[0m]"
@ -2,7 +2,6 @@ import pytest
from textwrap import dedent from textwrap import dedent
import re import re
from .conftest import ( from .conftest import (
SETUPVARS,
tick_box, tick_box,
info_box, info_box,
cross_box, cross_box,
@ -13,6 +12,8 @@ from .conftest import (
run_script, run_script,
) )
FTL_BRANCH = "development-v6"
def test_supported_package_manager(host): def test_supported_package_manager(host):
""" """
@ -32,76 +33,6 @@ def test_supported_package_manager(host):
# assert package_manager_detect.rc == 1 # assert package_manager_detect.rc == 1
def test_setupVars_are_sourced_to_global_scope(host):
"""
currently update_dialogs sources setupVars with a dot,
then various other functions use the variables.
This confirms the sourced variables are in scope between functions
"""
setup_var_file = "cat <<EOF> /etc/pihole/setupVars.conf\n"
for k, v in SETUPVARS.items():
setup_var_file += "{}={}\n".format(k, v)
setup_var_file += "EOF\n"
host.run(setup_var_file)
script = dedent(
"""\
set -e
printSetupVars() {
# Currently debug test function only
echo "Outputting sourced variables"
echo "PIHOLE_INTERFACE=${PIHOLE_INTERFACE}"
echo "PIHOLE_DNS_1=${PIHOLE_DNS_1}"
echo "PIHOLE_DNS_2=${PIHOLE_DNS_2}"
}
update_dialogs() {
. /etc/pihole/setupVars.conf
}
update_dialogs
printSetupVars
"""
)
output = run_script(host, script).stdout
for k, v in SETUPVARS.items():
assert "{}={}".format(k, v) in output
def test_setupVars_saved_to_file(host):
"""
confirm saved settings are written to a file for future updates to reuse
"""
# dedent works better with this and padding matching script below
set_setup_vars = "\n"
for k, v in SETUPVARS.items():
set_setup_vars += " {}={}\n".format(k, v)
host.run(set_setup_vars)
script = dedent(
"""\
set -e
echo start
TERM=xterm
source /opt/pihole/basic-install.sh
source /opt/pihole/utils.sh
{}
mkdir -p /etc/dnsmasq.d
version_check_dnsmasq
echo "" > /etc/pihole/pihole-FTL.conf
finalExports
cat /etc/pihole/setupVars.conf
""".format(
set_setup_vars
)
)
output = run_script(host, script).stdout
for k, v in SETUPVARS.items():
assert "{}={}".format(k, v) in output
def test_selinux_not_detected(host): def test_selinux_not_detected(host):
""" """
confirms installer continues when SELinux configuration file does not exist confirms installer continues when SELinux configuration file does not exist
@ -118,21 +49,6 @@ def test_selinux_not_detected(host):
assert check_selinux.rc == 0 assert check_selinux.rc == 0
def test_installPiholeWeb_fresh_install_no_errors(host):
"""
confirms all web page assets from Core repo are installed on a fresh build
"""
installWeb = host.run(
"""
umask 0027
source /opt/pihole/basic-install.sh
installPiholeWeb
"""
)
expected_stdout = tick_box + " Installing sudoer file"
assert expected_stdout in installWeb.stdout
def get_directories_recursive(host, directory): def get_directories_recursive(host, directory):
if directory is None: if directory is None:
return directory return directory
@ -150,13 +66,10 @@ def test_installPihole_fresh_install_readableFiles(host):
mock_command("dialog", {"*": ("", "0")}, host) mock_command("dialog", {"*": ("", "0")}, host)
# mock git pull # mock git pull
mock_command_passthrough("git", {"pull": ("", "0")}, host) mock_command_passthrough("git", {"pull": ("", "0")}, host)
# mock systemctl to not start lighttpd and FTL # mock systemctl to not start FTL
mock_command_2( mock_command_2(
"systemctl", "systemctl",
{ {
"enable lighttpd": ("", "0"),
"restart lighttpd": ("", "0"),
"start lighttpd": ("", "0"),
"enable pihole-FTL": ("", "0"), "enable pihole-FTL": ("", "0"),
"restart pihole-FTL": ("", "0"), "restart pihole-FTL": ("", "0"),
"start pihole-FTL": ("", "0"), "start pihole-FTL": ("", "0"),
@ -168,20 +81,8 @@ def test_installPihole_fresh_install_readableFiles(host):
host.run("command -v apt-get > /dev/null && apt-get install -qq man") host.run("command -v apt-get > /dev/null && apt-get install -qq man")
host.run("command -v dnf > /dev/null && dnf install -y man") host.run("command -v dnf > /dev/null && dnf install -y man")
host.run("command -v yum > /dev/null && yum install -y man") host.run("command -v yum > /dev/null && yum install -y man")
# create configuration file # Workaround to get FTLv6 installed until it reaches master branch
setup_var_file = "cat <<EOF> /etc/pihole/setupVars.conf\n" host.run('echo "' + FTL_BRANCH + '" > /etc/pihole/ftlbranch')
for k, v in SETUPVARS.items():
setup_var_file += "{}={}\n".format(k, v)
setup_var_file += "INSTALL_WEB_SERVER=true\n"
setup_var_file += "INSTALL_WEB_INTERFACE=true\n"
setup_var_file += "EOF\n"
host.run(setup_var_file)
# Install FTL's development branch to get the latest features
host.run(
"""
echo "development" > /etc/pihole/ftlbranch
"""
)
install = host.run( install = host.run(
""" """
export TERM=xterm export TERM=xterm
@ -193,6 +94,7 @@ def test_installPihole_fresh_install_readableFiles(host):
runUnattended=true runUnattended=true
useUpdateVars=true useUpdateVars=true
main main
/opt/pihole/pihole-FTL-prestart.sh
""" """
) )
assert 0 == install.rc assert 0 == install.rc
@ -238,34 +140,6 @@ def test_installPihole_fresh_install_readableFiles(host):
check_macvendor = test_cmd.format("r", "/etc/pihole/macvendor.db", piholeuser) check_macvendor = test_cmd.format("r", "/etc/pihole/macvendor.db", piholeuser)
actual_rc = host.run(check_macvendor).rc actual_rc = host.run(check_macvendor).rc
assert exit_status_success == actual_rc assert exit_status_success == actual_rc
# readable and writeable pihole-FTL.conf
check_FTLconf = test_cmd.format("r", "/etc/pihole/pihole-FTL.conf", piholeuser)
actual_rc = host.run(check_FTLconf).rc
assert exit_status_success == actual_rc
check_FTLconf = test_cmd.format("w", "/etc/pihole/pihole-FTL.conf", piholeuser)
actual_rc = host.run(check_FTLconf).rc
assert exit_status_success == actual_rc
# readable setupVars.conf
check_setup = test_cmd.format("r", "/etc/pihole/setupVars.conf", piholeuser)
actual_rc = host.run(check_setup).rc
assert exit_status_success == actual_rc
# check dnsmasq files
# readable /etc/dnsmasq.conf
check_dnsmasqconf = test_cmd.format("r", "/etc/dnsmasq.conf", piholeuser)
actual_rc = host.run(check_dnsmasqconf).rc
assert exit_status_success == actual_rc
# readable /etc/dnsmasq.d/01-pihole.conf
check_dnsmasqconf = test_cmd.format("r", "/etc/dnsmasq.d", piholeuser)
actual_rc = host.run(check_dnsmasqconf).rc
assert exit_status_success == actual_rc
check_dnsmasqconf = test_cmd.format("x", "/etc/dnsmasq.d", piholeuser)
actual_rc = host.run(check_dnsmasqconf).rc
assert exit_status_success == actual_rc
check_dnsmasqconf = test_cmd.format(
"r", "/etc/dnsmasq.d/01-pihole.conf", piholeuser
)
actual_rc = host.run(check_dnsmasqconf).rc
assert exit_status_success == actual_rc
# check readable and executable /etc/init.d/pihole-FTL # check readable and executable /etc/init.d/pihole-FTL
check_init = test_cmd.format("x", "/etc/init.d/pihole-FTL", piholeuser) check_init = test_cmd.format("x", "/etc/init.d/pihole-FTL", piholeuser)
actual_rc = host.run(check_init).rc actual_rc = host.run(check_init).rc
@ -273,28 +147,6 @@ def test_installPihole_fresh_install_readableFiles(host):
check_init = test_cmd.format("r", "/etc/init.d/pihole-FTL", piholeuser) check_init = test_cmd.format("r", "/etc/init.d/pihole-FTL", piholeuser)
actual_rc = host.run(check_init).rc actual_rc = host.run(check_init).rc
assert exit_status_success == actual_rc assert exit_status_success == actual_rc
# check readable /etc/lighttpd/lighttpd.conf
check_lighttpd = test_cmd.format("r", "/etc/lighttpd/lighttpd.conf", piholeuser)
actual_rc = host.run(check_lighttpd).rc
assert exit_status_success == actual_rc
# check readable /etc/lighttpd/conf*/pihole-admin.conf
check_lighttpd = test_cmd.format("r", "/etc/lighttpd/conf.d", piholeuser)
if host.run(check_lighttpd).rc == exit_status_success:
check_lighttpd = test_cmd.format(
"r", "/etc/lighttpd/conf.d/pihole-admin.conf", piholeuser
)
actual_rc = host.run(check_lighttpd).rc
assert exit_status_success == actual_rc
else:
check_lighttpd = test_cmd.format(
"r", "/etc/lighttpd/conf-available", piholeuser
)
if host.run(check_lighttpd).rc == exit_status_success:
check_lighttpd = test_cmd.format(
"r", "/etc/lighttpd/conf-available/15-pihole-admin.conf", piholeuser
)
actual_rc = host.run(check_lighttpd).rc
assert exit_status_success == actual_rc
# check readable and executable manpages # check readable and executable manpages
if maninstalled is True: if maninstalled is True:
check_man = test_cmd.format("x", "/usr/local/share/man", piholeuser) check_man = test_cmd.format("x", "/usr/local/share/man", piholeuser)
@ -320,15 +172,6 @@ def test_installPihole_fresh_install_readableFiles(host):
) )
actual_rc = host.run(check_man).rc actual_rc = host.run(check_man).rc
assert exit_status_success == actual_rc assert exit_status_success == actual_rc
check_man = test_cmd.format(
"r", "/usr/local/share/man/man8/pihole-FTL.8", piholeuser
)
actual_rc = host.run(check_man).rc
assert exit_status_success == actual_rc
# check not readable sudoers file
check_sudo = test_cmd.format("r", "/etc/sudoers.d/pihole", piholeuser)
actual_rc = host.run(check_sudo).rc
assert exit_status_success != actual_rc
# check not readable cron file # check not readable cron file
check_sudo = test_cmd.format("x", "/etc/cron.d/", piholeuser) check_sudo = test_cmd.format("x", "/etc/cron.d/", piholeuser)
actual_rc = host.run(check_sudo).rc actual_rc = host.run(check_sudo).rc
@ -353,237 +196,6 @@ def test_installPihole_fresh_install_readableFiles(host):
actual_rc = host.run(check_pihole).rc actual_rc = host.run(check_pihole).rc
@pytest.mark.parametrize("test_webpage", [True])
def test_installPihole_fresh_install_readableBlockpage(host, test_webpage):
"""
confirms all web page assets from Core repo are readable
by $LIGHTTPD_USER on a fresh build
"""
piholeWebpage = [
"127.0.0.1",
# "pi.hole"
]
# dialog returns Cancel for user prompt
mock_command("dialog", {"*": ("", "0")}, host)
# mock git pull
mock_command_passthrough("git", {"pull": ("", "0")}, host)
# mock systemctl to start lighttpd and FTL
ligthttpdcommand = dedent(
r'''\"\"
echo 'starting lighttpd with {}'
if [ command -v "apt-get" >/dev/null 2>&1 ]; then
LIGHTTPD_USER="www-data"
LIGHTTPD_GROUP="www-data"
else
LIGHTTPD_USER="lighttpd"
LIGHTTPD_GROUP="lighttpd"
fi
mkdir -p "{run}"
chown {usergroup} "{run}"
mkdir -p "{cache}"
chown {usergroup} "/var/cache"
chown {usergroup} "{cache}"
mkdir -p "{compress}"
chown {usergroup} "{compress}"
mkdir -p "{uploads}"
chown {usergroup} "{uploads}"
chmod 0777 /var
chmod 0777 /var/cache
chmod 0777 "{cache}"
find "{run}" -type d -exec chmod 0777 {chmodarg} \;;
find "{run}" -type f -exec chmod 0666 {chmodarg} \;;
find "{compress}" -type d -exec chmod 0777 {chmodarg} \;;
find "{compress}" -type f -exec chmod 0666 {chmodarg} \;;
find "{uploads}" -type d -exec chmod 0777 {chmodarg} \;;
find "{uploads}" -type f -exec chmod 0666 {chmodarg} \;;
/usr/sbin/lighttpd -tt -f '{config}'
/usr/sbin/lighttpd -f '{config}'
echo \"\"'''.format(
"{}",
usergroup="${{LIGHTTPD_USER}}:${{LIGHTTPD_GROUP}}",
chmodarg="{{}}",
config="/etc/lighttpd/lighttpd.conf",
run="/run/lighttpd",
cache="/var/cache/lighttpd",
uploads="/var/cache/lighttpd/uploads",
compress="/var/cache/lighttpd/compress",
)
)
FTLcommand = dedent(
'''\"\"
set -x
/etc/init.d/pihole-FTL restart
echo \"\"'''
)
mock_command_run(
"systemctl",
{
"enable lighttpd": ("", "0"),
"restart lighttpd": (ligthttpdcommand.format("restart"), "0"),
"start lighttpd": (ligthttpdcommand.format("start"), "0"),
"enable pihole-FTL": ("", "0"),
"restart pihole-FTL": (FTLcommand, "0"),
"start pihole-FTL": (FTLcommand, "0"),
"*": ('echo "systemctl call with $@"', "0"),
},
host,
)
# create configuration file
setup_var_file = "cat <<EOF> /etc/pihole/setupVars.conf\n"
for k, v in SETUPVARS.items():
setup_var_file += "{}={}\n".format(k, v)
setup_var_file += "INSTALL_WEB_SERVER=true\n"
setup_var_file += "INSTALL_WEB_INTERFACE=true\n"
setup_var_file += "EOF\n"
host.run(setup_var_file)
# Install FTL's development branch to get the latest features
host.run(
"""
echo "development" > /etc/pihole/ftlbranch
"""
)
installWeb = host.run(
"""
export TERM=xterm
export DEBIAN_FRONTEND=noninteractive
umask 0027
runUnattended=true
useUpdateVars=true
source /opt/pihole/basic-install.sh > /dev/null
runUnattended=true
useUpdateVars=true
main
echo "LIGHTTPD_USER=${LIGHTTPD_USER}"
echo "webroot=${webroot}"
echo "INSTALL_WEB_INTERFACE=${INSTALL_WEB_INTERFACE}"
echo "INSTALL_WEB_SERVER=${INSTALL_WEB_SERVER}"
"""
)
assert 0 == installWeb.rc
piholeuser = "pihole"
webuser = ""
user = re.findall(r"^\s*LIGHTTPD_USER=.*$", installWeb.stdout, re.MULTILINE)
for match in user:
webuser = match.replace("LIGHTTPD_USER=", "").strip()
webroot = ""
user = re.findall(r"^\s*webroot=.*$", installWeb.stdout, re.MULTILINE)
for match in user:
webroot = match.replace("webroot=", "").strip()
if not webroot.strip():
webroot = "/var/www/html"
installWebInterface = True
interface = re.findall(
r"^\s*INSTALL_WEB_INTERFACE=.*$", installWeb.stdout, re.MULTILINE
)
for match in interface:
testvalue = match.replace("INSTALL_WEB_INTERFACE=", "").strip().lower()
if not testvalue.strip():
installWebInterface = testvalue == "true"
installWebServer = True
server = re.findall(r"^\s*INSTALL_WEB_SERVER=.*$", installWeb.stdout, re.MULTILINE)
for match in server:
testvalue = match.replace("INSTALL_WEB_SERVER=", "").strip().lower()
if not testvalue.strip():
installWebServer = testvalue == "true"
# if webserver install was not requested
# at least pihole must be able to read files
if installWebServer is False:
webuser = piholeuser
exit_status_success = 0
test_cmd = 'su --shell /bin/bash --command "test -{0} {1}" -p {2}'
# check files that need a running FTL to be created
# readable and writeable pihole-FTL.db
check_FTLconf = test_cmd.format("r", "/etc/pihole/pihole-FTL.db", piholeuser)
actual_rc = host.run(check_FTLconf).rc
assert exit_status_success == actual_rc
check_FTLconf = test_cmd.format("w", "/etc/pihole/pihole-FTL.db", piholeuser)
actual_rc = host.run(check_FTLconf).rc
assert exit_status_success == actual_rc
# check directories above $webroot for read and execute permission
check_var = test_cmd.format("r", "/var", webuser)
actual_rc = host.run(check_var).rc
assert exit_status_success == actual_rc
check_var = test_cmd.format("x", "/var", webuser)
actual_rc = host.run(check_var).rc
assert exit_status_success == actual_rc
check_www = test_cmd.format("r", "/var/www", webuser)
actual_rc = host.run(check_www).rc
assert exit_status_success == actual_rc
check_www = test_cmd.format("x", "/var/www", webuser)
actual_rc = host.run(check_www).rc
assert exit_status_success == actual_rc
check_html = test_cmd.format("r", "/var/www/html", webuser)
actual_rc = host.run(check_html).rc
assert exit_status_success == actual_rc
check_html = test_cmd.format("x", "/var/www/html", webuser)
actual_rc = host.run(check_html).rc
assert exit_status_success == actual_rc
# check directories below $webroot for read and execute permission
check_admin = test_cmd.format("r", webroot + "/admin", webuser)
actual_rc = host.run(check_admin).rc
assert exit_status_success == actual_rc
check_admin = test_cmd.format("x", webroot + "/admin", webuser)
actual_rc = host.run(check_admin).rc
assert exit_status_success == actual_rc
directories = get_directories_recursive(host, webroot + "/admin/")
for directory in directories:
check_pihole = test_cmd.format("r", directory, webuser)
actual_rc = host.run(check_pihole).rc
check_pihole = test_cmd.format("x", directory, webuser)
actual_rc = host.run(check_pihole).rc
findfiles = 'find "{}" -maxdepth 1 -type f -exec echo {{}} \\;;'
filelist = host.run(findfiles.format(directory))
files = list(filter(bool, filelist.stdout.splitlines()))
for file in files:
check_pihole = test_cmd.format("r", file, webuser)
actual_rc = host.run(check_pihole).rc
# check web interface files
# change nameserver to pi-hole
# setting nameserver in /etc/resolv.conf to pi-hole does
# not work here because of the way docker uses this file
ns = host.run(r"sed -i 's/nameserver.*/nameserver 127.0.0.1/' /etc/resolv.conf")
pihole_is_ns = ns.rc == 0
def is_ip(address):
m = re.match(r"(\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})", address)
return bool(m)
if installWebInterface is True:
if test_webpage is True:
# check webpage for unreadable files
noPHPfopen = re.compile(
(
r"PHP Error(%d+):\s+fopen([^)]+):\s+"
+ r"failed to open stream: "
+ r"Permission denied in"
),
re.I,
)
# using cURL option --dns-servers is not possible
status = (
'curl -s --head "{}" | '
+ "head -n 1 | "
+ 'grep "HTTP/1.[01] [23].." > /dev/null'
)
digcommand = r"dig A +short {} @127.0.0.1 | head -n 1"
pagecontent = 'curl --verbose -L "{}"'
for page in piholeWebpage:
testpage = "http://" + page + "/admin/"
resolvesuccess = True
if is_ip(page) is False:
dig = host.run(digcommand.format(page))
testpage = "http://" + dig.stdout.strip() + "/admin/"
resolvesuccess = dig.rc == 0
if resolvesuccess or pihole_is_ns:
# check HTTP status of blockpage
actual_rc = host.run(status.format(testpage))
assert exit_status_success == actual_rc.rc
# check for PHP error
actual_output = host.run(pagecontent.format(testpage))
assert noPHPfopen.match(actual_output.stdout) is None
def test_update_package_cache_success_no_errors(host): def test_update_package_cache_success_no_errors(host):
""" """
confirms package cache was updated without any errors confirms package cache was updated without any errors
@ -617,190 +229,35 @@ def test_update_package_cache_failure_no_errors(host):
assert "Error: Unable to update package cache." in updateCache.stdout assert "Error: Unable to update package cache." in updateCache.stdout
def test_FTL_detect_aarch64_no_errors(host): @pytest.mark.parametrize(
""" "arch,detected_string,supported",
confirms only aarch64 package is downloaded for FTL engine [
""" ("aarch64", "AArch64 (64 Bit ARM)", True),
# mock uname to return aarch64 platform ("armv6", "ARMv6", True),
mock_command("uname", {"-m": ("aarch64", "0")}, host) ("armv7l", "ARMv7 (or newer)", True),
# mock ldd to respond with aarch64 shared library ("armv7", "ARMv7 (or newer)", True),
mock_command( ("armv8a", "ARMv7 (or newer)", True),
"ldd", ("x86_64", "x86_64", True),
{ ("riscv64", "riscv64", True),
"/bin/sh": ("/lib/ld-linux-aarch64.so.1", "0"), ("mips", "mips", False),
"/usr/bin/sh": ("/lib/ld-linux-aarch64.so.1", "0"), ],
}, )
host, def test_FTL_detect_no_errors(host, arch, detected_string, supported):
)
detectPlatform = host.run(
"""
source /opt/pihole/basic-install.sh
create_pihole_user
funcOutput=$(get_binary_name)
binary="pihole-FTL${funcOutput##*pihole-FTL}"
theRest="${funcOutput%pihole-FTL*}"
FTLdetect "${binary}" "${theRest}"
"""
)
expected_stdout = info_box + " FTL Checks..."
assert expected_stdout in detectPlatform.stdout
expected_stdout = tick_box + " Detected AArch64 (64 Bit ARM) processor"
assert expected_stdout in detectPlatform.stdout
expected_stdout = tick_box + " Downloading and Installing FTL"
assert expected_stdout in detectPlatform.stdout
def test_FTL_detect_armv4t_no_errors(host):
"""
confirms only armv4t package is downloaded for FTL engine
"""
# mock uname to return armv4t platform
mock_command("uname", {"-m": ("armv4t", "0")}, host)
# mock ldd to respond with armv4t shared library
mock_command(
"ldd",
{
"/bin/sh": ("/lib/ld-linux.so.3", "0"),
"/usr/bin/sh": ("/lib/ld-linux.so.3", "0"),
},
host,
)
detectPlatform = host.run(
"""
source /opt/pihole/basic-install.sh
create_pihole_user
funcOutput=$(get_binary_name)
binary="pihole-FTL${funcOutput##*pihole-FTL}"
theRest="${funcOutput%pihole-FTL*}"
FTLdetect "${binary}" "${theRest}"
"""
)
expected_stdout = info_box + " FTL Checks..."
assert expected_stdout in detectPlatform.stdout
expected_stdout = tick_box + (" Detected ARMv4 processor")
assert expected_stdout in detectPlatform.stdout
expected_stdout = tick_box + " Downloading and Installing FTL"
assert expected_stdout in detectPlatform.stdout
def test_FTL_detect_armv5te_no_errors(host):
"""
confirms only armv5te package is downloaded for FTL engine
"""
# mock uname to return armv5te platform
mock_command("uname", {"-m": ("armv5te", "0")}, host)
# mock ldd to respond with ld-linux shared library
mock_command(
"ldd",
{
"/bin/sh": ("/lib/ld-linux.so.3", "0"),
"/usr/bin/sh": ("/lib/ld-linux.so.3", "0"),
},
host,
)
detectPlatform = host.run(
"""
source /opt/pihole/basic-install.sh
create_pihole_user
funcOutput=$(get_binary_name)
binary="pihole-FTL${funcOutput##*pihole-FTL}"
theRest="${funcOutput%pihole-FTL*}"
FTLdetect "${binary}" "${theRest}"
"""
)
expected_stdout = info_box + " FTL Checks..."
assert expected_stdout in detectPlatform.stdout
expected_stdout = tick_box + (" Detected ARMv5 (or newer) processor")
assert expected_stdout in detectPlatform.stdout
expected_stdout = tick_box + " Downloading and Installing FTL"
assert expected_stdout in detectPlatform.stdout
def test_FTL_detect_armv6l_no_errors(host):
"""
confirms only armv6l package is downloaded for FTL engine
"""
# mock uname to return armv6l platform
mock_command("uname", {"-m": ("armv6l", "0")}, host)
# mock ldd to respond with ld-linux-armhf shared library
mock_command(
"ldd",
{
"/bin/sh": ("/lib/ld-linux-armhf.so.3", "0"),
"/usr/bin/sh": ("/lib/ld-linux-armhf.so.3", "0"),
},
host,
)
detectPlatform = host.run(
"""
source /opt/pihole/basic-install.sh
create_pihole_user
funcOutput=$(get_binary_name)
binary="pihole-FTL${funcOutput##*pihole-FTL}"
theRest="${funcOutput%pihole-FTL*}"
FTLdetect "${binary}" "${theRest}"
"""
)
expected_stdout = info_box + " FTL Checks..."
assert expected_stdout in detectPlatform.stdout
expected_stdout = tick_box + (
" Detected ARMv6 processor " "(with hard-float support)"
)
assert expected_stdout in detectPlatform.stdout
expected_stdout = tick_box + " Downloading and Installing FTL"
assert expected_stdout in detectPlatform.stdout
def test_FTL_detect_armv7l_no_errors(host):
"""
confirms only armv7l package is downloaded for FTL engine
"""
# mock uname to return armv7l platform
mock_command("uname", {"-m": ("armv7l", "0")}, host)
# mock ldd to respond with ld-linux-armhf shared library
mock_command(
"ldd",
{
"/bin/sh": ("/lib/ld-linux-armhf.so.3", "0"),
"/usr/bin/sh": ("/lib/ld-linux-armhf.so.3", "0"),
},
host,
)
detectPlatform = host.run(
"""
source /opt/pihole/basic-install.sh
create_pihole_user
funcOutput=$(get_binary_name)
binary="pihole-FTL${funcOutput##*pihole-FTL}"
theRest="${funcOutput%pihole-FTL*}"
FTLdetect "${binary}" "${theRest}"
"""
)
expected_stdout = info_box + " FTL Checks..."
assert expected_stdout in detectPlatform.stdout
expected_stdout = tick_box + (
" Detected ARMv7 processor " "(with hard-float support)"
)
assert expected_stdout in detectPlatform.stdout
expected_stdout = tick_box + " Downloading and Installing FTL"
assert expected_stdout in detectPlatform.stdout
def test_FTL_detect_armv8a_no_errors(host):
""" """
confirms only armv8a package is downloaded for FTL engine confirms only correct package is downloaded for FTL engine
""" """
# mock uname to return armv8a platform # mock uname to return passed platform
mock_command("uname", {"-m": ("armv8a", "0")}, host) mock_command("uname", {"-m": (arch, "0")}, host)
# mock ldd to respond with ld-linux-armhf shared library # mock readelf to respond with passed CPU architecture
mock_command( mock_command_2(
"ldd", "readelf",
{ {
"/bin/sh": ("/lib/ld-linux-armhf.so.3", "0"), "-A /bin/sh": ("Tag_CPU_arch: " + arch, "0"),
"/usr/bin/sh": ("/lib/ld-linux-armhf.so.3", "0"), "-A /usr/bin/sh": ("Tag_CPU_arch: " + arch, "0"),
}, },
host, host,
) )
host.run('echo "' + FTL_BRANCH + '" > /etc/pihole/ftlbranch')
detectPlatform = host.run( detectPlatform = host.run(
""" """
source /opt/pihole/basic-install.sh source /opt/pihole/basic-install.sh
@ -811,89 +268,30 @@ def test_FTL_detect_armv8a_no_errors(host):
FTLdetect "${binary}" "${theRest}" FTLdetect "${binary}" "${theRest}"
""" """
) )
expected_stdout = info_box + " FTL Checks..." if supported:
assert expected_stdout in detectPlatform.stdout expected_stdout = info_box + " FTL Checks..."
expected_stdout = tick_box + " Detected ARMv8 (or newer) processor" assert expected_stdout in detectPlatform.stdout
assert expected_stdout in detectPlatform.stdout expected_stdout = tick_box + " Detected " + detected_string + " architecture"
expected_stdout = tick_box + " Downloading and Installing FTL" assert expected_stdout in detectPlatform.stdout
assert expected_stdout in detectPlatform.stdout expected_stdout = tick_box + " Downloading and Installing FTL"
assert expected_stdout in detectPlatform.stdout
else:
def test_FTL_detect_x86_64_no_errors(host): expected_stdout = (
""" "Not able to detect architecture (unknown: " + detected_string + ")"
confirms only x86_64 package is downloaded for FTL engine )
""" assert expected_stdout in detectPlatform.stdout
detectPlatform = host.run(
"""
source /opt/pihole/basic-install.sh
create_pihole_user
funcOutput=$(get_binary_name)
binary="pihole-FTL${funcOutput##*pihole-FTL}"
theRest="${funcOutput%pihole-FTL*}"
FTLdetect "${binary}" "${theRest}"
"""
)
expected_stdout = info_box + " FTL Checks..."
assert expected_stdout in detectPlatform.stdout
expected_stdout = tick_box + " Detected x86_64 processor"
assert expected_stdout in detectPlatform.stdout
expected_stdout = tick_box + " Downloading and Installing FTL"
assert expected_stdout in detectPlatform.stdout
def test_FTL_detect_unknown_no_errors(host):
"""confirms only generic package is downloaded for FTL engine"""
# mock uname to return generic platform
mock_command("uname", {"-m": ("mips", "0")}, host)
detectPlatform = host.run(
"""
source /opt/pihole/basic-install.sh
create_pihole_user
funcOutput=$(get_binary_name)
binary="pihole-FTL${funcOutput##*pihole-FTL}"
theRest="${funcOutput%pihole-FTL*}"
FTLdetect "${binary}" "${theRest}"
"""
)
expected_stdout = "Not able to detect processor (unknown: mips)"
assert expected_stdout in detectPlatform.stdout
def test_FTL_download_aarch64_no_errors(host):
"""
confirms only aarch64 package is downloaded for FTL engine
"""
# mock dialog answers and ensure installer dependencies
mock_command("dialog", {"*": ("", "0")}, host)
host.run(
"""
source /opt/pihole/basic-install.sh
package_manager_detect
install_dependent_packages ${INSTALLER_DEPS[@]}
"""
)
download_binary = host.run(
"""
source /opt/pihole/basic-install.sh
create_pihole_user
FTLinstall "pihole-FTL-aarch64-linux-gnu"
"""
)
expected_stdout = tick_box + " Downloading and Installing FTL"
assert expected_stdout in download_binary.stdout
assert "error" not in download_binary.stdout.lower()
def test_FTL_binary_installed_and_responsive_no_errors(host): def test_FTL_development_binary_installed_and_responsive_no_errors(host):
""" """
confirms FTL binary is copied and functional in installed location confirms FTL development binary is copied and functional in installed location
""" """
host.run('echo "' + FTL_BRANCH + '" > /etc/pihole/ftlbranch')
host.run( host.run(
""" """
source /opt/pihole/basic-install.sh source /opt/pihole/basic-install.sh
create_pihole_user create_pihole_user
funcOutput=$(get_binary_name) funcOutput=$(get_binary_name)
echo "development" > /etc/pihole/ftlbranch
binary="pihole-FTL${funcOutput##*pihole-FTL}" binary="pihole-FTL${funcOutput##*pihole-FTL}"
theRest="${funcOutput%pihole-FTL*}" theRest="${funcOutput%pihole-FTL*}"
FTLdetect "${binary}" "${theRest}" FTLdetect "${binary}" "${theRest}"
@ -1163,30 +561,3 @@ def test_package_manager_has_web_deps(host):
assert "No package" not in output.stdout assert "No package" not in output.stdout
assert output.rc == 0 assert output.rc == 0
def test_webpage_sh_valid_domain(host):
"""Confirms checkDomain function in webpage.sh works as expected"""
check1 = host.run(
"""
source /opt/pihole/webpage.sh
checkDomain "pi-hole.net"
"""
)
check2 = host.run(
"""
source /opt/pihole/webpage.sh
checkDomain "ab.pi-hole.net"
"""
)
check3 = host.run(
"""
source /opt/pihole/webpage.sh
checkDomain "abc.pi-hole.net"
"""
)
assert "pi-hole.net" in check1.stdout
assert "ab.pi-hole.net" in check2.stdout
assert "abc.pi-hole.net" in check3.stdout
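The parametrized test_FTL_detect_no_errors above replaces the seven per-architecture tests and mocks both uname -m and readelf -A to steer the detection. A rough sketch of the kind of probing it exercises follows; the real FTLdetect logic in basic-install.sh is more involved, so treat this as an illustration of the expected messages only.

#!/usr/bin/env bash
# Sketch: map uname -m (plus readelf's Tag_CPU_arch where present) to the
# messages asserted in the parametrized test above.
machine=$(uname -m)
cpu_arch=$(readelf -A /bin/sh 2>/dev/null | awk -F': ' '/Tag_CPU_arch/ {print $2; exit}')

case "${machine}" in
    aarch64)            echo "Detected AArch64 (64 Bit ARM) architecture";;
    armv6*)             echo "Detected ARMv6 architecture (Tag_CPU_arch: ${cpu_arch:-n/a})";;
    armv7* | armv8*)    echo "Detected ARMv7 (or newer) architecture";;
    x86_64)             echo "Detected x86_64 architecture";;
    riscv64)            echo "Detected riscv64 architecture";;
    *)                  echo "Not able to detect architecture (unknown: ${machine})";;
esac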
@ -82,52 +82,6 @@ def test_key_removal_works(host):
assert expected_stdout == output.stdout assert expected_stdout == output.stdout
def test_getFTLAPIPort_default(host):
"""Confirms getFTLAPIPort returns the default API port"""
output = host.run(
"""
source /opt/pihole/utils.sh
getFTLAPIPort
"""
)
expected_stdout = "4711\n"
assert expected_stdout == output.stdout
def test_getFTLAPIPort_custom(host):
"""Confirms getFTLAPIPort returns a custom API port"""
host.run(
"""
echo "FTLPORT=1234" > /etc/pihole/pihole-FTL.conf
"""
)
output = host.run(
"""
source /opt/pihole/utils.sh
getFTLAPIPort
"""
)
expected_stdout = "1234\n"
assert expected_stdout == output.stdout
def test_getFTLAPIPort_malicious(host):
"""Confirms getFTLAPIPort returns 4711 if the setting in pihole-FTL.conf contains non-digits"""
host.run(
"""
echo "FTLPORT=*$ssdfsd" > /etc/pihole/pihole-FTL.conf
"""
)
output = host.run(
"""
source /opt/pihole/utils.sh
getFTLAPIPort
"""
)
expected_stdout = "4711\n"
assert expected_stdout == output.stdout
def test_getFTLPIDFile_default(host): def test_getFTLPIDFile_default(host):
"""Confirms getFTLPIDFile returns the default PID file path""" """Confirms getFTLPIDFile returns the default PID file path"""
output = host.run( output = host.run(
@ -170,3 +124,32 @@ def test_getFTLPIDFile_and_getFTLPID_custom(host):
) )
expected_stdout = "1234\n" expected_stdout = "1234\n"
assert expected_stdout == output.stdout assert expected_stdout == output.stdout
def test_getFTLConfigValue_getFTLConfigValue(host):
"""
Confirms getFTLConfigValue works (also assumes setFTLConfigValue works)
Requires FTL to be installed, so we do that first
(taken from test_FTL_development_binary_installed_and_responsive_no_errors)
"""
host.run(
"""
source /opt/pihole/basic-install.sh
create_pihole_user
funcOutput=$(get_binary_name)
echo "development-v6" > /etc/pihole/ftlbranch
binary="pihole-FTL${funcOutput##*pihole-FTL}"
theRest="${funcOutput%pihole-FTL*}"
FTLdetect "${binary}" "${theRest}"
"""
)
output = host.run(
"""
source /opt/pihole/utils.sh
setFTLConfigValue "dns.upstreams" '["9.9.9.9"]' > /dev/null
getFTLConfigValue "dns.upstreams"
"""
)
assert "[ 9.9.9.9 ]" in output.stdout
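The new test_getFTLConfigValue_getFTLConfigValue test drives the utils.sh helpers against a live development-v6 FTL. For reference, a minimal shell usage of the same round-trip, assuming FTL and /opt/pihole/utils.sh are already installed:

#!/usr/bin/env bash
# Assumes a development-v6 pihole-FTL is installed and utils.sh is in place.
source /opt/pihole/utils.sh

setFTLConfigValue "dns.upstreams" '["9.9.9.9"]' > /dev/null
getFTLConfigValue "dns.upstreams"   # the test above expects "[ 9.9.9.9 ]" in this output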
@ -1,27 +0,0 @@
import pytest
from .conftest import (
tick_box,
info_box,
cross_box,
mock_command,
)
def test_enable_epel_repository_centos(host):
"""
confirms the EPEL package repository is enabled when installed on CentOS
"""
package_manager_detect = host.run(
"""
source /opt/pihole/basic-install.sh
package_manager_detect
"""
)
expected_stdout = info_box + (
" Enabling EPEL package repository " "(https://fedoraproject.org/wiki/EPEL)"
)
assert expected_stdout in package_manager_detect.stdout
expected_stdout = tick_box + " Installed"
assert expected_stdout in package_manager_detect.stdout
epel_package = host.package("epel-release")
assert epel_package.is_installed
@ -1,15 +0,0 @@
def test_epel_and_remi_not_installed_fedora(host):
"""
confirms installer does not attempt to install EPEL/REMI repositories
on Fedora
"""
package_manager_detect = host.run(
"""
source /opt/pihole/basic-install.sh
package_manager_detect
"""
)
assert package_manager_detect.stdout == ""
epel_package = host.package("epel-release")
assert not epel_package.is_installed
@ -5,4 +5,4 @@ envlist = py3
allowlist_externals = docker allowlist_externals = docker
deps = -rrequirements.txt deps = -rrequirements.txt
commands = docker buildx build --load --progress plain -f _centos_8.Dockerfile -t pytest_pihole:test_container ../ commands = docker buildx build --load --progress plain -f _centos_8.Dockerfile -t pytest_pihole:test_container ../
pytest {posargs:-vv -n auto} ./test_any_automated_install.py ./test_any_utils.py ./test_centos_fedora_common_support.py ./test_centos_common_support.py pytest {posargs:-vv -n auto} ./test_any_automated_install.py ./test_any_utils.py ./test_centos_fedora_common_support.py
@ -5,4 +5,4 @@ envlist = py3
allowlist_externals = docker allowlist_externals = docker
deps = -rrequirements.txt deps = -rrequirements.txt
commands = docker buildx build --load --progress plain -f _centos_9.Dockerfile -t pytest_pihole:test_container ../ commands = docker buildx build --load --progress plain -f _centos_9.Dockerfile -t pytest_pihole:test_container ../
pytest {posargs:-vv -n auto} ./test_any_automated_install.py ./test_any_utils.py ./test_centos_fedora_common_support.py ./test_centos_common_support.py pytest {posargs:-vv -n auto} ./test_any_automated_install.py ./test_any_utils.py ./test_centos_fedora_common_support.py
@ -5,4 +5,4 @@ envlist = py3
allowlist_externals = docker allowlist_externals = docker
deps = -rrequirements.txt deps = -rrequirements.txt
commands = docker buildx build --load --progress plain -f _fedora_38.Dockerfile -t pytest_pihole:test_container ../ commands = docker buildx build --load --progress plain -f _fedora_38.Dockerfile -t pytest_pihole:test_container ../
pytest {posargs:-vv -n auto} ./test_any_automated_install.py ./test_any_utils.py ./test_centos_fedora_common_support.py ./test_fedora_support.py pytest {posargs:-vv -n auto} ./test_any_automated_install.py ./test_any_utils.py ./test_centos_fedora_common_support.py
@ -5,4 +5,4 @@ envlist = py3
allowlist_externals = docker allowlist_externals = docker
deps = -rrequirements.txt deps = -rrequirements.txt
commands = docker buildx build --load --progress plain -f _fedora_39.Dockerfile -t pytest_pihole:test_container ../ commands = docker buildx build --load --progress plain -f _fedora_39.Dockerfile -t pytest_pihole:test_container ../
pytest {posargs:-vv -n auto} ./test_any_automated_install.py ./test_any_utils.py ./test_centos_fedora_common_support.py ./test_fedora_support.py pytest {posargs:-vv -n auto} ./test_any_automated_install.py ./test_any_utils.py ./test_centos_fedora_common_support.py