Merge pull request #3321 from pi-hole/release/v5.0

Pi-hole core v5.0
pull/3324/head^2 v5.0
Adam Warner 4 years ago committed by GitHub
commit 4d25f69526
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23

@ -1,4 +0,0 @@
# These are supported funding model platforms
patreon: pihole
custom: https://pi-hole.net/donate

@ -175,7 +175,7 @@ While quite outdated at this point, [this original blog post about Pi-hole](http
-----
## Coverage
- [Lifehacker: Turn A Raspberry Pi Into An Ad Blocker With A Single Command](https://www.lifehacker.com.au/2015/02/turn-a-raspberry-pi-into-an-ad-blocker-with-a-single-command/) (February, 2015)
- [MakeUseOf: Adblock Everywhere: The Raspberry Pi-Hole Way](http://www.makeuseof.com/tag/adblock-everywhere-raspberry-pi-hole-way/) (March, 2015)
- [Catchpoint: Ad-Blocking on Apple iOS9: Valuing the End User Experience](http://blog.catchpoint.com/2015/09/14/ad-blocking-apple/) (September, 2015)
- [Security Now Netcast: Pi-hole](https://www.youtube.com/watch?v=p7-osq_y8i8&t=100m26s) (October, 2015)

@ -18,9 +18,8 @@
# WITHIN /etc/dnsmasq.d/yourname.conf #
###############################################################################
addn-hosts=/etc/pihole/local.list
addn-hosts=/etc/pihole/custom.list
domain-needed
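For reference, custom.list is a plain hosts-format file (IP address followed by a hostname) that dnsmasq picks up through the addn-hosts directive above; it backs the new local DNS records. A minimal sketch of what such a file might contain (the addresses and names below are illustrative, not part of this change):
# Hypothetical /etc/pihole/custom.list entries (hosts format)
192.168.1.10 nas.lan
192.168.1.20 printer.lan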

@ -0,0 +1,113 @@
#!/usr/bin/env bash
# shellcheck disable=SC1090
# Pi-hole: A black hole for Internet advertisements
# (c) 2019 Pi-hole, LLC (https://pi-hole.net)
# Network-wide ad blocking via your own hardware.
#
# Updates gravity.db database
#
# This file is copyright under the latest version of the EUPL.
# Please see LICENSE file for your rights under this license.
readonly scriptPath="/etc/.pihole/advanced/Scripts/database_migration/gravity"
upgrade_gravityDB(){
local database piholeDir auditFile version
database="${1}"
piholeDir="${2}"
auditFile="${piholeDir}/auditlog.list"
# Get database version
version="$(sqlite3 "${database}" "SELECT \"value\" FROM \"info\" WHERE \"property\" = 'version';")"
if [[ "$version" == "1" ]]; then
# This migration script upgrades the gravity.db file by
# adding the domain_audit table
echo -e " ${INFO} Upgrading gravity database from version 1 to 2"
sqlite3 "${database}" < "${scriptPath}/1_to_2.sql"
version=2
# Store audit domains in database table
if [ -e "${auditFile}" ]; then
echo -e " ${INFO} Migrating content of ${auditFile} into new database"
# database_table_from_file is defined in gravity.sh
database_table_from_file "domain_audit" "${auditFile}"
fi
fi
if [[ "$version" == "2" ]]; then
# This migration script upgrades the gravity.db file by
# renaming the regex table to regex_blacklist, and
# creating a new regex_whitelist table + corresponding linking table and views
echo -e " ${INFO} Upgrading gravity database from version 2 to 3"
sqlite3 "${database}" < "${scriptPath}/2_to_3.sql"
version=3
fi
if [[ "$version" == "3" ]]; then
# This migration script unifies the formerly separated domain
# lists into a single table with a UNIQUE domain constraint
echo -e " ${INFO} Upgrading gravity database from version 3 to 4"
sqlite3 "${database}" < "${scriptPath}/3_to_4.sql"
version=4
fi
if [[ "$version" == "4" ]]; then
# This migration script upgrades the gravity and list views
# implementing necessary changes for per-client blocking
echo -e " ${INFO} Upgrading gravity database from version 4 to 5"
sqlite3 "${database}" < "${scriptPath}/4_to_5.sql"
version=5
fi
if [[ "$version" == "5" ]]; then
# This migration script upgrades the adlist view
# to return an ID used in gravity.sh
echo -e " ${INFO} Upgrading gravity database from version 5 to 6"
sqlite3 "${database}" < "${scriptPath}/5_to_6.sql"
version=6
fi
if [[ "$version" == "6" ]]; then
# This migration script adds a special group with ID 0
# which is automatically associated to all clients not
# having their own group assignments
echo -e " ${INFO} Upgrading gravity database from version 6 to 7"
sqlite3 "${database}" < "${scriptPath}/6_to_7.sql"
version=7
fi
if [[ "$version" == "7" ]]; then
# This migration script recreates the group table
# to ensure uniqueness on the group name
# We also add date_added and date_modified columns
echo -e " ${INFO} Upgrading gravity database from version 7 to 8"
sqlite3 "${database}" < "${scriptPath}/7_to_8.sql"
version=8
fi
if [[ "$version" == "8" ]]; then
# This migration fixes some issues that were introduced
# in the previous migration script.
echo -e " ${INFO} Upgrading gravity database from version 8 to 9"
sqlite3 "${database}" < "${scriptPath}/8_to_9.sql"
version=9
fi
if [[ "$version" == "9" ]]; then
# This migration drops unused tables and creates triggers to remove
# obsolete groups assignments when the linked items are deleted
echo -e " ${INFO} Upgrading gravity database from version 9 to 10"
sqlite3 "${database}" < "${scriptPath}/9_to_10.sql"
version=10
fi
if [[ "$version" == "10" ]]; then
# This adds timestamp and an optional comment field to the client table
# These fields are only temporary and will be replaced by the columns
# defined in gravity.db.sql during gravity swapping. We add them here
# to keep the copying process generic (needs the same columns in both the
# source and the destination databases).
echo -e " ${INFO} Upgrading gravity database from version 10 to 11"
sqlite3 "${database}" < "${scriptPath}/10_to_11.sql"
version=11
fi
if [[ "$version" == "11" ]]; then
# Rename group 0 from "Unassociated" to "Default"
echo -e " ${INFO} Upgrading gravity database from version 11 to 12"
sqlite3 "${database}" < "${scriptPath}/11_to_12.sql"
version=12
fi
}
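upgrade_gravityDB is driven by gravity.sh whenever the on-disk schema is older than the current version. A minimal hedged sketch of a standalone call, assuming the stock file locations and that COL_TABLE provides the INFO glyph used in the messages (the 1-to-2 step additionally relies on database_table_from_file from gravity.sh when an old auditlog.list exists):
# Sketch only: run all pending migrations on an existing gravity database.
source "/opt/pihole/COL_TABLE"
source "/etc/.pihole/advanced/Scripts/database_migration/gravity-db.sh"
upgrade_gravityDB "/etc/pihole/gravity.db" "/etc/pihole"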

@ -0,0 +1,16 @@
.timeout 30000
BEGIN TRANSACTION;
ALTER TABLE client ADD COLUMN date_added INTEGER;
ALTER TABLE client ADD COLUMN date_modified INTEGER;
ALTER TABLE client ADD COLUMN comment TEXT;
CREATE TRIGGER tr_client_update AFTER UPDATE ON client
BEGIN
UPDATE client SET date_modified = (cast(strftime('%s', 'now') as int)) WHERE id = NEW.id;
END;
UPDATE info SET value = 11 WHERE property = 'version';
COMMIT;
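The new tr_client_update trigger keeps date_modified in step with any change to a client row. A hedged sqlite3 sketch of the effect (the IP and comment are illustrative):
# Sketch: an UPDATE on a client row refreshes its date_modified timestamp.
sqlite3 /etc/pihole/gravity.db "INSERT INTO client (ip) VALUES ('192.168.1.42');"
sqlite3 /etc/pihole/gravity.db "UPDATE client SET comment = 'laptop' WHERE ip = '192.168.1.42';"
sqlite3 /etc/pihole/gravity.db "SELECT ip, comment, date_modified FROM client WHERE ip = '192.168.1.42';"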

@ -0,0 +1,19 @@
.timeout 30000
PRAGMA FOREIGN_KEYS=OFF;
BEGIN TRANSACTION;
UPDATE "group" SET name = 'Default' WHERE id = 0;
UPDATE "group" SET description = 'The default group' WHERE id = 0;
DROP TRIGGER IF EXISTS tr_group_zero;
CREATE TRIGGER tr_group_zero AFTER DELETE ON "group"
BEGIN
INSERT OR IGNORE INTO "group" (id,enabled,name,description) VALUES (0,1,'Default','The default group');
END;
UPDATE info SET value = 12 WHERE property = 'version';
COMMIT;

@ -0,0 +1,14 @@
.timeout 30000
BEGIN TRANSACTION;
CREATE TABLE domain_audit
(
id INTEGER PRIMARY KEY AUTOINCREMENT,
domain TEXT UNIQUE NOT NULL,
date_added INTEGER NOT NULL DEFAULT (cast(strftime('%s', 'now') as int))
);
UPDATE info SET value = 2 WHERE property = 'version';
COMMIT;

@ -0,0 +1,65 @@
.timeout 30000
PRAGMA FOREIGN_KEYS=OFF;
BEGIN TRANSACTION;
ALTER TABLE regex RENAME TO regex_blacklist;
CREATE TABLE regex_blacklist_by_group
(
regex_blacklist_id INTEGER NOT NULL REFERENCES regex_blacklist (id),
group_id INTEGER NOT NULL REFERENCES "group" (id),
PRIMARY KEY (regex_blacklist_id, group_id)
);
INSERT INTO regex_blacklist_by_group SELECT * FROM regex_by_group;
DROP TABLE regex_by_group;
DROP VIEW vw_regex;
DROP TRIGGER tr_regex_update;
CREATE VIEW vw_regex_blacklist AS SELECT DISTINCT domain
FROM regex_blacklist
LEFT JOIN regex_blacklist_by_group ON regex_blacklist_by_group.regex_blacklist_id = regex_blacklist.id
LEFT JOIN "group" ON "group".id = regex_blacklist_by_group.group_id
WHERE regex_blacklist.enabled = 1 AND (regex_blacklist_by_group.group_id IS NULL OR "group".enabled = 1)
ORDER BY regex_blacklist.id;
CREATE TRIGGER tr_regex_blacklist_update AFTER UPDATE ON regex_blacklist
BEGIN
UPDATE regex_blacklist SET date_modified = (cast(strftime('%s', 'now') as int)) WHERE domain = NEW.domain;
END;
CREATE TABLE regex_whitelist
(
id INTEGER PRIMARY KEY AUTOINCREMENT,
domain TEXT UNIQUE NOT NULL,
enabled BOOLEAN NOT NULL DEFAULT 1,
date_added INTEGER NOT NULL DEFAULT (cast(strftime('%s', 'now') as int)),
date_modified INTEGER NOT NULL DEFAULT (cast(strftime('%s', 'now') as int)),
comment TEXT
);
CREATE TABLE regex_whitelist_by_group
(
regex_whitelist_id INTEGER NOT NULL REFERENCES regex_whitelist (id),
group_id INTEGER NOT NULL REFERENCES "group" (id),
PRIMARY KEY (regex_whitelist_id, group_id)
);
CREATE VIEW vw_regex_whitelist AS SELECT DISTINCT domain
FROM regex_whitelist
LEFT JOIN regex_whitelist_by_group ON regex_whitelist_by_group.regex_whitelist_id = regex_whitelist.id
LEFT JOIN "group" ON "group".id = regex_whitelist_by_group.group_id
WHERE regex_whitelist.enabled = 1 AND (regex_whitelist_by_group.group_id IS NULL OR "group".enabled = 1)
ORDER BY regex_whitelist.id;
CREATE TRIGGER tr_regex_whitelist_update AFTER UPDATE ON regex_whitelist
BEGIN
UPDATE regex_whitelist SET date_modified = (cast(strftime('%s', 'now') as int)) WHERE domain = NEW.domain;
END;
UPDATE info SET value = 3 WHERE property = 'version';
COMMIT;

@ -0,0 +1,96 @@
.timeout 30000
PRAGMA FOREIGN_KEYS=OFF;
BEGIN TRANSACTION;
CREATE TABLE domainlist
(
id INTEGER PRIMARY KEY AUTOINCREMENT,
type INTEGER NOT NULL DEFAULT 0,
domain TEXT UNIQUE NOT NULL,
enabled BOOLEAN NOT NULL DEFAULT 1,
date_added INTEGER NOT NULL DEFAULT (cast(strftime('%s', 'now') as int)),
date_modified INTEGER NOT NULL DEFAULT (cast(strftime('%s', 'now') as int)),
comment TEXT
);
ALTER TABLE whitelist ADD COLUMN type INTEGER;
UPDATE whitelist SET type = 0;
INSERT INTO domainlist (type,domain,enabled,date_added,date_modified,comment)
SELECT type,domain,enabled,date_added,date_modified,comment FROM whitelist;
ALTER TABLE blacklist ADD COLUMN type INTEGER;
UPDATE blacklist SET type = 1;
INSERT INTO domainlist (type,domain,enabled,date_added,date_modified,comment)
SELECT type,domain,enabled,date_added,date_modified,comment FROM blacklist;
ALTER TABLE regex_whitelist ADD COLUMN type INTEGER;
UPDATE regex_whitelist SET type = 2;
INSERT INTO domainlist (type,domain,enabled,date_added,date_modified,comment)
SELECT type,domain,enabled,date_added,date_modified,comment FROM regex_whitelist;
ALTER TABLE regex_blacklist ADD COLUMN type INTEGER;
UPDATE regex_blacklist SET type = 3;
INSERT INTO domainlist (type,domain,enabled,date_added,date_modified,comment)
SELECT type,domain,enabled,date_added,date_modified,comment FROM regex_blacklist;
DROP TABLE whitelist_by_group;
DROP TABLE blacklist_by_group;
DROP TABLE regex_whitelist_by_group;
DROP TABLE regex_blacklist_by_group;
CREATE TABLE domainlist_by_group
(
domainlist_id INTEGER NOT NULL REFERENCES domainlist (id),
group_id INTEGER NOT NULL REFERENCES "group" (id),
PRIMARY KEY (domainlist_id, group_id)
);
DROP TRIGGER tr_whitelist_update;
DROP TRIGGER tr_blacklist_update;
DROP TRIGGER tr_regex_whitelist_update;
DROP TRIGGER tr_regex_blacklist_update;
CREATE TRIGGER tr_domainlist_update AFTER UPDATE ON domainlist
BEGIN
UPDATE domainlist SET date_modified = (cast(strftime('%s', 'now') as int)) WHERE domain = NEW.domain;
END;
DROP VIEW vw_whitelist;
CREATE VIEW vw_whitelist AS SELECT domain, domainlist.id AS id, domainlist_by_group.group_id AS group_id
FROM domainlist
LEFT JOIN domainlist_by_group ON domainlist_by_group.domainlist_id = domainlist.id
LEFT JOIN "group" ON "group".id = domainlist_by_group.group_id
WHERE domainlist.enabled = 1 AND (domainlist_by_group.group_id IS NULL OR "group".enabled = 1)
AND domainlist.type = 0
ORDER BY domainlist.id;
DROP VIEW vw_blacklist;
CREATE VIEW vw_blacklist AS SELECT domain, domainlist.id AS id, domainlist_by_group.group_id AS group_id
FROM domainlist
LEFT JOIN domainlist_by_group ON domainlist_by_group.domainlist_id = domainlist.id
LEFT JOIN "group" ON "group".id = domainlist_by_group.group_id
WHERE domainlist.enabled = 1 AND (domainlist_by_group.group_id IS NULL OR "group".enabled = 1)
AND domainlist.type = 1
ORDER BY domainlist.id;
DROP VIEW vw_regex_whitelist;
CREATE VIEW vw_regex_whitelist AS SELECT domain, domainlist.id AS id, domainlist_by_group.group_id AS group_id
FROM domainlist
LEFT JOIN domainlist_by_group ON domainlist_by_group.domainlist_id = domainlist.id
LEFT JOIN "group" ON "group".id = domainlist_by_group.group_id
WHERE domainlist.enabled = 1 AND (domainlist_by_group.group_id IS NULL OR "group".enabled = 1)
AND domainlist.type = 2
ORDER BY domainlist.id;
DROP VIEW vw_regex_blacklist;
CREATE VIEW vw_regex_blacklist AS SELECT domain, domainlist.id AS id, domainlist_by_group.group_id AS group_id
FROM domainlist
LEFT JOIN domainlist_by_group ON domainlist_by_group.domainlist_id = domainlist.id
LEFT JOIN "group" ON "group".id = domainlist_by_group.group_id
WHERE domainlist.enabled = 1 AND (domainlist_by_group.group_id IS NULL OR "group".enabled = 1)
AND domainlist.type = 3
ORDER BY domainlist.id;
UPDATE info SET value = 4 WHERE property = 'version';
COMMIT;
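From this version on, a single domainlist table holds all four list types, distinguished by the type column (0 exact whitelist, 1 exact blacklist, 2 regex whitelist, 3 regex blacklist) and exposed through the per-type views recreated above. A hedged sketch of the round trip (example.com is illustrative):
# Sketch: an exact whitelist entry is a row with type = 0 and surfaces in vw_whitelist.
sqlite3 /etc/pihole/gravity.db "INSERT INTO domainlist (type, domain) VALUES (0, 'example.com');"
sqlite3 /etc/pihole/gravity.db "SELECT id, domain FROM vw_whitelist;"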

@ -0,0 +1,38 @@
.timeout 30000
PRAGMA FOREIGN_KEYS=OFF;
BEGIN TRANSACTION;
DROP TABLE gravity;
CREATE TABLE gravity
(
domain TEXT NOT NULL,
adlist_id INTEGER NOT NULL REFERENCES adlist (id),
PRIMARY KEY(domain, adlist_id)
);
DROP VIEW vw_gravity;
CREATE VIEW vw_gravity AS SELECT domain, adlist_by_group.group_id AS group_id
FROM gravity
LEFT JOIN adlist_by_group ON adlist_by_group.adlist_id = gravity.adlist_id
LEFT JOIN adlist ON adlist.id = gravity.adlist_id
LEFT JOIN "group" ON "group".id = adlist_by_group.group_id
WHERE adlist.enabled = 1 AND (adlist_by_group.group_id IS NULL OR "group".enabled = 1);
CREATE TABLE client
(
id INTEGER PRIMARY KEY AUTOINCREMENT,
ip TEXT NOT NULL UNIQUE
);
CREATE TABLE client_by_group
(
client_id INTEGER NOT NULL REFERENCES client (id),
group_id INTEGER NOT NULL REFERENCES "group" (id),
PRIMARY KEY (client_id, group_id)
);
UPDATE info SET value = 5 WHERE property = 'version';
COMMIT;
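This step introduces the client and client_by_group tables that later power per-client blocking; group membership is expressed purely through the linking table. A hedged sketch of registering a client and assigning it to a group (the IP and group name are illustrative):
# Sketch: create a group, register a client, and link the two.
sqlite3 /etc/pihole/gravity.db "INSERT INTO \"group\" (enabled, name) VALUES (1, 'kids');"
sqlite3 /etc/pihole/gravity.db "INSERT INTO client (ip) VALUES ('192.168.1.77');"
sqlite3 /etc/pihole/gravity.db "INSERT INTO client_by_group (client_id, group_id) SELECT client.id, \"group\".id FROM client, \"group\" WHERE client.ip = '192.168.1.77' AND \"group\".name = 'kids';"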

@ -0,0 +1,18 @@
.timeout 30000
PRAGMA FOREIGN_KEYS=OFF;
BEGIN TRANSACTION;
DROP VIEW vw_adlist;
CREATE VIEW vw_adlist AS SELECT DISTINCT address, adlist.id AS id
FROM adlist
LEFT JOIN adlist_by_group ON adlist_by_group.adlist_id = adlist.id
LEFT JOIN "group" ON "group".id = adlist_by_group.group_id
WHERE adlist.enabled = 1 AND (adlist_by_group.group_id IS NULL OR "group".enabled = 1)
ORDER BY adlist.id;
UPDATE info SET value = 6 WHERE property = 'version';
COMMIT;

@ -0,0 +1,35 @@
.timeout 30000
PRAGMA FOREIGN_KEYS=OFF;
BEGIN TRANSACTION;
INSERT OR REPLACE INTO "group" (id,enabled,name) VALUES (0,1,'Unassociated');
INSERT INTO domainlist_by_group (domainlist_id, group_id) SELECT id, 0 FROM domainlist;
INSERT INTO client_by_group (client_id, group_id) SELECT id, 0 FROM client;
INSERT INTO adlist_by_group (adlist_id, group_id) SELECT id, 0 FROM adlist;
CREATE TRIGGER tr_domainlist_add AFTER INSERT ON domainlist
BEGIN
INSERT INTO domainlist_by_group (domainlist_id, group_id) VALUES (NEW.id, 0);
END;
CREATE TRIGGER tr_client_add AFTER INSERT ON client
BEGIN
INSERT INTO client_by_group (client_id, group_id) VALUES (NEW.id, 0);
END;
CREATE TRIGGER tr_adlist_add AFTER INSERT ON adlist
BEGIN
INSERT INTO adlist_by_group (adlist_id, group_id) VALUES (NEW.id, 0);
END;
CREATE TRIGGER tr_group_zero AFTER DELETE ON "group"
BEGIN
INSERT OR REPLACE INTO "group" (id,enabled,name) VALUES (0,1,'Unassociated');
END;
UPDATE info SET value = 7 WHERE property = 'version';
COMMIT;
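Group 0 is intended to always exist so that items without explicit assignments keep working; the tr_group_zero trigger recreates it whenever it is deleted. A hedged sketch of that safety net:
# Sketch: deleting group 0 causes tr_group_zero to reinsert it immediately.
sqlite3 /etc/pihole/gravity.db "DELETE FROM \"group\" WHERE id = 0;"
sqlite3 /etc/pihole/gravity.db "SELECT id, enabled, name FROM \"group\" WHERE id = 0;"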

@ -0,0 +1,35 @@
.timeout 30000
PRAGMA FOREIGN_KEYS=OFF;
BEGIN TRANSACTION;
ALTER TABLE "group" RENAME TO "group__";
CREATE TABLE "group"
(
id INTEGER PRIMARY KEY AUTOINCREMENT,
enabled BOOLEAN NOT NULL DEFAULT 1,
name TEXT UNIQUE NOT NULL,
date_added INTEGER NOT NULL DEFAULT (cast(strftime('%s', 'now') as int)),
date_modified INTEGER NOT NULL DEFAULT (cast(strftime('%s', 'now') as int)),
description TEXT
);
CREATE TRIGGER tr_group_update AFTER UPDATE ON "group"
BEGIN
UPDATE "group" SET date_modified = (cast(strftime('%s', 'now') as int)) WHERE id = NEW.id;
END;
INSERT OR IGNORE INTO "group" (id,enabled,name,description) SELECT id,enabled,name,description FROM "group__";
DROP TABLE "group__";
CREATE TRIGGER tr_group_zero AFTER DELETE ON "group"
BEGIN
INSERT OR IGNORE INTO "group" (id,enabled,name) VALUES (0,1,'Unassociated');
END;
UPDATE info SET value = 8 WHERE property = 'version';
COMMIT;

@ -0,0 +1,27 @@
.timeout 30000
PRAGMA FOREIGN_KEYS=OFF;
BEGIN TRANSACTION;
DROP TRIGGER IF EXISTS tr_group_update;
DROP TRIGGER IF EXISTS tr_group_zero;
PRAGMA legacy_alter_table=ON;
ALTER TABLE "group" RENAME TO "group__";
PRAGMA legacy_alter_table=OFF;
ALTER TABLE "group__" RENAME TO "group";
CREATE TRIGGER tr_group_update AFTER UPDATE ON "group"
BEGIN
UPDATE "group" SET date_modified = (cast(strftime('%s', 'now') as int)) WHERE id = NEW.id;
END;
CREATE TRIGGER tr_group_zero AFTER DELETE ON "group"
BEGIN
INSERT OR IGNORE INTO "group" (id,enabled,name) VALUES (0,1,'Unassociated');
END;
UPDATE info SET value = 9 WHERE property = 'version';
COMMIT;

@ -0,0 +1,29 @@
.timeout 30000
PRAGMA FOREIGN_KEYS=OFF;
BEGIN TRANSACTION;
DROP TABLE IF EXISTS whitelist;
DROP TABLE IF EXISTS blacklist;
DROP TABLE IF EXISTS regex_whitelist;
DROP TABLE IF EXISTS regex_blacklist;
CREATE TRIGGER tr_domainlist_delete AFTER DELETE ON domainlist
BEGIN
DELETE FROM domainlist_by_group WHERE domainlist_id = OLD.id;
END;
CREATE TRIGGER tr_adlist_delete AFTER DELETE ON adlist
BEGIN
DELETE FROM adlist_by_group WHERE adlist_id = OLD.id;
END;
CREATE TRIGGER tr_client_delete AFTER DELETE ON client
BEGIN
DELETE FROM client_by_group WHERE client_id = OLD.id;
END;
UPDATE info SET value = 10 WHERE property = 'version';
COMMIT;
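The new AFTER DELETE triggers keep the *_by_group linking tables from holding references to rows that no longer exist. A hedged sketch of the cleanup (the domain is illustrative):
# Sketch: deleting a domainlist row also removes its group assignments.
sqlite3 /etc/pihole/gravity.db "INSERT INTO domainlist (type, domain) VALUES (1, 'ads.example');"
sqlite3 /etc/pihole/gravity.db "DELETE FROM domainlist WHERE domain = 'ads.example';"
sqlite3 /etc/pihole/gravity.db "SELECT COUNT(*) FROM domainlist_by_group WHERE domainlist_id NOT IN (SELECT id FROM domainlist);"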

@ -11,69 +11,87 @@
# Globals
basename=pihole
piholeDir=/etc/"${basename}"
gravityDBfile="${piholeDir}/gravity.db"
reload=false
addmode=true
verbose=true
wildcard=false
web=false
domList=()
typeId=""
comment=""
declare -i domaincount
domaincount=0
colfile="/opt/pihole/COL_TABLE"
source ${colfile}
# IDs are hard-wired to domain interpretation in the gravity database scheme
# Clients (including FTL) will read them through the corresponding views
readonly whitelist="0"
readonly blacklist="1"
readonly regex_whitelist="2"
readonly regex_blacklist="3"
GetListnameFromTypeId() {
if [[ "$1" == "${whitelist}" ]]; then
echo "whitelist"
elif [[ "$1" == "${blacklist}" ]]; then
echo "blacklist"
elif [[ "$1" == "${regex_whitelist}" ]]; then
echo "regex whitelist"
elif [[ "$1" == "${regex_blacklist}" ]]; then
echo "regex blacklist"
fi
}
GetListParamFromTypeId() {
if [[ "${typeId}" == "${whitelist}" ]]; then
echo "w"
elif [[ "${typeId}" == "${blacklist}" ]]; then
echo "b"
elif [[ "${typeId}" == "${regex_whitelist}" && "${wildcard}" == true ]]; then
echo "-white-wild"
elif [[ "${typeId}" == "${regex_whitelist}" ]]; then
echo "-white-regex"
elif [[ "${typeId}" == "${regex_blacklist}" && "${wildcard}" == true ]]; then
echo "-wild"
elif [[ "${typeId}" == "${regex_blacklist}" ]]; then
echo "-regex"
fi
}
helpFunc() {
local listname param
listname="$(GetListnameFromTypeId "${typeId}")"
param="$(GetListParamFromTypeId)"
echo "Usage: pihole -${param} [options] <domain> <domain2 ...>
Example: 'pihole -${param} site.com', or 'pihole -${param} site1.com site2.com'
${listname^} one or more domains
Options:
-d, --delmode Remove domain(s) from the ${listname}
-nr, --noreload Update ${listname} without reloading the DNS server
-q, --quiet Make output less verbose
-h, --help Show this help dialog
-l, --list Display all your ${listname}listed domains
--nuke Removes all entries in a list"
exit 0
}
ValidateDomain() {
# Convert to lowercase
domain="${1,,}"
# Check validity of domain (don't check for regex entries)
if [[ "${#domain}" -le 253 ]]; then
if [[ ( "${typeId}" == "${regex_blacklist}" || "${typeId}" == "${regex_whitelist}" ) && "${wildcard}" == false ]]; then
validDomain="${domain}"
else
validDomain=$(grep -P "^((-|_)*[a-z\\d]((-|_)*[a-z\\d])*(-|_)*)(\\.(-|_)*([a-z\\d]((-|_)*[a-z\\d])*))*$" <<< "${domain}") # Valid chars check
fi
@ -82,194 +100,182 @@ HandleOther() {
fi
if [[ -n "${validDomain}" ]]; then
domList=("${domList[@]}" "${validDomain}")
else
echo -e " ${CROSS} ${domain} is not a valid argument or domain name!"
fi
domaincount=$((domaincount+1))
}
ProcessDomainList() {
for dom in "${domList[@]}"; do
# Format domain into regex filter if requested
if [[ "${wildcard}" == true ]]; then
dom="(^|\\.)${dom//\./\\.}$"
fi
# Logic: If addmode then add to desired list and remove from the other;
# if delmode then remove from desired list but do not add to the other
if ${addmode}; then
AddDomain "${dom}"
else
RemoveDomain "${dom}"
fi
done
}
AddDomain() {
local domain num requestedListname existingTypeId existingListname
domain="$1"
# Is the domain in the list we want to add it to?
num="$(sqlite3 "${gravityDBfile}" "SELECT COUNT(*) FROM domainlist WHERE domain = '${domain}';")"
requestedListname="$(GetListnameFromTypeId "${typeId}")"
if [[ "${num}" -ne 0 ]]; then
existingTypeId="$(sqlite3 "${gravityDBfile}" "SELECT type FROM domainlist WHERE domain = '${domain}';")"
if [[ "${existingTypeId}" == "${typeId}" ]]; then
if [[ "${verbose}" == true ]]; then
echo -e " ${INFO} ${1} already exists in ${requestedListname}, no need to add!"
fi
else
existingListname="$(GetListnameFromTypeId "${existingTypeId}")"
sqlite3 "${gravityDBfile}" "UPDATE domainlist SET type = ${typeId} WHERE domain='${domain}';"
if [[ "${verbose}" == true ]]; then
echo -e " ${INFO} ${1} already exists in ${existingListname}, it has been moved to ${requestedListname}!"
fi
fi
return
fi
# Domain not found in the table, add it!
if [[ "${verbose}" == true ]]; then
echo -e " ${INFO} Adding ${domain} to the ${requestedListname}..."
fi
reload=true
# Insert only the domain here. The enabled and date_added fields will be filled
# with their default values (enabled = true, date_added = current timestamp)
if [[ -z "${comment}" ]]; then
sqlite3 "${gravityDBfile}" "INSERT INTO domainlist (domain,type) VALUES ('${domain}',${typeId});"
else
# also add comment when variable has been set through the "--comment" option
sqlite3 "${gravityDBfile}" "INSERT INTO domainlist (domain,type,comment) VALUES ('${domain}',${typeId},'${comment}');"
fi
}
RemoveDomain() {
local domain num requestedListname
domain="$1"
# Is the domain in the list we want to remove it from?
num="$(sqlite3 "${gravityDBfile}" "SELECT COUNT(*) FROM domainlist WHERE domain = '${domain}' AND type = ${typeId};")"
requestedListname="$(GetListnameFromTypeId "${typeId}")"
if [[ "${num}" -eq 0 ]]; then
if [[ "${verbose}" == true ]]; then
echo -e " ${INFO} ${domain} does not exist in ${requestedListname}, no need to remove!"
fi
return
fi
# Domain found in the table, remove it!
if [[ "${verbose}" == true ]]; then
echo -e " ${INFO} Removing ${domain} from the ${requestedListname}..."
fi
reload=true
# Remove it from the current list
sqlite3 "${gravityDBfile}" "DELETE FROM domainlist WHERE domain = '${domain}' AND type = ${typeId};"
}
Displaylist() {
local count num_pipes domain enabled status nicedate requestedListname
requestedListname="$(GetListnameFromTypeId "${typeId}")"
data="$(sqlite3 "${gravityDBfile}" "SELECT domain,enabled,date_modified FROM domainlist WHERE type = ${typeId};" 2> /dev/null)"
if [[ -z $data ]]; then
echo -e "Not showing empty list"
else
echo -e "Displaying ${requestedListname}:"
count=1
while IFS= read -r line
do
# Count number of pipes seen in this line
# This is necessary because we can only detect the pipe separating the fields
# from the end backwards as the domain (which is the first field) may contain
# pipe symbols as they are perfectly valid regex filter control characters
num_pipes="$(grep -c "^" <<< "$(grep -o "|" <<< "${line}")")"
# Extract domain and enabled status based on the obtained number of pipe characters
domain="$(cut -d'|' -f"-$((num_pipes-1))" <<< "${line}")"
enabled="$(cut -d'|' -f"$((num_pipes))" <<< "${line}")"
datemod="$(cut -d'|' -f"$((num_pipes+1))" <<< "${line}")"
# Translate boolean status into human readable string
if [[ "${enabled}" -eq 1 ]]; then
status="enabled"
else
status="disabled"
fi
# Get nice representation of numerical date stored in database
nicedate=$(date --rfc-2822 -d "@${datemod}")
echo " ${count}: ${domain} (${status}, last modified ${nicedate})"
count=$((count+1))
done <<< "${data}"
fi
exit 0;
}
NukeList() {
sqlite3 "${gravityDBfile}" "DELETE FROM domainlist WHERE type = ${typeId};"
}
GetComment() {
comment="$1"
if [[ "${comment}" =~ [^a-zA-Z0-9_\#:/\.,\ -] ]]; then
echo " ${CROSS} Found invalid characters in domain comment!"
exit
fi
}
while (( "$#" )); do
case "${1}" in
"-w" | "whitelist" ) typeId=0;;
"-b" | "blacklist" ) typeId=1;;
"--white-regex" | "white-regex" ) typeId=2;;
"--white-wild" | "white-wild" ) typeId=2; wildcard=true;;
"--wild" | "wildcard" ) typeId=3; wildcard=true;;
"--regex" | "regex" ) typeId=3;;
"-nr"| "--noreload" ) reload=false;;
"-d" | "--delmode" ) addmode=false;;
"-q" | "--quiet" ) verbose=false;;
"-h" | "--help" ) helpFunc;;
"-l" | "--list" ) Displaylist;;
"--nuke" ) NukeList;;
"--web" ) web=true;;
"--comment" ) GetComment "${2}"; shift;;
* ) ValidateDomain "${1}";;
esac
shift
done
if [[ ${domaincount} == 0 ]]; then
helpFunc
fi
ProcessDomainList
# Used on web interface
if $web; then
echo "DONE"
fi
if [[ "${reload}" != false ]]; then
pihole restartdns reload-lists
fi
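Taken together, the rewritten argument loop maps the familiar CLI entry points onto the database type IDs, assuming the pihole wrapper forwards these flags to this script unchanged. A few hedged usage sketches (domains and comment text are illustrative):
# Sketch: list management now writes to gravity.db instead of flat files.
pihole -w example.com                          # exact whitelist (type 0)
pihole -b ads.example.com --comment "testing"  # exact blacklist with a comment
pihole --white-wild cdn.example.com            # wildcard whitelist, stored as regex (type 2)
pihole -b -d ads.example.com                   # delete mode: remove instead of add
pihole -w -l                                   # display all exact whitelist entries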

@ -0,0 +1,66 @@
#!/usr/bin/env bash
# shellcheck disable=SC1090
# Pi-hole: A black hole for Internet advertisements
# (c) 2019 Pi-hole, LLC (https://pi-hole.net)
# Network-wide ad blocking via your own hardware.
#
# ARP table interaction
#
# This file is copyright under the latest version of the EUPL.
# Please see LICENSE file for your rights under this license.
coltable="/opt/pihole/COL_TABLE"
if [[ -f ${coltable} ]]; then
source ${coltable}
fi
# Determine database location
# Obtain DBFILE=... setting from pihole-FTL.db
# Constructed to return nothing when
# a) the setting is not present in the config file, or
# b) the setting is commented out (e.g. "#DBFILE=...")
FTLconf="/etc/pihole/pihole-FTL.conf"
if [ -e "$FTLconf" ]; then
DBFILE="$(sed -n -e 's/^\s*DBFILE\s*=\s*//p' ${FTLconf})"
fi
# Test for empty string. Use standard path in this case.
if [ -z "$DBFILE" ]; then
DBFILE="/etc/pihole/pihole-FTL.db"
fi
flushARP(){
local output
if [[ "${args[1]}" != "quiet" ]]; then
echo -ne " ${INFO} Flushing network table ..."
fi
# Truncate network_addresses table in pihole-FTL.db
# This needs to be done before we can truncate the network table due to
# foreign key constraints
if ! output=$(sqlite3 "${DBFILE}" "DELETE FROM network_addresses" 2>&1); then
echo -e "${OVER} ${CROSS} Failed to truncate network_addresses table"
echo " Database location: ${DBFILE}"
echo " Output: ${output}"
return 1
fi
# Truncate network table in pihole-FTL.db
if ! output=$(sqlite3 "${DBFILE}" "DELETE FROM network" 2>&1); then
echo -e "${OVER} ${CROSS} Failed to truncate network table"
echo " Database location: ${DBFILE}"
echo " Output: ${output}"
return 1
fi
if [[ "${args[1]}" != "quiet" ]]; then
echo -e "${OVER} ${TICK} Flushed network table"
fi
}
args=("$@")
case "${args[0]}" in
"arpflush" ) flushARP;;
esac
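The script expects an action keyword as its first argument and an optional "quiet" as the second; assuming the pihole wrapper forwards its arguments unchanged, usage looks roughly like this:
# Sketch: flush the stored network and network_addresses tables.
pihole arpflush
pihole arpflush quiet   # same, but without progress output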

@ -95,6 +95,7 @@ checkout() {
local path
path="development/${binary}"
echo "development" > /etc/pihole/ftlbranch
chmod 644 /etc/pihole/ftlbranch
elif [[ "${1}" == "master" ]] ; then
# Shortcut to check out master branches
echo -e " ${INFO} Shortcut \"master\" detected - checking out master branches..."
@ -108,6 +109,7 @@ checkout() {
local path
path="master/${binary}"
echo "master" > /etc/pihole/ftlbranch
chmod 644 /etc/pihole/ftlbranch
elif [[ "${1}" == "core" ]] ; then
str="Fetching branches from ${piholeGitUrl}"
echo -ne " ${INFO} $str"
@ -169,6 +171,7 @@ checkout() {
if check_download_exists "$path"; then
echo " ${TICK} Branch ${2} exists"
echo "${2}" > /etc/pihole/ftlbranch
chmod 644 /etc/pihole/ftlbranch
FTLinstall "${binary}"
restart_service pihole-FTL
enable_service pihole-FTL

@ -89,16 +89,40 @@ PIHOLE_WILDCARD_CONFIG_FILE="${DNSMASQ_D_DIRECTORY}/03-wildcard.conf"
WEB_SERVER_CONFIG_FILE="${WEB_SERVER_CONFIG_DIRECTORY}/lighttpd.conf"
#WEB_SERVER_CUSTOM_CONFIG_FILE="${WEB_SERVER_CONFIG_DIRECTORY}/external.conf"
PIHOLE_INSTALL_LOG_FILE="${PIHOLE_DIRECTORY}/install.log"
PIHOLE_RAW_BLOCKLIST_FILES="${PIHOLE_DIRECTORY}/list.*"
PIHOLE_LOCAL_HOSTS_FILE="${PIHOLE_DIRECTORY}/local.list"
PIHOLE_LOGROTATE_FILE="${PIHOLE_DIRECTORY}/logrotate"
PIHOLE_SETUP_VARS_FILE="${PIHOLE_DIRECTORY}/setupVars.conf"
PIHOLE_FTL_CONF_FILE="${PIHOLE_DIRECTORY}/pihole-FTL.conf"
# Read the value of an FTL config key. The value is printed to stdout.
#
# Args:
# 1. The key to read
# 2. The default if the setting or config does not exist
get_ftl_conf_value() {
local key=$1
local default=$2
local value
# Obtain key=... setting from pihole-FTL.conf
if [[ -e "$PIHOLE_FTL_CONF_FILE" ]]; then
# Constructed to return nothing when
# a) the setting is not present in the config file, or
# b) the setting is commented out (e.g. "#DBFILE=...")
value="$(sed -n -e "s/^\\s*$key=\\s*//p" ${PIHOLE_FTL_CONF_FILE})"
fi
# Test for missing value. Use default value in this case.
if [[ -z "$value" ]]; then
value="$default"
fi
echo "$value"
}
PIHOLE_GRAVITY_DB_FILE="$(get_ftl_conf_value "GRAVITYDB" "${PIHOLE_DIRECTORY}/gravity.db")"
PIHOLE_COMMAND="${BIN_DIRECTORY}/pihole"
PIHOLE_COLTABLE_FILE="${BIN_DIRECTORY}/COL_TABLE"
@ -109,7 +133,7 @@ FTL_PORT="${RUN_DIRECTORY}/pihole-FTL.port"
PIHOLE_LOG="${LOG_DIRECTORY}/pihole.log"
PIHOLE_LOG_GZIPS="${LOG_DIRECTORY}/pihole.log.[0-9].*"
PIHOLE_DEBUG_LOG="${LOG_DIRECTORY}/pihole_debug.log"
PIHOLE_FTL_LOG="$(get_ftl_conf_value "LOGFILE" "${LOG_DIRECTORY}/pihole-FTL.log")"
PIHOLE_WEB_SERVER_ACCESS_LOG_FILE="${WEB_SERVER_LOG_DIRECTORY}/access.log"
PIHOLE_WEB_SERVER_ERROR_LOG_FILE="${WEB_SERVER_LOG_DIRECTORY}/error.log"
@ -142,16 +166,11 @@ REQUIRED_FILES=("${PIHOLE_CRON_FILE}"
"${PIHOLE_DHCP_CONFIG_FILE}" "${PIHOLE_DHCP_CONFIG_FILE}"
"${PIHOLE_WILDCARD_CONFIG_FILE}" "${PIHOLE_WILDCARD_CONFIG_FILE}"
"${WEB_SERVER_CONFIG_FILE}" "${WEB_SERVER_CONFIG_FILE}"
"${PIHOLE_DEFAULT_AD_LISTS}"
"${PIHOLE_USER_DEFINED_AD_LISTS}"
"${PIHOLE_BLACKLIST_FILE}"
"${PIHOLE_BLOCKLIST_FILE}"
"${PIHOLE_INSTALL_LOG_FILE}" "${PIHOLE_INSTALL_LOG_FILE}"
"${PIHOLE_RAW_BLOCKLIST_FILES}" "${PIHOLE_RAW_BLOCKLIST_FILES}"
"${PIHOLE_LOCAL_HOSTS_FILE}" "${PIHOLE_LOCAL_HOSTS_FILE}"
"${PIHOLE_LOGROTATE_FILE}" "${PIHOLE_LOGROTATE_FILE}"
"${PIHOLE_SETUP_VARS_FILE}" "${PIHOLE_SETUP_VARS_FILE}"
"${PIHOLE_WHITELIST_FILE}"
"${PIHOLE_COMMAND}" "${PIHOLE_COMMAND}"
"${PIHOLE_COLTABLE_FILE}" "${PIHOLE_COLTABLE_FILE}"
"${FTL_PID}" "${FTL_PID}"
@ -795,7 +814,7 @@ dig_at() {
# This helps emulate queries to different domains that a user might query
# It will also give extra assurance that Pi-hole is correctly resolving and blocking domains
local random_url
random_url=$(sqlite3 "${PIHOLE_GRAVITY_DB_FILE}" "SELECT domain FROM vw_gravity ORDER BY RANDOM() LIMIT 1")
# First, do a dig on localhost to see if Pi-hole can use itself to block a domain
if local_dig=$(dig +tries=1 +time=2 -"${protocol}" "${random_url}" @${local_address} +short "${record_type}"); then
@ -977,8 +996,7 @@ list_files_in_dir() {
if [[ -d "${dir_to_parse}/${each_file}" ]]; then if [[ -d "${dir_to_parse}/${each_file}" ]]; then
# If it's a directoy, do nothing # If it's a directoy, do nothing
: :
elif [[ "${dir_to_parse}/${each_file}" == "${PIHOLE_BLOCKLIST_FILE}" ]] || \ elif [[ "${dir_to_parse}/${each_file}" == "${PIHOLE_DEBUG_LOG}" ]] || \
[[ "${dir_to_parse}/${each_file}" == "${PIHOLE_DEBUG_LOG}" ]] || \
[[ "${dir_to_parse}/${each_file}" == "${PIHOLE_RAW_BLOCKLIST_FILES}" ]] || \ [[ "${dir_to_parse}/${each_file}" == "${PIHOLE_RAW_BLOCKLIST_FILES}" ]] || \
[[ "${dir_to_parse}/${each_file}" == "${PIHOLE_INSTALL_LOG_FILE}" ]] || \ [[ "${dir_to_parse}/${each_file}" == "${PIHOLE_INSTALL_LOG_FILE}" ]] || \
[[ "${dir_to_parse}/${each_file}" == "${PIHOLE_SETUP_VARS_FILE}" ]] || \ [[ "${dir_to_parse}/${each_file}" == "${PIHOLE_SETUP_VARS_FILE}" ]] || \
@ -1063,31 +1081,71 @@ head_tail_log() {
IFS="$OLD_IFS" IFS="$OLD_IFS"
} }
show_db_entries() {
local title="${1}"
local query="${2}"
local widths="${3}"
echo_current_diagnostic "${title}"
OLD_IFS="$IFS"
IFS=$'\r\n'
local entries=()
mapfile -t entries < <(\
sqlite3 "${PIHOLE_GRAVITY_DB_FILE}" \
-cmd ".headers on" \
-cmd ".mode column" \
-cmd ".width ${widths}" \
"${query}"\
)
for line in "${entries[@]}"; do
log_write " ${line}"
done
IFS="$OLD_IFS"
}
show_groups() {
show_db_entries "Groups" "SELECT id,CASE enabled WHEN '0' THEN ' 0' WHEN '1' THEN ' 1' ELSE enabled END enabled,name,datetime(date_added,'unixepoch','localtime') date_added,datetime(date_modified,'unixepoch','localtime') date_modified,description FROM \"group\"" "4 7 50 19 19 50"
}
show_adlists() {
show_db_entries "Adlists" "SELECT id,CASE enabled WHEN '0' THEN ' 0' WHEN '1' THEN ' 1' ELSE enabled END enabled,GROUP_CONCAT(adlist_by_group.group_id) group_ids,address,datetime(date_added,'unixepoch','localtime') date_added,datetime(date_modified,'unixepoch','localtime') date_modified,comment FROM adlist LEFT JOIN adlist_by_group ON adlist.id = adlist_by_group.adlist_id GROUP BY id;" "4 7 12 100 19 19 50"
}
show_domainlist() {
show_db_entries "Domainlist (0/1 = exact white-/blacklist, 2/3 = regex white-/blacklist)" "SELECT id,CASE type WHEN '0' THEN '0 ' WHEN '1' THEN ' 1 ' WHEN '2' THEN ' 2 ' WHEN '3' THEN ' 3' ELSE type END type,CASE enabled WHEN '0' THEN ' 0' WHEN '1' THEN ' 1' ELSE enabled END enabled,GROUP_CONCAT(domainlist_by_group.group_id) group_ids,domain,datetime(date_added,'unixepoch','localtime') date_added,datetime(date_modified,'unixepoch','localtime') date_modified,comment FROM domainlist LEFT JOIN domainlist_by_group ON domainlist.id = domainlist_by_group.domainlist_id GROUP BY id;" "4 4 7 12 100 19 19 50"
}
show_clients() {
show_db_entries "Clients" "SELECT id,GROUP_CONCAT(client_by_group.group_id) group_ids,ip,datetime(date_added,'unixepoch','localtime') date_added,datetime(date_modified,'unixepoch','localtime') date_modified,comment FROM client LEFT JOIN client_by_group ON client.id = client_by_group.client_id GROUP BY id;" "4 12 100 19 19 50"
}
analyze_gravity_list() {
echo_current_diagnostic "Gravity List and Database"
local gravity_permissions
gravity_permissions=$(ls -ld "${PIHOLE_GRAVITY_DB_FILE}")
log_write "${COL_GREEN}${gravity_permissions}${COL_NC}"
show_db_entries "Info table" "SELECT property,value FROM info" "20 40"
gravity_updated_raw="$(sqlite3 "${PIHOLE_GRAVITY_DB_FILE}" "SELECT value FROM info where property = 'updated'")"
gravity_updated="$(date -d @"${gravity_updated_raw}")"
log_write " Last gravity run finished at: ${COL_CYAN}${gravity_updated}${COL_NC}"
log_write ""
OLD_IFS="$IFS"
IFS=$'\r\n'
local gravity_sample=()
mapfile -t gravity_sample < <(sqlite3 "${PIHOLE_GRAVITY_DB_FILE}" "SELECT domain FROM vw_gravity LIMIT 10")
log_write " ${COL_CYAN}----- First 10 Gravity Domains -----${COL_NC}"
for line in "${gravity_sample[@]}"; do
log_write " ${line}"
done
log_write ""
IFS="$OLD_IFS"
}
@ -1238,6 +1296,10 @@ process_status
parse_setup_vars
check_x_headers
analyze_gravity_list
show_groups
show_domainlist
show_clients
show_adlists
show_content_of_pihole_files
parse_locale
analyze_pihole_log
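Each of the new show_* helpers is a thin wrapper around show_db_entries, i.e. a formatted sqlite3 query against gravity.db; a hedged standalone equivalent of show_groups:
# Sketch: roughly what show_groups logs, run directly against the database.
sqlite3 /etc/pihole/gravity.db \
-cmd ".headers on" \
-cmd ".mode column" \
-cmd ".width 4 7 50 19 19 50" \
"SELECT id,enabled,name,datetime(date_added,'unixepoch','localtime') date_added,datetime(date_modified,'unixepoch','localtime') date_modified,description FROM \"group\";"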

@ -39,8 +39,9 @@ if [[ "$@" == *"once"* ]]; then
# Note that moving the file is not an option, as
# dnsmasq would happily continue writing into the
# moved file (it will have the same file handler)
cp -p /var/log/pihole.log /var/log/pihole.log.1
echo " " > /var/log/pihole.log
chmod 644 /var/log/pihole.log
fi
else
# Manual flushing
@ -53,6 +54,7 @@ else
echo " " > /var/log/pihole.log echo " " > /var/log/pihole.log
if [ -f /var/log/pihole.log.1 ]; then if [ -f /var/log/pihole.log.1 ]; then
echo " " > /var/log/pihole.log.1 echo " " > /var/log/pihole.log.1
chmod 644 /var/log/pihole.log.1
fi fi
fi fi
# Delete most recent 24 hours from FTL's database, leave even older data intact (don't wipe out all history) # Delete most recent 24 hours from FTL's database, leave even older data intact (don't wipe out all history)

@ -11,10 +11,8 @@
# Globals
piholeDir="/etc/pihole"
gravityDBfile="${piholeDir}/gravity.db"
options="$*"
all=""
exact=""
blockpage=""
@ -23,27 +21,10 @@ matchType="match"
colfile="/opt/pihole/COL_TABLE" colfile="/opt/pihole/COL_TABLE"
source "${colfile}" source "${colfile}"
# Print each subdomain
# e.g: foo.bar.baz.com = "foo.bar.baz.com bar.baz.com baz.com com"
processWildcards() {
IFS="." read -r -a array <<< "${1}"
for (( i=${#array[@]}-1; i>=0; i-- )); do
ar=""
for (( j=${#array[@]}-1; j>${#array[@]}-i-2; j-- )); do
if [[ $j == $((${#array[@]}-1)) ]]; then
ar="${array[$j]}"
else
ar="${array[$j]}.${ar}"
fi
done
echo "${ar}"
done
}
# Scan an array of files for matching strings # Scan an array of files for matching strings
scanList(){ scanList(){
# Escape full stops # Escape full stops
local domain="${1//./\\.}" lists="${2}" type="${3:-}" local domain="${1}" esc_domain="${1//./\\.}" lists="${2}" type="${3:-}"
# Prevent grep from printing file path # Prevent grep from printing file path
cd "$piholeDir" || exit 1 cd "$piholeDir" || exit 1
@ -52,11 +33,18 @@ scanList(){
export LC_CTYPE=C
# /dev/null forces filename to be printed when only one list has been generated
# shellcheck disable=SC2086
case "${type}" in
"exact" ) grep -i -E -l "(^|(?<!#)\\s)${esc_domain}($|\\s|#)" ${lists} /dev/null 2>/dev/null;;
# Iterate through each regexp and check whether it matches the domainQuery
# If it does, print the matching regexp and continue looping
# Input 1 - regexps | Input 2 - domainQuery
"regex" )
for list in ${lists}; do
if [[ "${domain}" =~ ${list} ]]; then
printf "%b\n" "${list}";
fi
done;;
* ) grep -i "${esc_domain}" ${lists} /dev/null 2>/dev/null;;
esac
}
@ -66,23 +54,16 @@ Example: 'pihole -q -exact domain.com'
Query the adlists for a specified domain
Options:
-exact Search the block lists for exact domain matches
-all Return all query matches within a block list
-h, --help Show this help dialog"
exit 0
fi
# Handle valid options
if [[ "${options}" == *"-bp"* ]]; then
exact="exact"; blockpage=true
else
[[ "${options}" == *"-all"* ]] && all=true
if [[ "${options}" == *"-exact"* ]]; then
exact="exact"; matchType="exact ${matchType}"
@ -107,69 +88,115 @@ if [[ -n "${str:-}" ]]; then
exit 1
fi
scanDatabaseTable() {
local domain table type querystr result extra
domain="$(printf "%q" "${1}")"
table="${2}"
type="${3:-}"
# As underscores are legitimate parts of domains, we escape them when using the LIKE operator.
# Underscores are SQLite wildcards matching exactly one character. We obviously want to suppress this
# behavior. The "ESCAPE '\'" clause specifies that an underscore preceded by an '\' should be matched
# as a literal underscore character. We pretreat the $domain variable accordingly to escape underscores.
if [[ "${table}" == "gravity" ]]; then
case "${exact}" in
"exact" ) querystr="SELECT gravity.domain,adlist.address,adlist.enabled FROM gravity LEFT JOIN adlist ON adlist.id = gravity.adlist_id WHERE domain = '${domain}'";;
* ) querystr="SELECT gravity.domain,adlist.address,adlist.enabled FROM gravity LEFT JOIN adlist ON adlist.id = gravity.adlist_id WHERE domain LIKE '%${domain//_/\\_}%' ESCAPE '\\'";;
esac
else
case "${exact}" in
"exact" ) querystr="SELECT domain,enabled FROM domainlist WHERE type = '${type}' AND domain = '${domain}'";;
* ) querystr="SELECT domain,enabled FROM domainlist WHERE type = '${type}' AND domain LIKE '%${domain//_/\\_}%' ESCAPE '\\'";;
esac
fi
# Send prepared query to gravity database
result="$(sqlite3 "${gravityDBfile}" "${querystr}")" 2> /dev/null
if [[ -z "${result}" ]]; then
# Return early when there are no matches in this table
return
fi
if [[ "${table}" == "gravity" ]]; then
echo "${result}"
return
fi
# Mark domain as having been white-/blacklist matched (global variable)
wbMatch=true
# Print table name
if [[ -z "${blockpage}" ]]; then
echo " ${matchType^} found in ${COL_BOLD}exact ${table}${COL_NC}"
fi
# Loop over results and print them
mapfile -t results <<< "${result}"
for result in "${results[@]}"; do
if [[ -n "${blockpage}" ]]; then
echo "π ${result}"
exit 0
fi
domain="${result/|*}"
if [[ "${result#*|}" == "0" ]]; then
extra=" (disabled)"
else
extra=""
fi
echo " ${domain}${extra}"
done
}
scanRegexDatabaseTable() {
local domain list
domain="${1}"
list="${2}"
type="${3:-}"
# Query all regex from the corresponding database tables
mapfile -t regexList < <(sqlite3 "${gravityDBfile}" "SELECT domain FROM domainlist WHERE type = ${type}" 2> /dev/null)
# If we have regexps to process
if [[ "${#regexList[@]}" -ne 0 ]]; then
# Split regexps over a new line
str_regexList=$(printf '%s\n' "${regexList[@]}")
# Check domain against regexps
mapfile -t regexMatches < <(scanList "${domain}" "${str_regexList}" "regex")
# If there were regex matches
if [[ "${#regexMatches[@]}" -ne 0 ]]; then
# Split matching regexps over a new line
str_regexMatches=$(printf '%s\n' "${regexMatches[@]}")
# Form a "matched" message
str_message="${matchType^} found in ${COL_BOLD}regex ${list}${COL_NC}"
# Form a "results" message
str_result="${COL_BOLD}${str_regexMatches}${COL_NC}"
# If we are displaying more than just the source of the block
if [[ -z "${blockpage}" ]]; then
# Set the wildcard match flag
wcMatch=true
# Echo the "matched" message, indented by one space
echo " ${str_message}"
# Echo the "results" message, each line indented by three spaces
# shellcheck disable=SC2001
echo "${str_result}" | sed 's/^/ /'
else
echo "π .wildcard"
exit 0
fi
fi
fi
}
# Scan Whitelist and Blacklist
scanDatabaseTable "${domainQuery}" "whitelist" "0"
scanDatabaseTable "${domainQuery}" "blacklist" "1"
# Scan Regex table
scanRegexDatabaseTable "${domainQuery}" "whitelist" "2"
scanRegexDatabaseTable "${domainQuery}" "blacklist" "3"
# Query block lists
mapfile -t results <<< "$(scanDatabaseTable "${domainQuery}" "gravity")"
# Handle notices
if [[ -z "${wbMatch:-}" ]] && [[ -z "${wcMatch:-}" ]] && [[ -z "${results[*]}" ]]; then
@ -184,15 +211,6 @@ elif [[ -z "${all}" ]] && [[ "${#results[*]}" -ge 100 ]]; then
exit 0
fi
# Print "Exact matches for" title
if [[ -n "${exact}" ]] && [[ -z "${blockpage}" ]]; then
plural=""; [[ "${#results[*]}" -gt 1 ]] && plural="es"
@ -200,28 +218,25 @@ if [[ -n "${exact}" ]] && [[ -z "${blockpage}" ]]; then
fi
for result in "${results[@]}"; do
    match="${result/|*/}"
    extra="${result#*|}"
    adlistAddress="${extra/|*/}"
    extra="${extra#*|}"
    if [[ "${extra}" == "0" ]]; then
        extra="(disabled)"
    else
        extra=""
    fi

    if [[ -n "${blockpage}" ]]; then
        echo "0 ${adlistAddress}"
    elif [[ -n "${exact}" ]]; then
        echo "   - ${adlistAddress} ${extra}"
    else
        if [[ ! "${adlistAddress}" == "${adlistAddress_prev:-}" ]]; then
            count=""
            echo " ${matchType^} found in ${COL_BOLD}${adlistAddress}${COL_NC}:"
            adlistAddress_prev="${adlistAddress}"
        fi
        : $((count++))
@ -231,7 +246,7 @@ for result in "${results[@]}"; do
[[ "${count}" -gt "${max_count}" ]] && continue [[ "${count}" -gt "${max_count}" ]] && continue
echo " ${COL_GRAY}Over ${count} results found, skipping rest of file${COL_NC}" echo " ${COL_GRAY}Over ${count} results found, skipping rest of file${COL_NC}"
else else
echo " ${result#*:}" echo " ${match} ${extra}"
fi fi
fi fi
done done

@ -51,6 +51,7 @@ if [[ "$2" == "remote" ]]; then
    GITHUB_CORE_VERSION="$(json_extract tag_name "$(curl -s 'https://api.github.com/repos/pi-hole/pi-hole/releases/latest' 2> /dev/null)")"
    echo -n "${GITHUB_CORE_VERSION}" > "${GITHUB_VERSION_FILE}"
    chmod 644 "${GITHUB_VERSION_FILE}"

    if [[ "${INSTALL_WEB_INTERFACE}" == true ]]; then
        GITHUB_WEB_VERSION="$(json_extract tag_name "$(curl -s 'https://api.github.com/repos/pi-hole/AdminLTE/releases/latest' 2> /dev/null)")"
@ -66,6 +67,7 @@ else
    CORE_BRANCH="$(get_local_branch /etc/.pihole)"
    echo -n "${CORE_BRANCH}" > "${LOCAL_BRANCH_FILE}"
    chmod 644 "${LOCAL_BRANCH_FILE}"

    if [[ "${INSTALL_WEB_INTERFACE}" == true ]]; then
        WEB_BRANCH="$(get_local_branch /var/www/html/admin)"
@ -79,6 +81,7 @@ else
    CORE_VERSION="$(get_local_version /etc/.pihole)"
    echo -n "${CORE_VERSION}" > "${LOCAL_VERSION_FILE}"
    chmod 644 "${LOCAL_VERSION_FILE}"

    if [[ "${INSTALL_WEB_INTERFACE}" == true ]]; then
        WEB_VERSION="$(get_local_version /var/www/html/admin)"

@ -84,6 +84,21 @@ getRemoteVersion(){
    # Get the version from the remote origin
    local daemon="${1}"
    local version
local cachedVersions
local arrCache
cachedVersions="/etc/pihole/GitHubVersions"
#If the above file exists, then we can read from that. Prevents overuse of Github API
if [[ -f "$cachedVersions" ]]; then
IFS=' ' read -r -a arrCache < "$cachedVersions"
case $daemon in
"pi-hole" ) echo "${arrCache[0]}";;
"AdminLTE" ) echo "${arrCache[1]}";;
"FTL" ) echo "${arrCache[2]}";;
esac
return 0
fi
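    # For context, a hedged sketch of the cache layout assumed above: /etc/pihole/GitHubVersions
    # is read as one space-separated line holding the core, web and FTL release tags in that
    # order (the tag values below are illustrative only).
    #   printf "v5.0 v5.0 v5.0" > /etc/pihole/GitHubVersions
    #   IFS=' ' read -r -a arrCache < /etc/pihole/GitHubVersions
    #   echo "core=${arrCache[0]} web=${arrCache[1]} ftl=${arrCache[2]}"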
    version=$(curl --silent --fail "https://api.github.com/repos/pi-hole/${daemon}/releases/latest" | \
    awk -F: '$1 ~/tag_name/ { print $2 }' | \
@ -97,22 +112,48 @@ getRemoteVersion(){
    return 0
}
getLocalBranch(){
# Get the checked out branch of the local directory
local directory="${1}"
local branch
    # The local FTL branch is stored in /etc/pihole/ftlbranch
if [[ "$1" == "FTL" ]]; then
branch="$(pihole-FTL branch)"
else
cd "${directory}" 2> /dev/null || { echo "${DEFAULT}"; return 1; }
branch=$(git rev-parse --abbrev-ref HEAD || echo "$DEFAULT")
fi
if [[ ! "${branch}" =~ ^v ]]; then
if [[ "${branch}" == "master" ]]; then
echo ""
elif [[ "${branch}" == "HEAD" ]]; then
echo "in detached HEAD state at "
else
echo "${branch} "
fi
else
# Branch started in "v"
echo "release "
fi
return 0
}
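# Usage sketch (output values are illustrative): the prefix returned here feeds versionOutput()
# below, so on a tagged master checkout it prints the "release " prefix, e.g.
#   branch=$(getLocalBranch /etc/.pihole)   # -> "release "
#   echo "Pi-hole version is ${branch}$(getLocalVersion /etc/.pihole)"
#   # e.g. "Pi-hole version is release v5.0"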
versionOutput() {
    [[ "$1" == "pi-hole" ]] && GITDIR=$COREGITDIR
    [[ "$1" == "AdminLTE" ]] && GITDIR=$WEBGITDIR
    [[ "$1" == "FTL" ]] && GITDIR="FTL"

    [[ "$2" == "-c" ]] || [[ "$2" == "--current" ]] || [[ -z "$2" ]] && current=$(getLocalVersion $GITDIR) && branch=$(getLocalBranch $GITDIR)
    [[ "$2" == "-l" ]] || [[ "$2" == "--latest" ]] || [[ -z "$2" ]] && latest=$(getRemoteVersion "$1")
    if [[ "$2" == "-h" ]] || [[ "$2" == "--hash" ]]; then
        [[ "$3" == "-c" ]] || [[ "$3" == "--current" ]] || [[ -z "$3" ]] && curHash=$(getLocalHash "$GITDIR") && branch=$(getLocalBranch $GITDIR)
        [[ "$3" == "-l" ]] || [[ "$3" == "--latest" ]] || [[ -z "$3" ]] && latHash=$(getRemoteHash "$1" "$(cd "$GITDIR" 2> /dev/null && git rev-parse --abbrev-ref HEAD)")
    fi

    if [[ -n "$current" ]] && [[ -n "$latest" ]]; then
        output="${1^} version is $branch$current (Latest: $latest)"
    elif [[ -n "$current" ]] && [[ -z "$latest" ]]; then
        output="Current ${1^} version is $branch$current."
    elif [[ -z "$current" ]] && [[ -n "$latest" ]]; then
        output="Latest ${1^} version is $latest"
    elif [[ "$curHash" == "N/A" ]] || [[ "$latHash" == "N/A" ]]; then

@ -17,6 +17,9 @@ readonly FTLconf="/etc/pihole/pihole-FTL.conf"
# 03 -> wildcards
readonly dhcpstaticconfig="/etc/dnsmasq.d/04-pihole-static-dhcp.conf"
readonly PI_HOLE_BIN_DIR="/usr/local/bin"
readonly dnscustomfile="/etc/pihole/custom.list"
readonly gravityDBfile="/etc/pihole/gravity.db"

coltable="/opt/pihole/COL_TABLE"
if [[ -f ${coltable} ]]; then
@ -33,7 +36,6 @@ Options:
  -c, celsius         Set Celsius as preferred temperature unit
  -f, fahrenheit      Set Fahrenheit as preferred temperature unit
  -k, kelvin          Set Kelvin as preferred temperature unit
-r, hostrecord Add a name to the DNS associated to an IPv4/IPv6 address
  -e, email           Set an administrative contact address for the Block Page
  -h, --help          Show this help dialog
  -i, interface       Specify dnsmasq's interface listening behavior
@ -86,9 +88,9 @@ SetTemperatureUnit() {
HashPassword() {
    # Compute password hash twice to avoid rainbow table vulnerability
    return=$(echo -n "${1}" | sha256sum | sed 's/\s.*$//')
    return=$(echo -n "${return}" | sha256sum | sed 's/\s.*$//')
    echo "${return}"
}
SetWebPassword() {
@ -142,18 +144,18 @@ ProcessDNSSettings() {
  delete_dnsmasq_setting "server"

  COUNTER=1
  while true ; do
    var=PIHOLE_DNS_${COUNTER}
    if [ -z "${!var}" ]; then
      break;
    fi
    add_dnsmasq_setting "server" "${!var}"
    (( COUNTER++ ))
  done

  # The option LOCAL_DNS_PORT is deprecated
  # We apply it once more, and then convert it into the current format
  if [ -n "${LOCAL_DNS_PORT}" ]; then
    add_dnsmasq_setting "server" "127.0.0.1#${LOCAL_DNS_PORT}"
    add_setting "PIHOLE_DNS_${COUNTER}" "127.0.0.1#${LOCAL_DNS_PORT}"
    delete_setting "LOCAL_DNS_PORT"
@ -176,14 +178,13 @@ ProcessDNSSettings() {
if [[ "${DNSSEC}" == true ]]; then if [[ "${DNSSEC}" == true ]]; then
echo "dnssec echo "dnssec
trust-anchor=.,19036,8,2,49AAC11D7B6F6446702E54A1607371607A1A41855200FD2CE1CDDE32F24E8FB5
trust-anchor=.,20326,8,2,E06D44B80B8F1D39A95C0B0D7C65D08458E880409BBC683457104237C7F8EC8D
" >> "${dnsmasqconfig}"
  fi

  delete_dnsmasq_setting "host-record"

  if [ -n "${HOSTRECORD}" ]; then
    add_dnsmasq_setting "host-record" "${HOSTRECORD}"
  fi
@ -328,6 +329,7 @@ dhcp-option=option:router,${DHCP_ROUTER}
dhcp-leasefile=/etc/pihole/dhcp.leases
#quiet-dhcp
" > "${dhcpconfig}"
chmod 644 "${dhcpconfig}"
if [[ "${PIHOLE_DOMAIN}" != "none" ]]; then if [[ "${PIHOLE_DOMAIN}" != "none" ]]; then
echo "domain=${PIHOLE_DOMAIN}" >> "${dhcpconfig}" echo "domain=${PIHOLE_DOMAIN}" >> "${dhcpconfig}"
@ -398,22 +400,38 @@ SetWebUILayout() {
change_setting "WEBUIBOXEDLAYOUT" "${args[2]}" change_setting "WEBUIBOXEDLAYOUT" "${args[2]}"
} }
CheckUrl(){
local regex
# Check for characters NOT allowed in URLs
regex="[^a-zA-Z0-9:/?&%=~._-]"
if [[ "${1}" =~ ${regex} ]]; then
return 1
else
return 0
fi
}
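# Usage sketch for CheckUrl (the URLs are made-up examples): it returns success only while the
# address is built from the allowed character set, so a URL containing a space is rejected.
#   CheckUrl "https://example.com/hosts.txt" && echo "accepted"
#   CheckUrl "https://example.com/bad list.txt" || echo "rejected"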
CustomizeAdLists() {
    local address
    address="${args[3]}"
    local comment
    comment="${args[4]}"

    if CheckUrl "${address}"; then
        if [[ "${args[2]}" == "enable" ]]; then
            sqlite3 "${gravityDBfile}" "UPDATE adlist SET enabled = 1 WHERE address = '${address}'"
        elif [[ "${args[2]}" == "disable" ]]; then
            sqlite3 "${gravityDBfile}" "UPDATE adlist SET enabled = 0 WHERE address = '${address}'"
        elif [[ "${args[2]}" == "add" ]]; then
            sqlite3 "${gravityDBfile}" "INSERT OR IGNORE INTO adlist (address, comment) VALUES ('${address}', '${comment}')"
        elif [[ "${args[2]}" == "del" ]]; then
            sqlite3 "${gravityDBfile}" "DELETE FROM adlist WHERE address = '${address}'"
        else
            echo "Not permitted"
            return 1
        fi
    else
        echo "Invalid Url"
        return 1
    fi
}
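# For reference, a hedged sketch of the statement the "add" branch above ends up issuing against
# gravity.db (the list URL and comment are placeholders, not shipped defaults):
#   sqlite3 /etc/pihole/gravity.db \
#     "INSERT OR IGNORE INTO adlist (address, comment) VALUES ('https://example.com/hosts.txt', 'example list')"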
@ -459,32 +477,6 @@ RemoveDHCPStaticAddress() {
sed -i "/dhcp-host=${mac}.*/d" "${dhcpstaticconfig}" sed -i "/dhcp-host=${mac}.*/d" "${dhcpstaticconfig}"
} }
SetHostRecord() {
if [[ "${1}" == "-h" ]] || [[ "${1}" == "--help" ]]; then
echo "Usage: pihole -a hostrecord <domain> [IPv4-address],[IPv6-address]
Example: 'pihole -a hostrecord home.domain.com 192.168.1.1,2001:db8:a0b:12f0::1'
Add a name to the DNS associated to an IPv4/IPv6 address
Options:
\"\" Empty: Remove host record
-h, --help Show this help dialog"
exit 0
fi
if [[ -n "${args[3]}" ]]; then
change_setting "HOSTRECORD" "${args[2]},${args[3]}"
echo -e " ${TICK} Setting host record for ${args[2]} to ${args[3]}"
else
change_setting "HOSTRECORD" ""
echo -e " ${TICK} Removing host record"
fi
ProcessDNSSettings
# Restart dnsmasq to load new configuration
RestartDNS
}
SetAdminEmail() {
  if [[ "${1}" == "-h" ]] || [[ "${1}" == "--help" ]]; then
    echo "Usage: pihole -a email <address>
@ -498,6 +490,16 @@ Options:
  fi

  if [[ -n "${args[2]}" ]]; then
# Sanitize email address in case of security issues
# Regex from https://stackoverflow.com/a/2138832/4065967
local regex
regex="^[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Za-z]{2,}\$"
if [[ ! "${args[2]}" =~ ${regex} ]]; then
echo -e " ${CROSS} Invalid email address"
exit 0
fi
change_setting "ADMIN_EMAIL" "${args[2]}" change_setting "ADMIN_EMAIL" "${args[2]}"
echo -e " ${TICK} Setting admin contact to ${args[2]}" echo -e " ${TICK} Setting admin contact to ${args[2]}"
else else
@ -523,10 +525,10 @@ Interfaces:
  fi

  if [[ "${args[2]}" == "all" ]]; then
    echo -e " ${INFO} Listening on all interfaces, permitting all origins. Please use a firewall!"
    change_setting "DNSMASQ_LISTENING" "all"
  elif [[ "${args[2]}" == "local" ]]; then
    echo -e " ${INFO} Listening on all interfaces, permitting origins from one hop away (LAN)"
    change_setting "DNSMASQ_LISTENING" "local"
  else
    echo -e " ${INFO} Listening only on interface ${PIHOLE_INTERFACE}"
@ -543,23 +545,50 @@ Interfaces:
}

Teleporter() {
    local datetimestamp
    datetimestamp=$(date "+%Y-%m-%d_%H-%M-%S")
    php /var/www/html/admin/scripts/pi-hole/php/teleporter.php > "pi-hole-teleporter_${datetimestamp}.tar.gz"
}
checkDomain()
{
local domain validDomain
# Convert to lowercase
domain="${1,,}"
validDomain=$(grep -P "^((-|_)*[a-z\\d]((-|_)*[a-z\\d])*(-|_)*)(\\.(-|_)*([a-z\\d]((-|_)*[a-z\\d])*))*$" <<< "${domain}") # Valid chars check
validDomain=$(grep -P "^[^\\.]{1,63}(\\.[^\\.]{1,63})*$" <<< "${validDomain}") # Length of each label
echo "${validDomain}"
}
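# Usage sketch for checkDomain (inputs are illustrative): valid names are echoed back
# lowercased, invalid input yields an empty string.
#   checkDomain "Ads.Example.COM"   # -> ads.example.com
#   checkDomain "not a domain!"     # -> (empty)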
addAudit()
{
    shift # skip "-a"
    shift # skip "audit"
    local domains validDomain
    domains=""
    for domain in "$@"
    do
        # Check domain to be added. Only continue if it is valid
        validDomain="$(checkDomain "${domain}")"
        if [[ -n "${validDomain}" ]]; then
            # Put a comma between domains when there is more than one domain to be added
            # SQL INSERT allows adding multiple rows at once using the format
            ## INSERT INTO table (domain) VALUES ('abc.de'),('fgh.ij'),('klm.no'),('pqr.st');
            if [[ -n "${domains}" ]]; then
                domains="${domains},"
            fi
            domains="${domains}('${domain}')"
        fi
    done
    # Insert only the domain here. The date_added field will be
    # filled with its default value (date_added = current timestamp)
    sqlite3 "${gravityDBfile}" "INSERT INTO domain_audit (domain) VALUES ${domains};"
}

clearAudit()
{
    sqlite3 "${gravityDBfile}" "DELETE FROM domain_audit;"
}
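# Usage sketch (the domains are placeholders): auditing two domains from the CLI builds a
# single multi-row INSERT via addAudit() above.
#   pihole -a audit example.com ads.example.net
#   # executes: INSERT INTO domain_audit (domain) VALUES ('example.com'),('ads.example.net');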
SetPrivacyLevel() {
@ -569,6 +598,28 @@ SetPrivacyLevel() {
  fi
}
AddCustomDNSAddress() {
echo -e " ${TICK} Adding custom DNS entry..."
ip="${args[2]}"
host="${args[3]}"
echo "${ip} ${host}" >> "${dnscustomfile}"
# Restart dnsmasq to load new custom DNS entries
RestartDNS
}
RemoveCustomDNSAddress() {
echo -e " ${TICK} Removing custom DNS entry..."
ip="${args[2]}"
host="${args[3]}"
sed -i "/${ip} ${host}/d" "${dnscustomfile}"
# Restart dnsmasq to update removed custom DNS entries
RestartDNS
}
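# Usage sketch (the IP and hostname are made up), assuming the usual "pihole -a" wrapper:
#   pihole -a addcustomdns 192.168.1.10 nas.lan
#   # appends "192.168.1.10 nas.lan" to /etc/pihole/custom.list and restarts the resolver
#   pihole -a removecustomdns 192.168.1.10 nas.lan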
main() {
    args=("$@")
@ -592,7 +643,6 @@ main() {
"resolve" ) ResolutionSettings;; "resolve" ) ResolutionSettings;;
"addstaticdhcp" ) AddDHCPStaticAddress;; "addstaticdhcp" ) AddDHCPStaticAddress;;
"removestaticdhcp" ) RemoveDHCPStaticAddress;; "removestaticdhcp" ) RemoveDHCPStaticAddress;;
"-r" | "hostrecord" ) SetHostRecord "$3";;
"-e" | "email" ) SetAdminEmail "$3";; "-e" | "email" ) SetAdminEmail "$3";;
"-i" | "interface" ) SetListeningMode "$@";; "-i" | "interface" ) SetListeningMode "$@";;
"-t" | "teleporter" ) Teleporter;; "-t" | "teleporter" ) Teleporter;;
@ -600,6 +650,8 @@ main() {
"audit" ) addAudit "$@";; "audit" ) addAudit "$@";;
"clearaudit" ) clearAudit;; "clearaudit" ) clearAudit;;
"-l" | "privacylevel" ) SetPrivacyLevel;; "-l" | "privacylevel" ) SetPrivacyLevel;;
"addcustomdns" ) AddCustomDNSAddress;;
"removecustomdns" ) RemoveCustomDNSAddress;;
        *                     ) helpFunc;;
    esac

@ -1,4 +1,4 @@
#!/usr/bin/env bash
# Pi-hole: A black hole for Internet advertisements
# (c) 2017 Pi-hole, LLC (https://pi-hole.net)
# Network-wide ad blocking via your own hardware.

@ -0,0 +1,188 @@
PRAGMA foreign_keys=OFF;
BEGIN TRANSACTION;
CREATE TABLE "group"
(
id INTEGER PRIMARY KEY AUTOINCREMENT,
enabled BOOLEAN NOT NULL DEFAULT 1,
name TEXT UNIQUE NOT NULL,
date_added INTEGER NOT NULL DEFAULT (cast(strftime('%s', 'now') as int)),
date_modified INTEGER NOT NULL DEFAULT (cast(strftime('%s', 'now') as int)),
description TEXT
);
INSERT INTO "group" (id,enabled,name,description) VALUES (0,1,'Default','The default group');
CREATE TABLE domainlist
(
id INTEGER PRIMARY KEY AUTOINCREMENT,
type INTEGER NOT NULL DEFAULT 0,
domain TEXT UNIQUE NOT NULL,
enabled BOOLEAN NOT NULL DEFAULT 1,
date_added INTEGER NOT NULL DEFAULT (cast(strftime('%s', 'now') as int)),
date_modified INTEGER NOT NULL DEFAULT (cast(strftime('%s', 'now') as int)),
comment TEXT
);
CREATE TABLE adlist
(
id INTEGER PRIMARY KEY AUTOINCREMENT,
address TEXT UNIQUE NOT NULL,
enabled BOOLEAN NOT NULL DEFAULT 1,
date_added INTEGER NOT NULL DEFAULT (cast(strftime('%s', 'now') as int)),
date_modified INTEGER NOT NULL DEFAULT (cast(strftime('%s', 'now') as int)),
comment TEXT
);
CREATE TABLE adlist_by_group
(
adlist_id INTEGER NOT NULL REFERENCES adlist (id),
group_id INTEGER NOT NULL REFERENCES "group" (id),
PRIMARY KEY (adlist_id, group_id)
);
CREATE TABLE gravity
(
domain TEXT NOT NULL,
adlist_id INTEGER NOT NULL REFERENCES adlist (id)
);
CREATE TABLE info
(
property TEXT PRIMARY KEY,
value TEXT NOT NULL
);
INSERT INTO "info" VALUES('version','12');
CREATE TABLE domain_audit
(
id INTEGER PRIMARY KEY AUTOINCREMENT,
domain TEXT UNIQUE NOT NULL,
date_added INTEGER NOT NULL DEFAULT (cast(strftime('%s', 'now') as int))
);
CREATE TABLE domainlist_by_group
(
domainlist_id INTEGER NOT NULL REFERENCES domainlist (id),
group_id INTEGER NOT NULL REFERENCES "group" (id),
PRIMARY KEY (domainlist_id, group_id)
);
CREATE TABLE client
(
id INTEGER PRIMARY KEY AUTOINCREMENT,
ip TEXT NOT NULL UNIQUE,
date_added INTEGER NOT NULL DEFAULT (cast(strftime('%s', 'now') as int)),
date_modified INTEGER NOT NULL DEFAULT (cast(strftime('%s', 'now') as int)),
comment TEXT
);
CREATE TABLE client_by_group
(
client_id INTEGER NOT NULL REFERENCES client (id),
group_id INTEGER NOT NULL REFERENCES "group" (id),
PRIMARY KEY (client_id, group_id)
);
CREATE TRIGGER tr_adlist_update AFTER UPDATE ON adlist
BEGIN
UPDATE adlist SET date_modified = (cast(strftime('%s', 'now') as int)) WHERE address = NEW.address;
END;
CREATE TRIGGER tr_client_update AFTER UPDATE ON client
BEGIN
UPDATE client SET date_modified = (cast(strftime('%s', 'now') as int)) WHERE ip = NEW.ip;
END;
CREATE TRIGGER tr_domainlist_update AFTER UPDATE ON domainlist
BEGIN
UPDATE domainlist SET date_modified = (cast(strftime('%s', 'now') as int)) WHERE domain = NEW.domain;
END;
CREATE VIEW vw_whitelist AS SELECT domain, domainlist.id AS id, domainlist_by_group.group_id AS group_id
FROM domainlist
LEFT JOIN domainlist_by_group ON domainlist_by_group.domainlist_id = domainlist.id
LEFT JOIN "group" ON "group".id = domainlist_by_group.group_id
WHERE domainlist.enabled = 1 AND (domainlist_by_group.group_id IS NULL OR "group".enabled = 1)
AND domainlist.type = 0
ORDER BY domainlist.id;
CREATE VIEW vw_blacklist AS SELECT domain, domainlist.id AS id, domainlist_by_group.group_id AS group_id
FROM domainlist
LEFT JOIN domainlist_by_group ON domainlist_by_group.domainlist_id = domainlist.id
LEFT JOIN "group" ON "group".id = domainlist_by_group.group_id
WHERE domainlist.enabled = 1 AND (domainlist_by_group.group_id IS NULL OR "group".enabled = 1)
AND domainlist.type = 1
ORDER BY domainlist.id;
CREATE VIEW vw_regex_whitelist AS SELECT domain, domainlist.id AS id, domainlist_by_group.group_id AS group_id
FROM domainlist
LEFT JOIN domainlist_by_group ON domainlist_by_group.domainlist_id = domainlist.id
LEFT JOIN "group" ON "group".id = domainlist_by_group.group_id
WHERE domainlist.enabled = 1 AND (domainlist_by_group.group_id IS NULL OR "group".enabled = 1)
AND domainlist.type = 2
ORDER BY domainlist.id;
CREATE VIEW vw_regex_blacklist AS SELECT domain, domainlist.id AS id, domainlist_by_group.group_id AS group_id
FROM domainlist
LEFT JOIN domainlist_by_group ON domainlist_by_group.domainlist_id = domainlist.id
LEFT JOIN "group" ON "group".id = domainlist_by_group.group_id
WHERE domainlist.enabled = 1 AND (domainlist_by_group.group_id IS NULL OR "group".enabled = 1)
AND domainlist.type = 3
ORDER BY domainlist.id;
CREATE VIEW vw_gravity AS SELECT domain, adlist_by_group.group_id AS group_id
FROM gravity
LEFT JOIN adlist_by_group ON adlist_by_group.adlist_id = gravity.adlist_id
LEFT JOIN adlist ON adlist.id = gravity.adlist_id
LEFT JOIN "group" ON "group".id = adlist_by_group.group_id
WHERE adlist.enabled = 1 AND (adlist_by_group.group_id IS NULL OR "group".enabled = 1);
CREATE VIEW vw_adlist AS SELECT DISTINCT address, adlist.id AS id
FROM adlist
LEFT JOIN adlist_by_group ON adlist_by_group.adlist_id = adlist.id
LEFT JOIN "group" ON "group".id = adlist_by_group.group_id
WHERE adlist.enabled = 1 AND (adlist_by_group.group_id IS NULL OR "group".enabled = 1)
ORDER BY adlist.id;
CREATE TRIGGER tr_domainlist_add AFTER INSERT ON domainlist
BEGIN
INSERT INTO domainlist_by_group (domainlist_id, group_id) VALUES (NEW.id, 0);
END;
CREATE TRIGGER tr_client_add AFTER INSERT ON client
BEGIN
INSERT INTO client_by_group (client_id, group_id) VALUES (NEW.id, 0);
END;
CREATE TRIGGER tr_adlist_add AFTER INSERT ON adlist
BEGIN
INSERT INTO adlist_by_group (adlist_id, group_id) VALUES (NEW.id, 0);
END;
CREATE TRIGGER tr_group_update AFTER UPDATE ON "group"
BEGIN
UPDATE "group" SET date_modified = (cast(strftime('%s', 'now') as int)) WHERE id = NEW.id;
END;
CREATE TRIGGER tr_group_zero AFTER DELETE ON "group"
BEGIN
INSERT OR IGNORE INTO "group" (id,enabled,name) VALUES (0,1,'Default');
END;
CREATE TRIGGER tr_domainlist_delete AFTER DELETE ON domainlist
BEGIN
DELETE FROM domainlist_by_group WHERE domainlist_id = OLD.id;
END;
CREATE TRIGGER tr_adlist_delete AFTER DELETE ON adlist
BEGIN
DELETE FROM adlist_by_group WHERE adlist_id = OLD.id;
END;
CREATE TRIGGER tr_client_delete AFTER DELETE ON client
BEGIN
DELETE FROM client_by_group WHERE client_id = OLD.id;
END;
COMMIT;
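A small sanity-check sketch against this schema (shell commands; the adlist URL is a placeholder): adding a list and reading it back through vw_adlist relies on the tr_adlist_add trigger linking the new row to the default group (id 0).

    sqlite3 /etc/pihole/gravity.db "INSERT INTO adlist (address, comment) VALUES ('https://example.com/hosts.txt', 'demo list');"
    sqlite3 /etc/pihole/gravity.db "SELECT id, address FROM vw_adlist;"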

@ -0,0 +1,42 @@
.timeout 30000
ATTACH DATABASE '/etc/pihole/gravity.db' AS OLD;
BEGIN TRANSACTION;
DROP TRIGGER tr_domainlist_add;
DROP TRIGGER tr_client_add;
DROP TRIGGER tr_adlist_add;
INSERT OR REPLACE INTO "group" SELECT * FROM OLD."group";
INSERT OR REPLACE INTO domain_audit SELECT * FROM OLD.domain_audit;
INSERT OR REPLACE INTO domainlist SELECT * FROM OLD.domainlist;
INSERT OR REPLACE INTO domainlist_by_group SELECT * FROM OLD.domainlist_by_group;
INSERT OR REPLACE INTO adlist SELECT * FROM OLD.adlist;
INSERT OR REPLACE INTO adlist_by_group SELECT * FROM OLD.adlist_by_group;
INSERT OR REPLACE INTO info SELECT * FROM OLD.info;
INSERT OR REPLACE INTO client SELECT * FROM OLD.client;
INSERT OR REPLACE INTO client_by_group SELECT * FROM OLD.client_by_group;
CREATE TRIGGER tr_domainlist_add AFTER INSERT ON domainlist
BEGIN
INSERT INTO domainlist_by_group (domainlist_id, group_id) VALUES (NEW.id, 0);
END;
CREATE TRIGGER tr_client_add AFTER INSERT ON client
BEGIN
INSERT INTO client_by_group (client_id, group_id) VALUES (NEW.id, 0);
END;
CREATE TRIGGER tr_adlist_add AFTER INSERT ON adlist
BEGIN
INSERT INTO adlist_by_group (adlist_id, group_id) VALUES (NEW.id, 0);
END;
COMMIT;
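For context, a hedged sketch of how such a copy script is typically driven (the file names are the assumed template names and the temporary target path is illustrative): a fresh database is built from the schema first, then this script is run against it so the ATTACH above pulls the rows out of the existing /etc/pihole/gravity.db.

    sqlite3 /etc/pihole/gravity.db_temp < gravity.db.sql
    sqlite3 /etc/pihole/gravity.db_temp < gravity_copy.sql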

@ -1,4 +1,4 @@
#!/usr/bin/env bash
### BEGIN INIT INFO
# Provides: pihole-FTL
# Required-Start: $remote_fs $syslog
@ -48,7 +48,8 @@ start() {
  chown pihole:pihole /etc/pihole /etc/pihole/dhcp.leases 2> /dev/null
  chown pihole:pihole /var/log/pihole-FTL.log /var/log/pihole.log
  chmod 0644 /var/log/pihole-FTL.log /run/pihole-FTL.pid /run/pihole-FTL.port /var/log/pihole.log
echo "nameserver 127.0.0.1" | /sbin/resolvconf -a lo.piholeFTL # Chown database files to the user FTL runs as. We ignore errors as the files may not (yet) exist
chown pihole:pihole /etc/pihole/pihole-FTL.db /etc/pihole/gravity.db 2> /dev/null
  if setcap CAP_NET_BIND_SERVICE,CAP_NET_RAW,CAP_NET_ADMIN+eip "$(which pihole-FTL)"; then
    su -s /bin/sh -c "/usr/bin/pihole-FTL" "$FTLUSER"
  else
@ -62,7 +63,6 @@ start() {
# Stop the service
stop() {
  if is_running; then
/sbin/resolvconf -d lo.piholeFTL
kill "$(get_pid)" kill "$(get_pid)"
for i in {1..5}; do for i in {1..5}; do
if ! is_running; then if ! is_running; then

@ -7,7 +7,7 @@ _pihole() {
case "${prev}" in case "${prev}" in
"pihole") "pihole")
opts="admin blacklist checkout chronometer debug disable enable flush help logging query reconfigure regex restartdns status tail uninstall updateGravity updatePihole version wildcard whitelist" opts="admin blacklist checkout chronometer debug disable enable flush help logging query reconfigure regex restartdns status tail uninstall updateGravity updatePihole version wildcard whitelist arpflush"
COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) ) COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
;; ;;
"whitelist"|"blacklist"|"wildcard"|"regex") "whitelist"|"blacklist"|"wildcard"|"regex")
@ -15,7 +15,7 @@ _pihole() {
COMPREPLY=( $(compgen -W "${opts_lists}" -- ${cur}) ) COMPREPLY=( $(compgen -W "${opts_lists}" -- ${cur}) )
;; ;;
"admin") "admin")
opts_admin="celsius email fahrenheit hostrecord interface kelvin password privacylevel" opts_admin="celsius email fahrenheit interface kelvin password privacylevel"
COMPREPLY=( $(compgen -W "${opts_admin}" -- ${cur}) ) COMPREPLY=( $(compgen -W "${opts_admin}" -- ${cur}) )
;; ;;
"checkout") "checkout")

@ -6,8 +6,8 @@
 * This file is copyright under the latest version of the EUPL.
 * Please see LICENSE file for your rights under this license. */

// Sanitize SERVER_NAME output
$serverName = htmlspecialchars($_SERVER["SERVER_NAME"]);
// Remove external ipv6 brackets if any
$serverName = preg_replace('/^\[(.*)\]$/', '${1}', $serverName);
@ -50,16 +50,24 @@ function setHeader($type = "x") {
}

// Determine block page type
if ($serverName === "pi.hole") { if ($serverName === "pi.hole"
|| (!empty($_SERVER["VIRTUAL_HOST"]) && $serverName === $_SERVER["VIRTUAL_HOST"])) {
// Redirect to Web Interface // Redirect to Web Interface
exit(header("Location: /admin")); exit(header("Location: /admin"));
} elseif (filter_var($serverName, FILTER_VALIDATE_IP) || in_array($serverName, $authorizedHosts)) { } elseif (filter_var($serverName, FILTER_VALIDATE_IP) || in_array($serverName, $authorizedHosts)) {
// Set Splash Page output // Set Splash Page output
$splashPage = " $splashPage = "
<html><head> <html>
<head>
$viewPort $viewPort
<link rel='stylesheet' href='/pihole/blockingpage.css' type='text/css'/> <link rel='stylesheet' href='pihole/blockingpage.css' type='text/css'/>
</head><body id='splashpage'><img src='/admin/img/logo.svg'/><br/>Pi-<b>hole</b>: Your black hole for Internet advertisements<br><a href='/admin'>Did you mean to go to the admin panel?</a></body></html> </head>
<body id='splashpage'>
<img src='admin/img/logo.svg'/><br/>
Pi-<b>hole</b>: Your black hole for Internet advertisements<br/>
<a href='/admin'>Did you mean to go to the admin panel?</a>
</body>
</html>
"; ";
// Set splash/landing page based off presence of $landPage // Set splash/landing page based off presence of $landPage
@ -68,7 +76,7 @@ if ($serverName === "pi.hole") {
    // Unset variables so as to not be included in $landPage
    unset($serverName, $svPasswd, $svEmail, $authorizedHosts, $validExtTypes, $currentUrlExt, $viewPort);

    // Render splash/landing page when directly browsing via IP or authorized hostname
    exit($renderPage);
} elseif ($currentUrlExt === "js") {
    // Serve Pi-hole Javascript for blocked domains requesting JS
@ -96,26 +104,30 @@ if ($serverName === "pi.hole") {
// Define admin email address text based off $svEmail presence
$bpAskAdmin = !empty($svEmail) ? '<a href="mailto:'.$svEmail.'?subject=Site Blocked: '.$serverName.'"></a>' : "<span/>";

// Get possible non-standard location of FTL's database
$FTLsettings = parse_ini_file("/etc/pihole/pihole-FTL.conf");
if (isset($FTLsettings["GRAVITYDB"])) {
    $gravityDBFile = $FTLsettings["GRAVITYDB"];
} else {
    $gravityDBFile = "/etc/pihole/gravity.db";
}

// Connect to gravity.db
try {
    $db = new SQLite3($gravityDBFile, SQLITE3_OPEN_READONLY);
} catch (Exception $exception) {
    die("[ERROR]: Failed to connect to gravity.db");
}

// Get all adlist addresses
$adlistResults = $db->query("SELECT address FROM vw_adlist");
$adlistsUrls = array();
while ($row = $adlistResults->fetchArray()) {
    array_push($adlistsUrls, $row[0]);
}

if (empty($adlistsUrls))
    die("[ERROR]: There are no adlists enabled");
// Get total number of blocklists (Including Whitelist, Blacklist & Wildcard lists)
$adlistsCount = count($adlistsUrls) + 3;
@ -127,7 +139,12 @@ ini_set("default_socket_timeout", 3);
function queryAds($serverName) {
    // Determine the time it takes while querying adlists
    $preQueryTime = microtime(true)-$_SERVER["REQUEST_TIME_FLOAT"];
    $queryAdsURL = sprintf(
        "http://127.0.0.1:%s/admin/scripts/pi-hole/php/queryads.php?domain=%s&bp",
        $_SERVER["SERVER_PORT"],
        $serverName
    );
    $queryAds = file($queryAdsURL, FILE_IGNORE_NEW_LINES);
    $queryAds = array_values(array_filter(preg_replace("/data:\s+/", "", $queryAds)));
    $queryTime = sprintf("%.0f", (microtime(true)-$_SERVER["REQUEST_TIME_FLOAT"]) - $preQueryTime);
@ -205,7 +222,7 @@ $phVersion = exec("cd /etc/.pihole/ && git describe --long --tags");
if (explode("-", $phVersion)[1] != "0") if (explode("-", $phVersion)[1] != "0")
$execTime = microtime(true)-$_SERVER["REQUEST_TIME_FLOAT"]; $execTime = microtime(true)-$_SERVER["REQUEST_TIME_FLOAT"];
// Please Note: Text is added via CSS to allow an admin to provide a localised // Please Note: Text is added via CSS to allow an admin to provide a localized
// language without the need to edit this file // language without the need to edit this file
setHeader(); setHeader();
@ -222,10 +239,10 @@ setHeader();
    <?=$viewPort ?>
    <meta name="robots" content="noindex,nofollow"/>
    <meta http-equiv="x-dns-prefetch-control" content="off">
    <link rel="shortcut icon" href="admin/img/favicon.png" type="image/x-icon"/>
    <link rel="stylesheet" href="pihole/blockingpage.css" type="text/css"/>
    <title><?=$serverName ?></title>
    <script src="admin/scripts/vendor/jquery.min.js"></script>
    <script>
        window.onload = function () {
            <?php

@ -70,7 +70,6 @@ PI_HOLE_BLOCKPAGE_DIR="${webroot}/pihole"
useUpdateVars=false

adlistFile="/etc/pihole/adlists.list"
regexFile="/etc/pihole/regex.list"
# Pi-hole needs an IP address; to begin, these variables are empty since we don't know what the IP is until
# this script can run
IPV4_ADDRESS=""
@ -124,7 +123,7 @@ done
# If the color table file exists,
if [[ -f "${coltable}" ]]; then
    # source it
    source "${coltable}"
# Otherwise,
else
    # Set these values so the installer can still run in color
@ -185,26 +184,26 @@ if is_command apt-get ; then
    # A variable to store the command used to update the package cache
    UPDATE_PKG_CACHE="${PKG_MANAGER} update"
    # An array to store the package-install command and its options
    PKG_INSTALL=("${PKG_MANAGER}" --yes --no-install-recommends install)
    # grep -c will return 1 retVal on 0 matches, block this throwing the set -e with an OR TRUE
    PKG_COUNT="${PKG_MANAGER} -s -o Debug::NoLocking=true upgrade | grep -c ^Inst || true"
    # Some distros vary slightly so these fixes for dependencies may apply
    # on Ubuntu 18.04.1 LTS we need to add the universe repository to gain access to dhcpcd5
    APT_SOURCES="/etc/apt/sources.list"
    if awk 'BEGIN{a=1;b=0}/bionic main/{a=0}/bionic.*universe/{b=1}END{exit a + b}' ${APT_SOURCES}; then
        if ! whiptail --defaultno --title "Dependencies Require Update to Allowed Repositories" --yesno "Would you like to enable 'universe' repository?\\n\\nThis repository is required by the following packages:\\n\\n- dhcpcd5" "${r}" "${c}"; then
            printf " %b Aborting installation: dependencies could not be installed.\\n" "${CROSS}"
            exit # exit the installer
        else
            printf " %b Enabling universe package repository for Ubuntu Bionic\\n" "${INFO}"
            cp -p ${APT_SOURCES} ${APT_SOURCES}.backup # Backup current repo list
            printf " %b Backed up current configuration to %s\\n" "${TICK}" "${APT_SOURCES}.backup"
            add-apt-repository universe
            printf " %b Enabled %s\\n" "${TICK}" "'universe' repository"
        fi
    fi
    # Debian 7 doesn't have iproute2 so if the dry run install is successful,
    if "${PKG_MANAGER}" install --dry-run iproute2 > /dev/null 2>&1; then
        # we can install it
        iproute_pkg="iproute2"
    # Otherwise,
@ -225,7 +224,7 @@ if is_command apt-get ; then
    # Check if installed php is v 7.0, or newer to determine packages to install
    if [[ "$phpInsNewer" != true ]]; then
        # Prefer the php metapackage if it's there
        if "${PKG_MANAGER}" install --dry-run php > /dev/null 2>&1; then
            phpVer="php"
        # fall back on the php5 packages
        else
@ -236,19 +235,19 @@ if is_command apt-get ; then
phpVer="php$phpInsMajor.$phpInsMinor" phpVer="php$phpInsMajor.$phpInsMinor"
fi fi
# We also need the correct version for `php-sqlite` (which differs across distros) # We also need the correct version for `php-sqlite` (which differs across distros)
if ${PKG_MANAGER} install --dry-run ${phpVer}-sqlite3 > /dev/null 2>&1; then if "${PKG_MANAGER}" install --dry-run "${phpVer}-sqlite3" > /dev/null 2>&1; then
phpSqlite="sqlite3" phpSqlite="sqlite3"
else else
phpSqlite="sqlite" phpSqlite="sqlite"
fi fi
# Since our install script is so large, we need several other programs to successfully get a machine provisioned # Since our install script is so large, we need several other programs to successfully get a machine provisioned
# These programs are stored in an array so they can be looped through later # These programs are stored in an array so they can be looped through later
INSTALLER_DEPS=(apt-utils dialog debconf dhcpcd5 git ${iproute_pkg} whiptail) INSTALLER_DEPS=(dhcpcd5 git "${iproute_pkg}" whiptail)
# Pi-hole itself has several dependencies that also need to be installed # Pi-hole itself has several dependencies that also need to be installed
PIHOLE_DEPS=(cron curl dnsutils iputils-ping lsof netcat psmisc sudo unzip wget idn2 sqlite3 libcap2-bin dns-root-data resolvconf libcap2) PIHOLE_DEPS=(cron curl dnsutils iputils-ping lsof netcat psmisc sudo unzip wget idn2 sqlite3 libcap2-bin dns-root-data libcap2)
# The Web dashboard has some that also need to be installed # The Web dashboard has some that also need to be installed
# It's useful to separate the two since our repos are also setup as "Core" code and "Web" code # It's useful to separate the two since our repos are also setup as "Core" code and "Web" code
PIHOLE_WEB_DEPS=(lighttpd ${phpVer}-common ${phpVer}-cgi ${phpVer}-${phpSqlite}) PIHOLE_WEB_DEPS=(lighttpd "${phpVer}-common" "${phpVer}-cgi" "${phpVer}-${phpSqlite}" "${phpVer}-xml" "php-intl")
# The Web server user, # The Web server user,
LIGHTTPD_USER="www-data" LIGHTTPD_USER="www-data"
# group, # group,
@ -284,19 +283,18 @@ elif is_command rpm ; then
    # Fedora and family update cache on every PKG_INSTALL call, no need for a separate update.
    UPDATE_PKG_CACHE=":"
    PKG_INSTALL=("${PKG_MANAGER}" install -y)
    PKG_COUNT="${PKG_MANAGER} check-update | egrep '(.i686|.x86|.noarch|.arm|.src)' | wc -l"
    INSTALLER_DEPS=(git iproute newt procps-ng which chkconfig)
    PIHOLE_DEPS=(bind-utils cronie curl findutils nmap-ncat sudo unzip wget libidn2 psmisc sqlite libcap)
    PIHOLE_WEB_DEPS=(lighttpd lighttpd-fastcgi php-common php-cli php-pdo php-xml php-json php-intl)
    LIGHTTPD_USER="lighttpd"
    LIGHTTPD_GROUP="lighttpd"
    LIGHTTPD_CFG="lighttpd.conf.fedora"
    # If the host OS is Fedora,
    if grep -qiE 'fedora|fedberry' /etc/redhat-release; then
# all required packages should be available by default with the latest fedora release # all required packages should be available by default with the latest fedora release
        : # continue
PIHOLE_WEB_DEPS+=('php-json')
    # or if host OS is CentOS,
    elif grep -qiE 'centos|scientific' /etc/redhat-release; then
        # Pi-Hole currently supports CentOS 7+ with PHP7+
@ -311,7 +309,21 @@ elif is_command rpm ; then
            # exit the installer
            exit
        fi
        # php-json is not required on CentOS 7 as it is already compiled into php
        # verify via `php -m | grep json`
if [[ $CURRENT_CENTOS_VERSION -eq 7 ]]; then
# create a temporary array as arrays are not designed for use as mutable data structures
CENTOS7_PIHOLE_WEB_DEPS=()
for i in "${!PIHOLE_WEB_DEPS[@]}"; do
if [[ ${PIHOLE_WEB_DEPS[i]} != "php-json" ]]; then
CENTOS7_PIHOLE_WEB_DEPS+=( "${PIHOLE_WEB_DEPS[i]}" )
fi
done
# re-assign the clean dependency array back to PIHOLE_WEB_DEPS
PIHOLE_WEB_DEPS=("${CENTOS7_PIHOLE_WEB_DEPS[@]}")
unset CENTOS7_PIHOLE_WEB_DEPS
fi
# CentOS requires the EPEL repository to gain access to Fedora packages
        EPEL_PKG="epel-release"
        rpm -q ${EPEL_PKG} &> /dev/null || rc=$?
        if [[ $rc -ne 0 ]]; then
@ -322,7 +334,7 @@ elif is_command rpm ; then
        # The default php on CentOS 7.x is 5.4 which is EOL
        # Check if the version of PHP available via installed repositories is >= to PHP 7
        AVAILABLE_PHP_VERSION=$("${PKG_MANAGER}" info php | grep -i version | grep -o '[0-9]\+' | head -1)
        if [[ $AVAILABLE_PHP_VERSION -ge $SUPPORTED_CENTOS_PHP_VERSION ]]; then
            # Since PHP 7 is available by default, install via default PHP package names
            : # do nothing as PHP is current
@ -332,7 +344,7 @@ elif is_command rpm ; then
            rpm -q ${REMI_PKG} &> /dev/null || rc=$?
            if [[ $rc -ne 0 ]]; then
                # The PHP version available via default repositories is older than version 7
                if ! whiptail --defaultno --title "PHP 7 Update (recommended)" --yesno "PHP 7.x is recommended for both security and language features.\\nWould you like to install PHP7 via Remi's RPM repository?\\n\\nSee: https://rpms.remirepo.net for more information" "${r}" "${c}"; then
                    # User decided to NOT update PHP from REMI, attempt to install the default available PHP version
                    printf " %b User opt-out of PHP 7 upgrade on CentOS. Deprecated PHP may be in use.\\n" "${INFO}"
                    : # continue with unsupported php version
@ -355,7 +367,7 @@ elif is_command rpm ; then
        fi
    else
        # Warn user of unsupported version of Fedora or CentOS
        if ! whiptail --defaultno --title "Unsupported RPM based distribution" --yesno "Would you like to continue installation on an unsupported RPM based distribution?\\n\\nPlease ensure the following packages have been installed manually:\\n\\n- lighttpd\\n- lighttpd-fastcgi\\n- PHP version 7+" "${r}" "${c}"; then
            printf " %b Aborting installation due to unsupported RPM based distribution\\n" "${CROSS}"
            exit # exit the installer
        else
@ -377,16 +389,12 @@ is_repo() {
    # Use a named, local variable instead of the vague $1, which is the first argument passed to this function
    # These local variables should always be lowercase
    local directory="${1}"
# A local variable for the current directory
local curdir
    # A variable to store the return code
    local rc
# Assign the current directory variable by using pwd
curdir="${PWD}"
    # If the first argument passed to this function is a directory,
    if [[ -d "${directory}" ]]; then
        # move into the directory
        pushd "${directory}" &> /dev/null || return 1
        # Use git to check if the directory is a repo
        # git -C is not used here to support git versions older than 1.8.4
        git status --short &> /dev/null || rc=$?
@ -396,7 +404,7 @@ is_repo() {
        rc=1
    fi
    # Move back into the directory the user started in
    popd &> /dev/null || return 1
    # Return the code; if one is not set, return 0
    return "${rc:-0}"
}
@ -406,6 +414,7 @@ make_repo() {
    # Set named variables for better readability
    local directory="${1}"
    local remoteRepo="${2}"

    # The message to display when this function is running
    str="Clone ${remoteRepo} into ${directory}"
# Display the message and use the color table to preface the message with an "info" indicator # Display the message and use the color table to preface the message with an "info" indicator
@ -417,9 +426,21 @@ make_repo() {
    fi
    # Clone the repo and return the return code from this command
    git clone -q --depth 20 "${remoteRepo}" "${directory}" &> /dev/null || return $?
# Data in the repositories is public anyway so we can make it readable by everyone (+r to keep executable permission if already set by git)
chmod -R a+rX "${directory}"
# Move into the directory that was passed as an argument
pushd "${directory}" &> /dev/null || return 1
    # Check current branch. If it is master, then reset to the latest available tag.
    # In case extra commits have been added after tagging/release (i.e. in case of metadata updates/README.MD tweaks)
curBranch=$(git rev-parse --abbrev-ref HEAD)
if [[ "${curBranch}" == "master" ]]; then #If we're calling make_repo() then it should always be master, we may not need to check.
git reset --hard "$(git describe --abbrev=0 --tags)" || return $?
fi
    # Show a colored message showing its status
    printf "%b %b %s\\n" "${OVER}" "${TICK}" "${str}"
# Always return 0? Not sure this is correct
# Move back into the original directory
popd &> /dev/null || return 1
    return 0
}
@ -430,17 +451,14 @@ update_repo() {
    # but since they are local, their scope does not go beyond this function
    # This helps prevent the wrong value from being assigned if you were to set the variable as a GLOBAL one
    local directory="${1}"
    local curBranch
    # A variable to store the message we want to display;
    # Again, it's useful to store these in variables in case we need to reuse or change the message;
    # we only need to make one change here
    local str="Update repo in ${1}"
# Make sure we know what directory we are in so we can move back into it
curdir="${PWD}"
    # Move into the directory that was passed as an argument
    pushd "${directory}" &> /dev/null || return 1
    # Let the user know what's happening
    printf " %b %s..." "${INFO}" "${str}"
    # Stash any local commits as they conflict with our working code
@ -448,10 +466,18 @@ update_repo() {
    git clean --quiet --force -d || true # Okay for already clean directory
    # Pull the latest commits
    git pull --quiet &> /dev/null || return $?
    # Check current branch. If it is master, then reset to the latest available tag.
    # In case extra commits have been added after tagging/release (i.e. in case of metadata updates/README.MD tweaks)
curBranch=$(git rev-parse --abbrev-ref HEAD)
if [[ "${curBranch}" == "master" ]]; then
git reset --hard "$(git describe --abbrev=0 --tags)" || return $?
fi
    # Show a completion message
    printf "%b %b %s\\n" "${OVER}" "${TICK}" "${str}"
# Data in the repositories is public anyway so we can make it readable by everyone (+r to keep executable permission if already set by git)
chmod -R a+rX "${directory}"
    # Move back into the original directory
    popd &> /dev/null || return 1
    return 0
}
@ -490,15 +516,19 @@ resetRepo() {
    # Use named variables for arguments
    local directory="${1}"
    # Move into the directory
    pushd "${directory}" &> /dev/null || return 1
    # Store the message in a variable
    str="Resetting repository within ${1}..."
    # Show the message
    printf " %b %s..." "${INFO}" "${str}"
    # Use git to remove the local changes
    git reset --hard &> /dev/null || return $?
# Data in the repositories is public anyway so we can make it readable by everyone (+r to keep executable permission if already set by git)
chmod -R a+rX "${directory}"
    # And show the status
    printf "%b %b %s\\n" "${OVER}" "${TICK}" "${str}"
# Return to where we came from
popd &> /dev/null || return 1
    # Returning success anyway?
    return 0
}
@ -540,15 +570,15 @@ get_available_interfaces() {
# A function for displaying the dialogs the user sees when first running the installer # A function for displaying the dialogs the user sees when first running the installer
welcomeDialogs() { welcomeDialogs() {
# Display the welcome dialog using an appropriately sized window via the calculation conducted earlier in the script # Display the welcome dialog using an appropriately sized window via the calculation conducted earlier in the script
whiptail --msgbox --backtitle "Welcome" --title "Pi-hole automated installer" "\\n\\nThis installer will transform your device into a network-wide ad blocker!" ${r} ${c} whiptail --msgbox --backtitle "Welcome" --title "Pi-hole automated installer" "\\n\\nThis installer will transform your device into a network-wide ad blocker!" "${r}" "${c}"
# Request that users donate if they enjoy the software since we all work on it in our free time # Request that users donate if they enjoy the software since we all work on it in our free time
whiptail --msgbox --backtitle "Plea" --title "Free and open source" "\\n\\nThe Pi-hole is free, but powered by your donations: http://pi-hole.net/donate" ${r} ${c} whiptail --msgbox --backtitle "Plea" --title "Free and open source" "\\n\\nThe Pi-hole is free, but powered by your donations: http://pi-hole.net/donate" "${r}" "${c}"
# Explain the need for a static address # Explain the need for a static address
whiptail --msgbox --backtitle "Initiating network interface" --title "Static IP Needed" "\\n\\nThe Pi-hole is a SERVER so it needs a STATIC IP ADDRESS to function properly. whiptail --msgbox --backtitle "Initiating network interface" --title "Static IP Needed" "\\n\\nThe Pi-hole is a SERVER so it needs a STATIC IP ADDRESS to function properly.
In the next section, you can choose to use your current network settings (DHCP) or to manually edit them." ${r} ${c} In the next section, you can choose to use your current network settings (DHCP) or to manually edit them." "${r}" "${c}"
} }
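# A minimal sketch of why the dialog dimensions above are now quoted; values are illustrative.
# Unquoted, an empty ${r} or ${c} would silently disappear from the argument list and shift the
# remaining arguments; quoted, whiptail receives exactly two dimension arguments and fails visibly instead.
r=20
c=70
whiptail --msgbox --backtitle "Welcome" --title "Example dialog" "Example body text" "${r}" "${c}"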
# We need to make sure there is enough space before installing, so there is a function to check this # We need to make sure there is enough space before installing, so there is a function to check this
@ -635,7 +665,7 @@ chooseInterface() {
# Feed the available interfaces into this while loop # Feed the available interfaces into this while loop
done <<< "${availableInterfaces}" done <<< "${availableInterfaces}"
# The whiptail command that will be run, stored in a variable # The whiptail command that will be run, stored in a variable
chooseInterfaceCmd=(whiptail --separate-output --radiolist "Choose An Interface (press space to select)" ${r} ${c} ${interfaceCount}) chooseInterfaceCmd=(whiptail --separate-output --radiolist "Choose An Interface (press space to select)" "${r}" "${c}" "${interfaceCount}")
# Now run the command using the interfaces saved into the array # Now run the command using the interfaces saved into the array
chooseInterfaceOptions=$("${chooseInterfaceCmd[@]}" "${interfacesArray[@]}" 2>&1 >/dev/tty) || \ chooseInterfaceOptions=$("${chooseInterfaceCmd[@]}" "${interfacesArray[@]}" 2>&1 >/dev/tty) || \
# If the user chooses Cancel, exit # If the user chooses Cancel, exit
@ -716,7 +746,7 @@ useIPv6dialog() {
# If the IPV6_ADDRESS contains a value # If the IPV6_ADDRESS contains a value
if [[ ! -z "${IPV6_ADDRESS}" ]]; then if [[ ! -z "${IPV6_ADDRESS}" ]]; then
# Display that IPv6 is supported and will be used # Display that IPv6 is supported and will be used
whiptail --msgbox --backtitle "IPv6..." --title "IPv6 Supported" "$IPV6_ADDRESS will be used to block ads." ${r} ${c} whiptail --msgbox --backtitle "IPv6..." --title "IPv6 Supported" "$IPV6_ADDRESS will be used to block ads." "${r}" "${c}"
fi fi
} }
@ -726,7 +756,7 @@ use4andor6() {
local useIPv4 local useIPv4
local useIPv6 local useIPv6
# Let the user select IPv4 and/or IPv6 via a checklist # Let the user select IPv4 and/or IPv6 via a checklist
cmd=(whiptail --separate-output --checklist "Select Protocols (press space to select)" ${r} ${c} 2) cmd=(whiptail --separate-output --checklist "Select Protocols (press space to select)" "${r}" "${c}" 2)
# In an array, show the options available: # In an array, show the options available:
# IPv4 (on by default) # IPv4 (on by default)
options=(IPv4 "Block ads over IPv4" on options=(IPv4 "Block ads over IPv4" on
@ -775,11 +805,11 @@ getStaticIPv4Settings() {
# This is useful for users that are using DHCP reservations; then we can just use the information gathered via our functions # This is useful for users that are using DHCP reservations; then we can just use the information gathered via our functions
if whiptail --backtitle "Calibrating network interface" --title "Static IP Address" --yesno "Do you want to use your current network settings as a static address? if whiptail --backtitle "Calibrating network interface" --title "Static IP Address" --yesno "Do you want to use your current network settings as a static address?
IP address: ${IPV4_ADDRESS} IP address: ${IPV4_ADDRESS}
Gateway: ${IPv4gw}" ${r} ${c}; then Gateway: ${IPv4gw}" "${r}" "${c}"; then
# If they choose yes, let the user know that the IP address will not be available via DHCP and may cause a conflict. # If they choose yes, let the user know that the IP address will not be available via DHCP and may cause a conflict.
whiptail --msgbox --backtitle "IP information" --title "FYI: IP Conflict" "It is possible your router could still try to assign this IP to a device, which would cause a conflict. But in most cases the router is smart enough to not do that. whiptail --msgbox --backtitle "IP information" --title "FYI: IP Conflict" "It is possible your router could still try to assign this IP to a device, which would cause a conflict. But in most cases the router is smart enough to not do that.
If you are worried, either manually set the address, or modify the DHCP reservation pool so it does not include the IP you want. If you are worried, either manually set the address, or modify the DHCP reservation pool so it does not include the IP you want.
It is also possible to use a DHCP reservation, but if you are going to do that, you might as well set a static address." ${r} ${c} It is also possible to use a DHCP reservation, but if you are going to do that, you might as well set a static address." "${r}" "${c}"
# Nothing else to do since the variables are already set above # Nothing else to do since the variables are already set above
else else
# Otherwise, we need to ask the user to input their desired settings. # Otherwise, we need to ask the user to input their desired settings.
@ -788,13 +818,13 @@ It is also possible to use a DHCP reservation, but if you are going to do that,
until [[ "${ipSettingsCorrect}" = True ]]; do until [[ "${ipSettingsCorrect}" = True ]]; do
# Ask for the IPv4 address # Ask for the IPv4 address
IPV4_ADDRESS=$(whiptail --backtitle "Calibrating network interface" --title "IPv4 address" --inputbox "Enter your desired IPv4 address" ${r} ${c} "${IPV4_ADDRESS}" 3>&1 1>&2 2>&3) || \ IPV4_ADDRESS=$(whiptail --backtitle "Calibrating network interface" --title "IPv4 address" --inputbox "Enter your desired IPv4 address" "${r}" "${c}" "${IPV4_ADDRESS}" 3>&1 1>&2 2>&3) || \
# Cancelling IPv4 settings window # Cancelling IPv4 settings window
{ ipSettingsCorrect=False; echo -e " ${COL_LIGHT_RED}Cancel was selected, exiting installer${COL_NC}"; exit 1; } { ipSettingsCorrect=False; echo -e " ${COL_LIGHT_RED}Cancel was selected, exiting installer${COL_NC}"; exit 1; }
printf " %b Your static IPv4 address: %s\\n" "${INFO}" "${IPV4_ADDRESS}" printf " %b Your static IPv4 address: %s\\n" "${INFO}" "${IPV4_ADDRESS}"
# Ask for the gateway # Ask for the gateway
IPv4gw=$(whiptail --backtitle "Calibrating network interface" --title "IPv4 gateway (router)" --inputbox "Enter your desired IPv4 default gateway" ${r} ${c} "${IPv4gw}" 3>&1 1>&2 2>&3) || \ IPv4gw=$(whiptail --backtitle "Calibrating network interface" --title "IPv4 gateway (router)" --inputbox "Enter your desired IPv4 default gateway" "${r}" "${c}" "${IPv4gw}" 3>&1 1>&2 2>&3) || \
# Cancelling gateway settings window # Cancelling gateway settings window
{ ipSettingsCorrect=False; echo -e " ${COL_LIGHT_RED}Cancel was selected, exiting installer${COL_NC}"; exit 1; } { ipSettingsCorrect=False; echo -e " ${COL_LIGHT_RED}Cancel was selected, exiting installer${COL_NC}"; exit 1; }
printf " %b Your static IPv4 gateway: %s\\n" "${INFO}" "${IPv4gw}" printf " %b Your static IPv4 gateway: %s\\n" "${INFO}" "${IPv4gw}"
@ -802,7 +832,7 @@ It is also possible to use a DHCP reservation, but if you are going to do that,
# Give the user a chance to review their settings before moving on # Give the user a chance to review their settings before moving on
if whiptail --backtitle "Calibrating network interface" --title "Static IP Address" --yesno "Are these settings correct? if whiptail --backtitle "Calibrating network interface" --title "Static IP Address" --yesno "Are these settings correct?
IP address: ${IPV4_ADDRESS} IP address: ${IPV4_ADDRESS}
Gateway: ${IPv4gw}" ${r} ${c}; then Gateway: ${IPv4gw}" "${r}" "${c}"; then
# After that's done, the loop ends and we move on # After that's done, the loop ends and we move on
ipSettingsCorrect=True ipSettingsCorrect=True
else else
@ -825,11 +855,12 @@ setDHCPCD() {
echo "interface ${PIHOLE_INTERFACE} echo "interface ${PIHOLE_INTERFACE}
static ip_address=${IPV4_ADDRESS} static ip_address=${IPV4_ADDRESS}
static routers=${IPv4gw} static routers=${IPv4gw}
static domain_name_servers=127.0.0.1" | tee -a /etc/dhcpcd.conf >/dev/null static domain_name_servers=${PIHOLE_DNS_1} ${PIHOLE_DNS_2}" | tee -a /etc/dhcpcd.conf >/dev/null
# Then use the ip command to immediately set the new address # Then use the ip command to immediately set the new address
ip addr replace dev "${PIHOLE_INTERFACE}" "${IPV4_ADDRESS}" ip addr replace dev "${PIHOLE_INTERFACE}" "${IPV4_ADDRESS}"
# Also give a warning that the user may need to reboot their system # Also give a warning that the user may need to reboot their system
printf " %b Set IP address to %s \\n You may need to restart after the install is complete\\n" "${TICK}" "${IPV4_ADDRESS%/*}" printf " %b Set IP address to %s\\n" "${TICK}" "${IPV4_ADDRESS%/*}"
printf " %b You may need to restart after the install is complete\\n" "${INFO}"
fi fi
} }
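# A sketch of the stanza setDHCPCD() appends to /etc/dhcpcd.conf, shown for illustrative values
# only (interface eth0, address 192.168.1.2/24, gateway 192.168.1.1, upstream DNS 8.8.8.8 and
# 8.8.4.4); with the change above the host's resolvers now point at the chosen upstream servers
# rather than at 127.0.0.1:
#
#   interface eth0
#   static ip_address=192.168.1.2/24
#   static routers=192.168.1.1
#   static domain_name_servers=8.8.8.8 8.8.4.4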
@ -850,7 +881,7 @@ setIFCFG() {
# Put the IP in variables without the CIDR notation # Put the IP in variables without the CIDR notation
printf -v CIDR "%s" "${IPV4_ADDRESS##*/}" printf -v CIDR "%s" "${IPV4_ADDRESS##*/}"
# Backup existing interface configuration: # Backup existing interface configuration:
cp "${IFCFG_FILE}" "${IFCFG_FILE}".pihole.orig cp -p "${IFCFG_FILE}" "${IFCFG_FILE}".pihole.orig
# Build Interface configuration file using the GLOBAL variables we have # Build Interface configuration file using the GLOBAL variables we have
{ {
echo "# Configured via Pi-hole installer" echo "# Configured via Pi-hole installer"
@ -864,6 +895,8 @@ setIFCFG() {
echo "DNS2=$PIHOLE_DNS_2" echo "DNS2=$PIHOLE_DNS_2"
echo "USERCTL=no" echo "USERCTL=no"
}> "${IFCFG_FILE}" }> "${IFCFG_FILE}"
chmod 644 "${IFCFG_FILE}"
chown root:root "${IFCFG_FILE}"
# Use ip to immediately set the new address # Use ip to immediately set the new address
ip addr replace dev "${PIHOLE_INTERFACE}" "${IPV4_ADDRESS}" ip addr replace dev "${PIHOLE_INTERFACE}" "${IPV4_ADDRESS}"
# If NetworkMangler command line interface exists and ready to mangle, # If NetworkMangler command line interface exists and ready to mangle,
@ -928,7 +961,7 @@ valid_ip() {
# and set the new one to a dot (period) # and set the new one to a dot (period)
IFS='.' IFS='.'
# Put the IP into an array # Put the IP into an array
ip=(${ip}) read -r -a ip <<< "${ip}"
# Restore the IFS to what it was # Restore the IFS to what it was
IFS=${OIFS} IFS=${OIFS}
## Evaluate each octet by checking if it's less than or equal to 255 (the max for each octet) ## Evaluate each octet by checking if it's less than or equal to 255 (the max for each octet)
@ -938,7 +971,7 @@ valid_ip() {
stat=$? stat=$?
fi fi
# Return the exit code # Return the exit code
return ${stat} return "${stat}"
} }
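# A minimal sketch of the quoting-safe split used in valid_ip() above; the variable names are
# illustrative. read -r -a fills the array without the globbing that the old unquoted
# ip=(${ip}) expansion was subject to:
addr="192.168.1.2"
OIFS=$IFS
IFS='.'
read -r -a octets <<< "${addr}"   # octets=(192 168 1 2)
IFS=$OIFS
printf '%s\n' "${octets[@]}"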
# A function to choose the upstream DNS provider(s) # A function to choose the upstream DNS provider(s)
@ -968,13 +1001,11 @@ setDNS() {
# Restore the IFS to what it was # Restore the IFS to what it was
IFS=${OIFS} IFS=${OIFS}
# In a whiptail dialog, show the options # In a whiptail dialog, show the options
DNSchoices=$(whiptail --separate-output --menu "Select Upstream DNS Provider. To use your own, select Custom." ${r} ${c} 7 \ DNSchoices=$(whiptail --separate-output --menu "Select Upstream DNS Provider. To use your own, select Custom." "${r}" "${c}" 7 \
"${DNSChooseOptions[@]}" 2>&1 >/dev/tty) || \ "${DNSChooseOptions[@]}" 2>&1 >/dev/tty) || \
# exit if Cancel is selected # exit if Cancel is selected
{ printf " %bCancel was selected, exiting installer%b\\n" "${COL_LIGHT_RED}" "${COL_NC}"; exit 1; } { printf " %bCancel was selected, exiting installer%b\\n" "${COL_LIGHT_RED}" "${COL_NC}"; exit 1; }
# Display the selection
printf " %b Using " "${INFO}"
# Depending on the user's choice, set the GLOBAL variables to the IP of the respective provider # Depending on the user's choice, set the GLOBAL variables to the IP of the respective provider
if [[ "${DNSchoices}" == "Custom" ]] if [[ "${DNSchoices}" == "Custom" ]]
then then
@ -998,7 +1029,7 @@ setDNS() {
fi fi
# Dialog for the user to enter custom upstream servers # Dialog for the user to enter custom upstream servers
piholeDNS=$(whiptail --backtitle "Specify Upstream DNS Provider(s)" --inputbox "Enter your desired upstream DNS provider(s), separated by a comma.\\n\\nFor example '8.8.8.8, 8.8.4.4'" ${r} ${c} "${prePopulate}" 3>&1 1>&2 2>&3) || \ piholeDNS=$(whiptail --backtitle "Specify Upstream DNS Provider(s)" --inputbox "Enter your desired upstream DNS provider(s), separated by a comma.\\n\\nFor example '8.8.8.8, 8.8.4.4'" "${r}" "${c}" "${prePopulate}" 3>&1 1>&2 2>&3) || \
{ printf " %bCancel was selected, exiting installer%b\\n" "${COL_LIGHT_RED}" "${COL_NC}"; exit 1; } { printf " %bCancel was selected, exiting installer%b\\n" "${COL_LIGHT_RED}" "${COL_NC}"; exit 1; }
# Clean user input and replace whitespace with comma. # Clean user input and replace whitespace with comma.
piholeDNS=$(sed 's/[, \t]\+/,/g' <<< "${piholeDNS}") piholeDNS=$(sed 's/[, \t]\+/,/g' <<< "${piholeDNS}")
@ -1026,14 +1057,14 @@ setDNS() {
if [[ "${PIHOLE_DNS_2}" == "${strInvalid}" ]]; then if [[ "${PIHOLE_DNS_2}" == "${strInvalid}" ]]; then
PIHOLE_DNS_2="" PIHOLE_DNS_2=""
fi fi
# Since the settings will not work, stay in the loop # Since the settings will not work, stay in the loop
DNSSettingsCorrect=False DNSSettingsCorrect=False
# Otherwise, # Otherwise,
else else
# Show the settings # Show the settings
if (whiptail --backtitle "Specify Upstream DNS Provider(s)" --title "Upstream DNS Provider(s)" --yesno "Are these settings correct?\\n DNS Server 1: $PIHOLE_DNS_1\\n DNS Server 2: ${PIHOLE_DNS_2}" ${r} ${c}); then if (whiptail --backtitle "Specify Upstream DNS Provider(s)" --title "Upstream DNS Provider(s)" --yesno "Are these settings correct?\\n DNS Server 1: $PIHOLE_DNS_1\\n DNS Server 2: ${PIHOLE_DNS_2}" "${r}" "${c}"); then
# and break from the loop since the servers are valid # and break from the loop since the servers are valid
DNSSettingsCorrect=True DNSSettingsCorrect=True
# Otherwise, # Otherwise,
else else
# If the settings are wrong, the loop continues # If the settings are wrong, the loop continues
@ -1041,7 +1072,7 @@ setDNS() {
fi fi
fi fi
done done
else else
# Save the old Internal Field Separator in a variable # Save the old Internal Field Separator in a variable
OIFS=$IFS OIFS=$IFS
# and set the new one to newline # and set the new one to newline
@ -1051,7 +1082,6 @@ setDNS() {
DNSName="$(cut -d';' -f1 <<< "${DNSServer}")" DNSName="$(cut -d';' -f1 <<< "${DNSServer}")"
if [[ "${DNSchoices}" == "${DNSName}" ]] if [[ "${DNSchoices}" == "${DNSName}" ]]
then then
printf "%s\\n" "${DNSName}"
PIHOLE_DNS_1="$(cut -d';' -f2 <<< "${DNSServer}")" PIHOLE_DNS_1="$(cut -d';' -f2 <<< "${DNSServer}")"
PIHOLE_DNS_2="$(cut -d';' -f3 <<< "${DNSServer}")" PIHOLE_DNS_2="$(cut -d';' -f3 <<< "${DNSServer}")"
break break
@ -1060,6 +1090,11 @@ setDNS() {
# Restore the IFS to what it was # Restore the IFS to what it was
IFS=${OIFS} IFS=${OIFS}
fi fi
# Display final selection
local DNSIP=${PIHOLE_DNS_1}
[[ -z ${PIHOLE_DNS_2} ]] || DNSIP+=", ${PIHOLE_DNS_2}"
printf " %b Using upstream DNS: %s (%s)\\n" "${INFO}" "${DNSchoices}" "${DNSIP}"
} }
# Allow the user to enable/disable logging # Allow the user to enable/disable logging
@ -1122,7 +1157,7 @@ setAdminFlag() {
local WebChoices local WebChoices
# Similar to the logging function, ask what the user wants # Similar to the logging function, ask what the user wants
WebToggleCommand=(whiptail --separate-output --radiolist "Do you wish to install the web admin interface?" ${r} ${c} 6) WebToggleCommand=(whiptail --separate-output --radiolist "Do you wish to install the web admin interface?" "${r}" "${c}" 6)
# with the default being enabled # with the default being enabled
WebChooseOptions=("On (Recommended)" "" on WebChooseOptions=("On (Recommended)" "" on
Off "" off) Off "" off)
@ -1171,14 +1206,12 @@ chooseBlocklists() {
mv "${adlistFile}" "${adlistFile}.old" mv "${adlistFile}" "${adlistFile}.old"
fi fi
# Let user select (or not) blocklists via a checklist # Let user select (or not) blocklists via a checklist
cmd=(whiptail --separate-output --checklist "Pi-hole relies on third party lists in order to block ads.\\n\\nYou can use the suggestions below, and/or add your own after installation\\n\\nTo deselect any list, use the arrow keys and spacebar" "${r}" "${c}" 6) cmd=(whiptail --separate-output --checklist "Pi-hole relies on third party lists in order to block ads.\\n\\nYou can use the suggestions below, and/or add your own after installation\\n\\nTo deselect any list, use the arrow keys and spacebar" "${r}" "${c}" 5)
# In an array, show the options available (all off by default): # In an array, show the options available (all off by default):
options=(StevenBlack "StevenBlack's Unified Hosts List" on options=(StevenBlack "StevenBlack's Unified Hosts List" on
MalwareDom "MalwareDomains" on MalwareDom "MalwareDomains" on
Cameleon "Cameleon" on
DisconTrack "Disconnect.me Tracking" on DisconTrack "Disconnect.me Tracking" on
DisconAd "Disconnect.me Ads" on DisconAd "Disconnect.me Ads" on)
HostsFile "Hosts-file.net Ads" on)
# In a variable, show the choices available; exit if Cancel is selected # In a variable, show the choices available; exit if Cancel is selected
choices=$("${cmd[@]}" "${options[@]}" 2>&1 >/dev/tty) || { printf " %bCancel was selected, exiting installer%b\\n" "${COL_LIGHT_RED}" "${COL_NC}"; rm "${adlistFile}" ;exit 1; } choices=$("${cmd[@]}" "${options[@]}" 2>&1 >/dev/tty) || { printf " %bCancel was selected, exiting installer%b\\n" "${COL_LIGHT_RED}" "${COL_NC}"; rm "${adlistFile}" ;exit 1; }
@ -1187,6 +1220,7 @@ chooseBlocklists() {
do do
appendToListsFile "${choice}" appendToListsFile "${choice}"
done done
chmod 644 "${adlistFile}"
} }
# Accept a string parameter, it must be one of the default lists # Accept a string parameter, it must be one of the default lists
@ -1196,10 +1230,8 @@ appendToListsFile() {
case $1 in case $1 in
StevenBlack ) echo "https://raw.githubusercontent.com/StevenBlack/hosts/master/hosts" >> "${adlistFile}";; StevenBlack ) echo "https://raw.githubusercontent.com/StevenBlack/hosts/master/hosts" >> "${adlistFile}";;
MalwareDom ) echo "https://mirror1.malwaredomains.com/files/justdomains" >> "${adlistFile}";; MalwareDom ) echo "https://mirror1.malwaredomains.com/files/justdomains" >> "${adlistFile}";;
Cameleon ) echo "http://sysctl.org/cameleon/hosts" >> "${adlistFile}";;
DisconTrack ) echo "https://s3.amazonaws.com/lists.disconnect.me/simple_tracking.txt" >> "${adlistFile}";; DisconTrack ) echo "https://s3.amazonaws.com/lists.disconnect.me/simple_tracking.txt" >> "${adlistFile}";;
DisconAd ) echo "https://s3.amazonaws.com/lists.disconnect.me/simple_ad.txt" >> "${adlistFile}";; DisconAd ) echo "https://s3.amazonaws.com/lists.disconnect.me/simple_ad.txt" >> "${adlistFile}";;
HostsFile ) echo "https://hosts-file.net/ad_servers.txt" >> "${adlistFile}";;
esac esac
} }
@ -1213,10 +1245,8 @@ installDefaultBlocklists() {
fi fi
appendToListsFile StevenBlack appendToListsFile StevenBlack
appendToListsFile MalwareDom appendToListsFile MalwareDom
appendToListsFile Cameleon
appendToListsFile DisconTrack appendToListsFile DisconTrack
appendToListsFile DisconAd appendToListsFile DisconAd
appendToListsFile HostsFile
} }
# Check if /etc/dnsmasq.conf is from pi-hole. If so replace with an original and install new in .d directory # Check if /etc/dnsmasq.conf is from pi-hole. If so replace with an original and install new in .d directory
@ -1225,6 +1255,7 @@ version_check_dnsmasq() {
local dnsmasq_conf="/etc/dnsmasq.conf" local dnsmasq_conf="/etc/dnsmasq.conf"
local dnsmasq_conf_orig="/etc/dnsmasq.conf.orig" local dnsmasq_conf_orig="/etc/dnsmasq.conf.orig"
local dnsmasq_pihole_id_string="addn-hosts=/etc/pihole/gravity.list" local dnsmasq_pihole_id_string="addn-hosts=/etc/pihole/gravity.list"
local dnsmasq_pihole_id_string2="# Dnsmasq config for Pi-hole's FTLDNS"
local dnsmasq_original_config="${PI_HOLE_LOCAL_REPO}/advanced/dnsmasq.conf.original" local dnsmasq_original_config="${PI_HOLE_LOCAL_REPO}/advanced/dnsmasq.conf.original"
local dnsmasq_pihole_01_snippet="${PI_HOLE_LOCAL_REPO}/advanced/01-pihole.conf" local dnsmasq_pihole_01_snippet="${PI_HOLE_LOCAL_REPO}/advanced/01-pihole.conf"
local dnsmasq_pihole_01_location="/etc/dnsmasq.d/01-pihole.conf" local dnsmasq_pihole_01_location="/etc/dnsmasq.d/01-pihole.conf"
@ -1232,16 +1263,17 @@ version_check_dnsmasq() {
# If the dnsmasq config file exists # If the dnsmasq config file exists
if [[ -f "${dnsmasq_conf}" ]]; then if [[ -f "${dnsmasq_conf}" ]]; then
printf " %b Existing dnsmasq.conf found..." "${INFO}" printf " %b Existing dnsmasq.conf found..." "${INFO}"
# If gravity.list is found within this file, we presume it's from older versions of Pi-hole, # If a specific string is found within this file, we presume it's from older versions of Pi-hole,
if grep -q ${dnsmasq_pihole_id_string} ${dnsmasq_conf}; then if grep -q "${dnsmasq_pihole_id_string}" "${dnsmasq_conf}" ||
grep -q "${dnsmasq_pihole_id_string2}" "${dnsmasq_conf}"; then
printf " it is from a previous Pi-hole install.\\n" printf " it is from a previous Pi-hole install.\\n"
printf " %b Backing up dnsmasq.conf to dnsmasq.conf.orig..." "${INFO}" printf " %b Backing up dnsmasq.conf to dnsmasq.conf.orig..." "${INFO}"
# so backup the original file # so backup the original file
mv -f ${dnsmasq_conf} ${dnsmasq_conf_orig} mv -f "${dnsmasq_conf}" "${dnsmasq_conf_orig}"
printf "%b %b Backing up dnsmasq.conf to dnsmasq.conf.orig...\\n" "${OVER}" "${TICK}" printf "%b %b Backing up dnsmasq.conf to dnsmasq.conf.orig...\\n" "${OVER}" "${TICK}"
printf " %b Restoring default dnsmasq.conf..." "${INFO}" printf " %b Restoring default dnsmasq.conf..." "${INFO}"
# and replace it with the default # and replace it with the default
cp ${dnsmasq_original_config} ${dnsmasq_conf} install -D -m 644 -T "${dnsmasq_original_config}" "${dnsmasq_conf}"
printf "%b %b Restoring default dnsmasq.conf...\\n" "${OVER}" "${TICK}" printf "%b %b Restoring default dnsmasq.conf...\\n" "${OVER}" "${TICK}"
# Otherwise, # Otherwise,
else else
@ -1252,47 +1284,47 @@ version_check_dnsmasq() {
# If a file cannot be found, # If a file cannot be found,
printf " %b No dnsmasq.conf found... restoring default dnsmasq.conf..." "${INFO}" printf " %b No dnsmasq.conf found... restoring default dnsmasq.conf..." "${INFO}"
# restore the default one # restore the default one
cp ${dnsmasq_original_config} ${dnsmasq_conf} install -D -m 644 -T "${dnsmasq_original_config}" "${dnsmasq_conf}"
printf "%b %b No dnsmasq.conf found... restoring default dnsmasq.conf...\\n" "${OVER}" "${TICK}" printf "%b %b No dnsmasq.conf found... restoring default dnsmasq.conf...\\n" "${OVER}" "${TICK}"
fi fi
printf " %b Copying 01-pihole.conf to /etc/dnsmasq.d/01-pihole.conf..." "${INFO}" printf " %b Copying 01-pihole.conf to /etc/dnsmasq.d/01-pihole.conf..." "${INFO}"
# Check to see if dnsmasq directory exists (it may not due to being a fresh install and dnsmasq no longer being a dependency) # Check to see if dnsmasq directory exists (it may not due to being a fresh install and dnsmasq no longer being a dependency)
if [[ ! -d "/etc/dnsmasq.d" ]];then if [[ ! -d "/etc/dnsmasq.d" ]];then
mkdir "/etc/dnsmasq.d" install -d -m 755 "/etc/dnsmasq.d"
fi fi
# Copy the new Pi-hole DNS config file into the dnsmasq.d directory # Copy the new Pi-hole DNS config file into the dnsmasq.d directory
cp ${dnsmasq_pihole_01_snippet} ${dnsmasq_pihole_01_location} install -D -m 644 -T "${dnsmasq_pihole_01_snippet}" "${dnsmasq_pihole_01_location}"
printf "%b %b Copying 01-pihole.conf to /etc/dnsmasq.d/01-pihole.conf\\n" "${OVER}" "${TICK}" printf "%b %b Copying 01-pihole.conf to /etc/dnsmasq.d/01-pihole.conf\\n" "${OVER}" "${TICK}"
# Replace our placeholder values with the GLOBAL DNS variables that we populated earlier # Replace our placeholder values with the GLOBAL DNS variables that we populated earlier
# First, swap in the interface to listen on # First, swap in the interface to listen on
sed -i "s/@INT@/$PIHOLE_INTERFACE/" ${dnsmasq_pihole_01_location} sed -i "s/@INT@/$PIHOLE_INTERFACE/" "${dnsmasq_pihole_01_location}"
if [[ "${PIHOLE_DNS_1}" != "" ]]; then if [[ "${PIHOLE_DNS_1}" != "" ]]; then
# Then swap in the primary DNS server # Then swap in the primary DNS server
sed -i "s/@DNS1@/$PIHOLE_DNS_1/" ${dnsmasq_pihole_01_location} sed -i "s/@DNS1@/$PIHOLE_DNS_1/" "${dnsmasq_pihole_01_location}"
else else
# Otherwise, remove the @DNS1@ placeholder line entirely # Otherwise, remove the @DNS1@ placeholder line entirely
sed -i '/^server=@DNS1@/d' ${dnsmasq_pihole_01_location} sed -i '/^server=@DNS1@/d' "${dnsmasq_pihole_01_location}"
fi fi
if [[ "${PIHOLE_DNS_2}" != "" ]]; then if [[ "${PIHOLE_DNS_2}" != "" ]]; then
# Then swap in the secondary DNS server # Then swap in the secondary DNS server
sed -i "s/@DNS2@/$PIHOLE_DNS_2/" ${dnsmasq_pihole_01_location} sed -i "s/@DNS2@/$PIHOLE_DNS_2/" "${dnsmasq_pihole_01_location}"
else else
# Otherwise, remove the @DNS2@ placeholder line entirely # Otherwise, remove the @DNS2@ placeholder line entirely
sed -i '/^server=@DNS2@/d' ${dnsmasq_pihole_01_location} sed -i '/^server=@DNS2@/d' "${dnsmasq_pihole_01_location}"
fi fi
# Make sure the conf-dir directive is enabled so dnsmasq reads /etc/dnsmasq.d # Make sure the conf-dir directive is enabled so dnsmasq reads /etc/dnsmasq.d
sed -i 's/^#conf-dir=\/etc\/dnsmasq.d$/conf-dir=\/etc\/dnsmasq.d/' ${dnsmasq_conf} sed -i 's/^#conf-dir=\/etc\/dnsmasq.d$/conf-dir=\/etc\/dnsmasq.d/' "${dnsmasq_conf}"
# If the user does not want to enable logging, # If the user does not want to enable logging,
if [[ "${QUERY_LOGGING}" == false ]] ; then if [[ "${QUERY_LOGGING}" == false ]] ; then
# Disable it by commenting out the directive in the DNS config file # Disable it by commenting out the directive in the DNS config file
sed -i 's/^log-queries/#log-queries/' ${dnsmasq_pihole_01_location} sed -i 's/^log-queries/#log-queries/' "${dnsmasq_pihole_01_location}"
# Otherwise, # Otherwise,
else else
# enable it by uncommenting the directive in the DNS config file # enable it by uncommenting the directive in the DNS config file
sed -i 's/^#log-queries/log-queries/' ${dnsmasq_pihole_01_location} sed -i 's/^#log-queries/log-queries/' "${dnsmasq_pihole_01_location}"
fi fi
} }
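# A minimal sketch of the cp -> install change used throughout this section; paths are
# illustrative. install -D creates any missing parent directories, -m 644 sets the mode in the
# same step, and -T treats the destination as a file rather than a directory:
install -D -m 644 -T ./advanced/dnsmasq.conf.original /etc/dnsmasq.conf
# roughly: mkdir -p for the parents, then cp, then chmod 644, in one command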
@ -1360,6 +1392,7 @@ installConfigs() {
# Format: Name;Primary IPv4;Secondary IPv4;Primary IPv6;Secondary IPv6 # Format: Name;Primary IPv4;Secondary IPv4;Primary IPv6;Secondary IPv6
# Some values may be empty (for example: DNS servers without IPv6 support) # Some values may be empty (for example: DNS servers without IPv6 support)
echo "${DNS_SERVERS}" > "${PI_HOLE_CONFIG_DIR}/dns-servers.conf" echo "${DNS_SERVERS}" > "${PI_HOLE_CONFIG_DIR}/dns-servers.conf"
chmod 644 "${PI_HOLE_CONFIG_DIR}/dns-servers.conf"
# Install empty file if it does not exist # Install empty file if it does not exist
if [[ ! -r "${PI_HOLE_CONFIG_DIR}/pihole-FTL.conf" ]]; then if [[ ! -r "${PI_HOLE_CONFIG_DIR}/pihole-FTL.conf" ]]; then
@ -1369,28 +1402,22 @@ installConfigs() {
return 1 return 1
fi fi
fi fi
# Install an empty regex file
if [[ ! -f "${regexFile}" ]]; then
# Let PHP edit the regex file, if installed
install -o pihole -g "${LIGHTTPD_GROUP:-pihole}" -m 664 /dev/null "${regexFile}"
fi
# If the user chose to install the dashboard, # If the user chose to install the dashboard,
if [[ "${INSTALL_WEB_SERVER}" == true ]]; then if [[ "${INSTALL_WEB_SERVER}" == true ]]; then
# and if the Web server conf directory does not exist, # and if the Web server conf directory does not exist,
if [[ ! -d "/etc/lighttpd" ]]; then if [[ ! -d "/etc/lighttpd" ]]; then
# make it # make it and set the owners
mkdir /etc/lighttpd install -d -m 755 -o "${USER}" -g root /etc/lighttpd
# and set the owners
chown "${USER}":root /etc/lighttpd
# Otherwise, if the config file already exists # Otherwise, if the config file already exists
elif [[ -f "/etc/lighttpd/lighttpd.conf" ]]; then elif [[ -f "/etc/lighttpd/lighttpd.conf" ]]; then
# back up the original # back up the original
mv /etc/lighttpd/lighttpd.conf /etc/lighttpd/lighttpd.conf.orig mv /etc/lighttpd/lighttpd.conf /etc/lighttpd/lighttpd.conf.orig
fi fi
# and copy in the config file Pi-hole needs # and copy in the config file Pi-hole needs
cp ${PI_HOLE_LOCAL_REPO}/advanced/${LIGHTTPD_CFG} /etc/lighttpd/lighttpd.conf install -D -m 644 -T ${PI_HOLE_LOCAL_REPO}/advanced/${LIGHTTPD_CFG} /etc/lighttpd/lighttpd.conf
# Make sure the external.conf file exists, as lighttpd v1.4.50 crashes without it # Make sure the external.conf file exists, as lighttpd v1.4.50 crashes without it
touch /etc/lighttpd/external.conf touch /etc/lighttpd/external.conf
chmod 644 /etc/lighttpd/external.conf
# if there is a custom block page in the html/pihole directory, replace 404 handler in lighttpd config # if there is a custom block page in the html/pihole directory, replace 404 handler in lighttpd config
if [[ -f "${PI_HOLE_BLOCKPAGE_DIR}/custom.php" ]]; then if [[ -f "${PI_HOLE_BLOCKPAGE_DIR}/custom.php" ]]; then
sed -i 's/^\(server\.error-handler-404\s*=\s*\).*$/\1"pihole\/custom\.php"/' /etc/lighttpd/lighttpd.conf sed -i 's/^\(server\.error-handler-404\s*=\s*\).*$/\1"pihole\/custom\.php"/' /etc/lighttpd/lighttpd.conf
@ -1421,16 +1448,16 @@ install_manpage() {
fi fi
if [[ ! -d "/usr/local/share/man/man8" ]]; then if [[ ! -d "/usr/local/share/man/man8" ]]; then
# if not present, create man8 directory # if not present, create man8 directory
mkdir /usr/local/share/man/man8 install -d -m 755 /usr/local/share/man/man8
fi fi
if [[ ! -d "/usr/local/share/man/man5" ]]; then if [[ ! -d "/usr/local/share/man/man5" ]]; then
# if not present, create man8 directory # if not present, create man5 directory
mkdir /usr/local/share/man/man5 install -d -m 755 /usr/local/share/man/man5
fi fi
# Testing complete, copy the files & update the man db # Testing complete, copy the files & update the man db
cp ${PI_HOLE_LOCAL_REPO}/manpages/pihole.8 /usr/local/share/man/man8/pihole.8 install -D -m 644 -T ${PI_HOLE_LOCAL_REPO}/manpages/pihole.8 /usr/local/share/man/man8/pihole.8
cp ${PI_HOLE_LOCAL_REPO}/manpages/pihole-FTL.8 /usr/local/share/man/man8/pihole-FTL.8 install -D -m 644 -T ${PI_HOLE_LOCAL_REPO}/manpages/pihole-FTL.8 /usr/local/share/man/man8/pihole-FTL.8
cp ${PI_HOLE_LOCAL_REPO}/manpages/pihole-FTL.conf.5 /usr/local/share/man/man5/pihole-FTL.conf.5 install -D -m 644 -T ${PI_HOLE_LOCAL_REPO}/manpages/pihole-FTL.conf.5 /usr/local/share/man/man5/pihole-FTL.conf.5
if mandb -q &>/dev/null; then if mandb -q &>/dev/null; then
# Updated successfully # Updated successfully
printf "%b %b man pages installed and database updated\\n" "${OVER}" "${TICK}" printf "%b %b man pages installed and database updated\\n" "${OVER}" "${TICK}"
@ -1612,20 +1639,23 @@ install_dependent_packages() {
# amount of download traffic. # amount of download traffic.
# NOTE: We may be able to use this installArray in the future to create a list of packages that were # NOTE: We may be able to use this installArray in the future to create a list of packages that were
# installed by us, and remove only the installed packages, and not the entire list. # installed by us, and remove only the installed packages, and not the entire list.
if is_command debconf-apt-progress ; then if is_command apt-get ; then
# For each package, # For each package,
for i in "$@"; do for i in "$@"; do
printf " %b Checking for %s..." "${INFO}" "${i}" printf " %b Checking for %s..." "${INFO}" "${i}"
if dpkg-query -W -f='${Status}' "${i}" 2>/dev/null | grep "ok installed" &> /dev/null; then if dpkg-query -W -f='${Status}' "${i}" 2>/dev/null | grep "ok installed" &> /dev/null; then
printf "%b %b Checking for %s\\n" "${OVER}" "${TICK}" "${i}" printf "%b %b Checking for %s\\n" "${OVER}" "${TICK}" "${i}"
else else
echo -e "${OVER} ${INFO} Checking for $i (will be installed)" printf "%b %b Checking for %s (will be installed)\\n" "${OVER}" "${INFO}" "${i}"
installArray+=("${i}") installArray+=("${i}")
fi fi
done done
if [[ "${#installArray[@]}" -gt 0 ]]; then if [[ "${#installArray[@]}" -gt 0 ]]; then
test_dpkg_lock test_dpkg_lock
debconf-apt-progress -- "${PKG_INSTALL[@]}" "${installArray[@]}" printf " %b Processing %s install(s) for: %s, please wait...\\n" "${INFO}" "${PKG_MANAGER}" "${installArray[*]}"
printf '%*s\n' "$columns" '' | tr " " -;
"${PKG_INSTALL[@]}" "${installArray[@]}"
printf '%*s\n' "$columns" '' | tr " " -;
return return
fi fi
printf "\\n" printf "\\n"
@ -1635,15 +1665,18 @@ install_dependent_packages() {
# Install Fedora/CentOS packages # Install Fedora/CentOS packages
for i in "$@"; do for i in "$@"; do
printf " %b Checking for %s..." "${INFO}" "${i}" printf " %b Checking for %s..." "${INFO}" "${i}"
if ${PKG_MANAGER} -q list installed "${i}" &> /dev/null; then if "${PKG_MANAGER}" -q list installed "${i}" &> /dev/null; then
printf "%b %b Checking for %s" "${OVER}" "${TICK}" "${i}" printf "%b %b Checking for %s\\n" "${OVER}" "${TICK}" "${i}"
else else
printf "%b %b Checking for %s (will be installed)" "${OVER}" "${INFO}" "${i}" printf "%b %b Checking for %s (will be installed)\\n" "${OVER}" "${INFO}" "${i}"
installArray+=("${i}") installArray+=("${i}")
fi fi
done done
if [[ "${#installArray[@]}" -gt 0 ]]; then if [[ "${#installArray[@]}" -gt 0 ]]; then
"${PKG_INSTALL[@]}" "${installArray[@]}" &> /dev/null printf " %b Processing %s install(s) for: %s, please wait...\\n" "${INFO}" "${PKG_MANAGER}" "${installArray[*]}"
printf '%*s\n' "$columns" '' | tr " " -;
"${PKG_INSTALL[@]}" "${installArray[@]}"
printf '%*s\n' "$columns" '' | tr " " -;
return return
fi fi
printf "\\n" printf "\\n"
@ -1659,7 +1692,7 @@ installPiholeWeb() {
# Install the directory # Install the directory
install -d -m 0755 ${PI_HOLE_BLOCKPAGE_DIR} install -d -m 0755 ${PI_HOLE_BLOCKPAGE_DIR}
# and the blockpage # and the blockpage
install -D ${PI_HOLE_LOCAL_REPO}/advanced/{index,blockingpage}.* ${PI_HOLE_BLOCKPAGE_DIR}/ install -D -m 644 ${PI_HOLE_LOCAL_REPO}/advanced/{index,blockingpage}.* ${PI_HOLE_BLOCKPAGE_DIR}/
# Remove superseded file # Remove superseded file
if [[ -e "${PI_HOLE_BLOCKPAGE_DIR}/index.js" ]]; then if [[ -e "${PI_HOLE_BLOCKPAGE_DIR}/index.js" ]]; then
@ -1678,7 +1711,7 @@ installPiholeWeb() {
# Otherwise, # Otherwise,
else else
# don't do anything # don't do anything
printf "%b %b %s\\n" "${OVER}" "${CROSS}" "${str}" printf "%b %b %s\\n" "${OVER}" "${INFO}" "${str}"
printf " No default index.lighttpd.html file found... not backing up\\n" printf " No default index.lighttpd.html file found... not backing up\\n"
fi fi
@ -1686,7 +1719,7 @@ installPiholeWeb() {
local str="Installing sudoer file" local str="Installing sudoer file"
printf "\\n %b %s..." "${INFO}" "${str}" printf "\\n %b %s..." "${INFO}" "${str}"
# Make the .d directory if it doesn't exist # Make the .d directory if it doesn't exist
mkdir -p /etc/sudoers.d/ install -d -m 755 /etc/sudoers.d/
# and copy in the pihole sudoers file # and copy in the pihole sudoers file
install -m 0640 ${PI_HOLE_LOCAL_REPO}/advanced/Templates/pihole.sudo /etc/sudoers.d/pihole install -m 0640 ${PI_HOLE_LOCAL_REPO}/advanced/Templates/pihole.sudo /etc/sudoers.d/pihole
# Add lighttpd user (OS dependent) to sudoers file # Add lighttpd user (OS dependent) to sudoers file
@ -1709,7 +1742,8 @@ installCron() {
local str="Installing latest Cron script" local str="Installing latest Cron script"
printf "\\n %b %s..." "${INFO}" "${str}" printf "\\n %b %s..." "${INFO}" "${str}"
# Copy the cron file over from the local repo # Copy the cron file over from the local repo
cp ${PI_HOLE_LOCAL_REPO}/advanced/Templates/pihole.cron /etc/cron.d/pihole # File must not be world or group writeable and must be owned by root
install -D -m 644 -T -o root -g root ${PI_HOLE_LOCAL_REPO}/advanced/Templates/pihole.cron /etc/cron.d/pihole
# Randomize gravity update time # Randomize gravity update time
sed -i "s/59 1 /$((1 + RANDOM % 58)) $((3 + RANDOM % 2))/" /etc/cron.d/pihole sed -i "s/59 1 /$((1 + RANDOM % 58)) $((3 + RANDOM % 2))/" /etc/cron.d/pihole
# Randomize update checker time # Randomize update checker time
@ -1746,45 +1780,6 @@ create_pihole_user() {
fi fi
} }
# Allow HTTP and DNS traffic
configureFirewall() {
printf "\\n"
# If a firewall is running,
if firewall-cmd --state &> /dev/null; then
# ask if the user wants to install Pi-hole's default firewall rules
whiptail --title "Firewall in use" --yesno "We have detected a running firewall\\n\\nPi-hole currently requires HTTP and DNS port access.\\n\\n\\n\\nInstall Pi-hole default firewall rules?" ${r} ${c} || \
{ printf " %b Not installing firewall rulesets.\\n" "${INFO}"; return 0; }
printf " %b Configuring FirewallD for httpd and pihole-FTL\\n" "${TICK}"
# Allow HTTP and DNS traffic
firewall-cmd --permanent --add-service=http --add-service=dns
# Reload the firewall to apply these changes
firewall-cmd --reload
return 0
# Check for proper kernel modules to prevent failure
elif modinfo ip_tables &> /dev/null && is_command iptables ; then
# If chain Policy is not ACCEPT or last Rule is not ACCEPT
# then check and insert our Rules above the DROP/REJECT Rule.
if iptables -S INPUT | head -n1 | grep -qv '^-P.*ACCEPT$' || iptables -S INPUT | tail -n1 | grep -qv '^-\(A\|P\).*ACCEPT$'; then
whiptail --title "Firewall in use" --yesno "We have detected a running firewall\\n\\nPi-hole currently requires HTTP and DNS port access.\\n\\n\\n\\nInstall Pi-hole default firewall rules?" ${r} ${c} || \
{ printf " %b Not installing firewall rulesets.\\n" "${INFO}"; return 0; }
printf " %b Installing new IPTables firewall rulesets\\n" "${TICK}"
# Check chain first, otherwise a new rule will duplicate old ones
iptables -C INPUT -p tcp -m tcp --dport 80 -j ACCEPT &> /dev/null || iptables -I INPUT 1 -p tcp -m tcp --dport 80 -j ACCEPT
iptables -C INPUT -p tcp -m tcp --dport 53 -j ACCEPT &> /dev/null || iptables -I INPUT 1 -p tcp -m tcp --dport 53 -j ACCEPT
iptables -C INPUT -p udp -m udp --dport 53 -j ACCEPT &> /dev/null || iptables -I INPUT 1 -p udp -m udp --dport 53 -j ACCEPT
iptables -C INPUT -p tcp -m tcp --dport 4711:4720 -i lo -j ACCEPT &> /dev/null || iptables -I INPUT 1 -p tcp -m tcp --dport 4711:4720 -i lo -j ACCEPT
return 0
fi
# Otherwise,
else
# no firewall is running
printf " %b No active firewall detected.. skipping firewall configuration\\n" "${INFO}"
# so just exit
return 0
fi
printf " %b Skipping firewall configuration\\n" "${INFO}"
}
# Write the chosen settings to setupVars.conf # Write the chosen settings to setupVars.conf
finalExports() { finalExports() {
# If the Web interface is not set to be installed, # If the Web interface is not set to be installed,
@ -1817,6 +1812,7 @@ finalExports() {
echo "INSTALL_WEB_INTERFACE=${INSTALL_WEB_INTERFACE}" echo "INSTALL_WEB_INTERFACE=${INSTALL_WEB_INTERFACE}"
echo "LIGHTTPD_ENABLED=${LIGHTTPD_ENABLED}" echo "LIGHTTPD_ENABLED=${LIGHTTPD_ENABLED}"
}>> "${setupVars}" }>> "${setupVars}"
chmod 644 "${setupVars}"
# Set the privacy level # Set the privacy level
sed -i '/PRIVACYLEVEL/d' "${PI_HOLE_CONFIG_DIR}/pihole-FTL.conf" sed -i '/PRIVACYLEVEL/d' "${PI_HOLE_CONFIG_DIR}/pihole-FTL.conf"
@ -1839,7 +1835,7 @@ installLogrotate() {
local str="Installing latest logrotate script" local str="Installing latest logrotate script"
printf "\\n %b %s..." "${INFO}" "${str}" printf "\\n %b %s..." "${INFO}" "${str}"
# Copy the file over from the local repo # Copy the file over from the local repo
cp ${PI_HOLE_LOCAL_REPO}/advanced/Templates/logrotate /etc/pihole/logrotate install -D -m 644 -T ${PI_HOLE_LOCAL_REPO}/advanced/Templates/logrotate /etc/pihole/logrotate
# Different operating systems have different user / group # Different operating systems have different user / group
# settings for logrotate that makes it impossible to create # settings for logrotate that makes it impossible to create
# a static logrotate file that will work with e.g. # a static logrotate file that will work with e.g.
@ -1858,29 +1854,26 @@ installLogrotate() {
# At some point in the future this list can be pruned; for now we'll need it to ensure updates don't break. # At some point in the future this list can be pruned; for now we'll need it to ensure updates don't break.
# Refactoring of install script has changed the name of a couple of variables. Sort them out here. # Refactoring of install script has changed the name of a couple of variables. Sort them out here.
accountForRefactor() { accountForRefactor() {
sed -i 's/piholeInterface/PIHOLE_INTERFACE/g' ${setupVars} sed -i 's/piholeInterface/PIHOLE_INTERFACE/g' "${setupVars}"
sed -i 's/IPv4_address/IPV4_ADDRESS/g' ${setupVars} sed -i 's/IPv4_address/IPV4_ADDRESS/g' "${setupVars}"
sed -i 's/IPv4addr/IPV4_ADDRESS/g' ${setupVars} sed -i 's/IPv4addr/IPV4_ADDRESS/g' "${setupVars}"
sed -i 's/IPv6_address/IPV6_ADDRESS/g' ${setupVars} sed -i 's/IPv6_address/IPV6_ADDRESS/g' "${setupVars}"
sed -i 's/piholeIPv6/IPV6_ADDRESS/g' ${setupVars} sed -i 's/piholeIPv6/IPV6_ADDRESS/g' "${setupVars}"
sed -i 's/piholeDNS1/PIHOLE_DNS_1/g' ${setupVars} sed -i 's/piholeDNS1/PIHOLE_DNS_1/g' "${setupVars}"
sed -i 's/piholeDNS2/PIHOLE_DNS_2/g' ${setupVars} sed -i 's/piholeDNS2/PIHOLE_DNS_2/g' "${setupVars}"
sed -i 's/^INSTALL_WEB=/INSTALL_WEB_INTERFACE=/' ${setupVars} sed -i 's/^INSTALL_WEB=/INSTALL_WEB_INTERFACE=/' "${setupVars}"
# Add 'INSTALL_WEB_SERVER', if it's not been applied already: https://github.com/pi-hole/pi-hole/pull/2115 # Add 'INSTALL_WEB_SERVER', if it's not been applied already: https://github.com/pi-hole/pi-hole/pull/2115
if ! grep -q '^INSTALL_WEB_SERVER=' ${setupVars}; then if ! grep -q '^INSTALL_WEB_SERVER=' ${setupVars}; then
local webserver_installed=false local webserver_installed=false
if grep -q '^INSTALL_WEB_INTERFACE=true' ${setupVars}; then if grep -q '^INSTALL_WEB_INTERFACE=true' ${setupVars}; then
webserver_installed=true webserver_installed=true
fi fi
echo -e "INSTALL_WEB_SERVER=$webserver_installed" >> ${setupVars} echo -e "INSTALL_WEB_SERVER=$webserver_installed" >> "${setupVars}"
fi fi
} }
# Install base files and web interface # Install base files and web interface
installPihole() { installPihole() {
# Create the pihole user
create_pihole_user
# If the user wants to install the Web interface, # If the user wants to install the Web interface,
if [[ "${INSTALL_WEB_INTERFACE}" == true ]]; then if [[ "${INSTALL_WEB_INTERFACE}" == true ]]; then
if [[ ! -d "${webroot}" ]]; then if [[ ! -d "${webroot}" ]]; then
@ -1892,8 +1885,14 @@ installPihole() {
# Set the owner and permissions # Set the owner and permissions
chown ${LIGHTTPD_USER}:${LIGHTTPD_GROUP} ${webroot} chown ${LIGHTTPD_USER}:${LIGHTTPD_GROUP} ${webroot}
chmod 0775 ${webroot} chmod 0775 ${webroot}
# Repair permissions if /var/www/html is not world readable
chmod a+rx /var/www
chmod a+rx /var/www/html
# Give pihole access to the Web server group # Give pihole access to the Web server group
usermod -a -G ${LIGHTTPD_GROUP} pihole usermod -a -G ${LIGHTTPD_GROUP} pihole
# Give lighttpd access to the pihole group so the web interface can
# manage the gravity.db database
usermod -a -G pihole ${LIGHTTPD_USER}
# If the lighttpd command is executable, # If the lighttpd command is executable,
if is_command lighty-enable-mod ; then if is_command lighty-enable-mod ; then
# enable fastcgi and fastcgi-php # enable fastcgi and fastcgi-php
@ -1931,10 +1930,6 @@ installPihole() {
# Check if dnsmasq is present. If so, disable it and back up any possible # Check if dnsmasq is present. If so, disable it and back up any possible
# config file # config file
disable_dnsmasq disable_dnsmasq
# Configure the firewall
if [[ "${useUpdateVars}" == false ]]; then
configureFirewall
fi
# install a man page entry for pihole # install a man page entry for pihole
install_manpage install_manpage
@ -1945,20 +1940,42 @@ installPihole() {
# SELinux # SELinux
checkSelinux() { checkSelinux() {
# If the getenforce command exists, local DEFAULT_SELINUX
if is_command getenforce ; then local CURRENT_SELINUX
# Store the current mode in a variable local SELINUX_ENFORCING=0
enforceMode=$(getenforce) # Check if a SELinux configuration file exists
printf "\\n %b SELinux mode detected: %s\\n" "${INFO}" "${enforceMode}" if [[ -f /etc/selinux/config ]]; then
# If a SELinux configuration file was found, check the default SELinux mode.
# If it's enforcing, DEFAULT_SELINUX=$(awk -F= '/^SELINUX=/ {print $2}' /etc/selinux/config)
if [[ "${enforceMode}" == "Enforcing" ]]; then case "${DEFAULT_SELINUX,,}" in
# Explain Pi-hole does not support it yet enforcing)
whiptail --defaultno --title "SELinux Enforcing Detected" --yesno "SELinux is being ENFORCED on your system! \\n\\nPi-hole currently does not support SELinux, but you may still continue with the installation.\\n\\nNote: Web Admin will not be fully functional unless you set your policies correctly\\n\\nContinue installing Pi-hole?" ${r} ${c} || \ printf " %b %bDefault SELinux: %s%b\\n" "${CROSS}" "${COL_RED}" "${DEFAULT_SELINUX}" "${COL_NC}"
{ printf "\\n %bSELinux Enforcing detected, exiting installer%b\\n" "${COL_LIGHT_RED}" "${COL_NC}"; exit 1; } SELINUX_ENFORCING=1
printf " %b Continuing installation with SELinux Enforcing\\n" "${INFO}" ;;
printf " %b Please refer to official SELinux documentation to create a custom policy\\n" "${INFO}" *) # 'permissive' and 'disabled'
fi printf " %b %bDefault SELinux: %s%b\\n" "${TICK}" "${COL_GREEN}" "${DEFAULT_SELINUX}" "${COL_NC}"
;;
esac
# Check the current state of SELinux
CURRENT_SELINUX=$(getenforce)
case "${CURRENT_SELINUX,,}" in
enforcing)
printf " %b %bCurrent SELinux: %s%b\\n" "${CROSS}" "${COL_RED}" "${CURRENT_SELINUX}" "${COL_NC}"
SELINUX_ENFORCING=1
;;
*) # 'permissive' and 'disabled'
printf " %b %bCurrent SELinux: %s%b\\n" "${TICK}" "${COL_GREEN}" "${CURRENT_SELINUX}" "${COL_NC}"
;;
esac
else
echo -e " ${INFO} ${COL_GREEN}SELinux not detected${COL_NC}";
fi
# Exit the installer if any SELinux checks toggled the flag
if [[ "${SELINUX_ENFORCING}" -eq 1 ]] && [[ -z "${PIHOLE_SELINUX}" ]]; then
printf " Pi-hole does not provide an SELinux policy as the required changes modify the security of your system.\\n"
printf " Please refer to https://wiki.centos.org/HowTos/SELinux if SELinux is required for your deployment.\\n"
printf "\\n %bSELinux Enforcing detected, exiting installer%b\\n" "${COL_LIGHT_RED}" "${COL_NC}";
exit 1;
fi fi
} }
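# A minimal sketch of the two SELinux probes used above; file contents and outputs are
# illustrative. ${VAR,,} lower-cases the value so "Enforcing" and "enforcing" hit the same
# case branch:
DEFAULT_SELINUX=$(awk -F= '/^SELINUX=/ {print $2}' /etc/selinux/config)   # e.g. "enforcing"
CURRENT_SELINUX=$(getenforce)                                             # e.g. "Enforcing"
case "${CURRENT_SELINUX,,}" in
    enforcing) echo "SELinux is enforcing" ;;
    *)         echo "SELinux is permissive or disabled" ;;
esac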
@ -1993,7 +2010,7 @@ If you set a new IP address, you should restart the Pi.
The install log is in /etc/pihole. The install log is in /etc/pihole.
${additional}" ${r} ${c} ${additional}" "${r}" "${c}"
} }
update_dialogs() { update_dialogs() {
@ -2014,7 +2031,7 @@ update_dialogs() {
opt2b="This will reset your Pi-hole and allow you to enter new settings." opt2b="This will reset your Pi-hole and allow you to enter new settings."
# Display the information to the user # Display the information to the user
UpdateCmd=$(whiptail --title "Existing Install Detected!" --menu "\\n\\nWe have detected an existing install.\\n\\nPlease choose from the following options: \\n($strAdd)" ${r} ${c} 2 \ UpdateCmd=$(whiptail --title "Existing Install Detected!" --menu "\\n\\nWe have detected an existing install.\\n\\nPlease choose from the following options: \\n($strAdd)" "${r}" "${c}" 2 \
"${opt1a}" "${opt1b}" \ "${opt1a}" "${opt1b}" \
"${opt2a}" "${opt2b}" 3>&2 2>&1 1>&3) || \ "${opt2a}" "${opt2b}" 3>&2 2>&1 1>&3) || \
{ printf " %bCancel was selected, exiting installer%b\\n" "${COL_LIGHT_RED}" "${COL_NC}"; exit 1; } { printf " %bCancel was selected, exiting installer%b\\n" "${COL_LIGHT_RED}" "${COL_NC}"; exit 1; }
@ -2103,6 +2120,8 @@ checkout_pull_branch() {
printf " %b %s" "${INFO}" "$str" printf " %b %s" "${INFO}" "$str"
git checkout "${branch}" --quiet || return 1 git checkout "${branch}" --quiet || return 1
printf "%b %b %s\\n" "${OVER}" "${TICK}" "$str" printf "%b %b %s\\n" "${OVER}" "${TICK}" "$str"
# Data in the repositories is public anyway so we can make it readable by everyone (a+rX adds read for all and keeps the executable bit only where git already set it)
chmod -R a+rX "${directory}"
git_pull=$(git pull || return 1) git_pull=$(git pull || return 1)
@ -2196,6 +2215,8 @@ FTLinstall() {
# Before stopping FTL, we download the macvendor database # Before stopping FTL, we download the macvendor database
curl -sSL "https://ftl.pi-hole.net/macvendor.db" -o "${PI_HOLE_CONFIG_DIR}/macvendor.db" || true curl -sSL "https://ftl.pi-hole.net/macvendor.db" -o "${PI_HOLE_CONFIG_DIR}/macvendor.db" || true
chmod 644 "${PI_HOLE_CONFIG_DIR}/macvendor.db"
chown pihole:pihole "${PI_HOLE_CONFIG_DIR}/macvendor.db"
# Stop pihole-FTL service if available # Stop pihole-FTL service if available
stop_service pihole-FTL &> /dev/null stop_service pihole-FTL &> /dev/null
@ -2246,6 +2267,7 @@ disable_dnsmasq() {
fi fi
# Create /etc/dnsmasq.conf # Create /etc/dnsmasq.conf
echo "conf-dir=/etc/dnsmasq.d" > "${conffile}" echo "conf-dir=/etc/dnsmasq.d" > "${conffile}"
chmod 644 "${conffile}"
} }
get_binary_name() { get_binary_name() {
@ -2285,9 +2307,15 @@ get_binary_name() {
l_binary="pihole-FTL-arm-linux-gnueabi" l_binary="pihole-FTL-arm-linux-gnueabi"
fi fi
else else
printf "%b %b Detected ARM architecture\\n" "${OVER}" "${TICK}" if [[ -f "/.dockerenv" ]]; then
# set the binary to be used printf "%b %b Detected ARM architecture in docker\\n" "${OVER}" "${TICK}"
l_binary="pihole-FTL-arm-linux-gnueabi" # set the binary to be used
binary="pihole-FTL-armel-native"
else
printf "%b %b Detected ARM architecture\\n" "${OVER}" "${TICK}"
# set the binary to be used
binary="pihole-FTL-arm-linux-gnueabi"
fi
fi fi
elif [[ "${machine}" == "x86_64" ]]; then elif [[ "${machine}" == "x86_64" ]]; then
# This gives the architecture of packages dpkg installs (for example, "i386") # This gives the architecture of packages dpkg installs (for example, "i386")
@ -2439,6 +2467,7 @@ copy_to_install_log() {
# Copy the contents of file descriptor 3 into the install log # Copy the contents of file descriptor 3 into the install log
# Since we use color codes such as '\e[1;33m', they should be removed # Since we use color codes such as '\e[1;33m', they should be removed
sed 's/\[[0-9;]\{1,5\}m//g' < /proc/$$/fd/3 > "${installLogLoc}" sed 's/\[[0-9;]\{1,5\}m//g' < /proc/$$/fd/3 > "${installLogLoc}"
chmod 644 "${installLogLoc}"
} }
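# A minimal sketch of the colour-code stripping above, applied to a plain string instead of the
# installer's file descriptor 3; the sed expression removes the bracketed colour parameters
# (the bare ESC bytes that precede them are left in place):
printf 'step \e[1;32mOK\e[0m done\n' | sed 's/\[[0-9;]\{1,5\}m//g'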
main() { main() {
@ -2523,7 +2552,7 @@ main() {
# Display welcome dialogs # Display welcome dialogs
welcomeDialogs welcomeDialogs
# Create directory for Pi-hole storage # Create directory for Pi-hole storage
mkdir -p /etc/pihole/ install -d -m 755 /etc/pihole/
# Determine available interfaces # Determine available interfaces
get_available_interfaces get_available_interfaces
# Find interfaces and let the user choose one # Find interfaces and let the user choose one
@ -2545,7 +2574,7 @@ main() {
installDefaultBlocklists installDefaultBlocklists
# Source ${setupVars} to use predefined user variables in the functions # Source ${setupVars} to use predefined user variables in the functions
source ${setupVars} source "${setupVars}"
# Get the privacy level if it exists (default is 0) # Get the privacy level if it exists (default is 0)
if [[ -f "${PI_HOLE_CONFIG_DIR}/pihole-FTL.conf" ]]; then if [[ -f "${PI_HOLE_CONFIG_DIR}/pihole-FTL.conf" ]]; then
@ -2609,7 +2638,7 @@ main() {
pw=$(tr -dc _A-Z-a-z-0-9 < /dev/urandom | head -c 8) pw=$(tr -dc _A-Z-a-z-0-9 < /dev/urandom | head -c 8)
# shellcheck disable=SC1091 # shellcheck disable=SC1091
. /opt/pihole/webpage.sh . /opt/pihole/webpage.sh
echo "WEBPASSWORD=$(HashPassword ${pw})" >> ${setupVars} echo "WEBPASSWORD=$(HashPassword "${pw}")" >> "${setupVars}"
fi fi
fi fi

@ -156,7 +156,7 @@ removeNoPurge() {
# Restore Resolved # Restore Resolved
if [[ -e /etc/systemd/resolved.conf.orig ]]; then if [[ -e /etc/systemd/resolved.conf.orig ]]; then
${SUDO} cp /etc/systemd/resolved.conf.orig /etc/systemd/resolved.conf ${SUDO} cp -p /etc/systemd/resolved.conf.orig /etc/systemd/resolved.conf
systemctl reload-or-restart systemd-resolved systemctl reload-or-restart systemd-resolved
fi fi

@ -17,37 +17,34 @@ coltable="/opt/pihole/COL_TABLE"
source "${coltable}" source "${coltable}"
regexconverter="/opt/pihole/wildcard_regex_converter.sh" regexconverter="/opt/pihole/wildcard_regex_converter.sh"
source "${regexconverter}" source "${regexconverter}"
# shellcheck disable=SC1091
source "/etc/.pihole/advanced/Scripts/database_migration/gravity-db.sh"
basename="pihole" basename="pihole"
PIHOLE_COMMAND="/usr/local/bin/${basename}" PIHOLE_COMMAND="/usr/local/bin/${basename}"
piholeDir="/etc/${basename}" piholeDir="/etc/${basename}"
adListFile="${piholeDir}/adlists.list" # Legacy (pre v5.0) list file locations
adListDefault="${piholeDir}/adlists.default"
whitelistFile="${piholeDir}/whitelist.txt" whitelistFile="${piholeDir}/whitelist.txt"
blacklistFile="${piholeDir}/blacklist.txt" blacklistFile="${piholeDir}/blacklist.txt"
regexFile="${piholeDir}/regex.list" regexFile="${piholeDir}/regex.list"
adListFile="${piholeDir}/adlists.list"
adList="${piholeDir}/gravity.list"
blackList="${piholeDir}/black.list"
localList="${piholeDir}/local.list" localList="${piholeDir}/local.list"
VPNList="/etc/openvpn/ipp.txt" VPNList="/etc/openvpn/ipp.txt"
domainsExtension="domains" piholeGitDir="/etc/.pihole"
matterAndLight="${basename}.0.matterandlight.txt" gravityDBfile="${piholeDir}/gravity.db"
parsedMatter="${basename}.1.parsedmatter.txt" gravityTEMPfile="${piholeDir}/gravity_temp.db"
whitelistMatter="${basename}.2.whitelistmatter.txt" gravityDBschema="${piholeGitDir}/advanced/Templates/gravity.db.sql"
accretionDisc="${basename}.3.accretionDisc.txt" gravityDBcopy="${piholeGitDir}/advanced/Templates/gravity_copy.sql"
preEventHorizon="list.preEventHorizon" optimize_database=false
skipDownload="false" domainsExtension="domains"
resolver="pihole-FTL" resolver="pihole-FTL"
haveSourceUrls=true
# Source setupVars from install script # Source setupVars from install script
setupVars="${piholeDir}/setupVars.conf" setupVars="${piholeDir}/setupVars.conf"
if [[ -f "${setupVars}" ]];then if [[ -f "${setupVars}" ]];then
@ -83,31 +80,186 @@ if [[ -r "${piholeDir}/pihole.conf" ]]; then
echo -e " ${COL_LIGHT_RED}Ignoring overrides specified within pihole.conf! ${COL_NC}" echo -e " ${COL_LIGHT_RED}Ignoring overrides specified within pihole.conf! ${COL_NC}"
fi fi
# Determine if Pi-hole blocking is disabled # Generate new sqlite3 file from schema template
# If this is the case, we want to update generate_gravity_database() {
# gravity.list.bck and black.list.bck instead of sqlite3 "${1}" < "${gravityDBschema}"
# gravity.list and black.list }
detect_pihole_blocking_status() {
if [[ "${BLOCKING_ENABLED}" == false ]]; then # Copy data from old to new database file and swap them
echo -e " ${INFO} Pi-hole blocking is disabled" gravity_swap_databases() {
adList="${adList}.bck" local str
blackList="${blackList}.bck" str="Building tree"
else echo -ne " ${INFO} ${str}..."
echo -e " ${INFO} Pi-hole blocking is enabled"
# The index is intentionally not UNIQUE as poor quality adlists may contain domains more than once
output=$( { sqlite3 "${gravityTEMPfile}" "CREATE INDEX idx_gravity ON gravity (domain, adlist_id);"; } 2>&1 )
status="$?"
if [[ "${status}" -ne 0 ]]; then
echo -e "\\n ${CROSS} Unable to build gravity tree in ${gravityTEMPfile}\\n ${output}"
return 1
fi
echo -e "${OVER} ${TICK} ${str}"
str="Swapping databases"
echo -ne " ${INFO} ${str}..."
output=$( { sqlite3 "${gravityTEMPfile}" < "${gravityDBcopy}"; } 2>&1 )
status="$?"
if [[ "${status}" -ne 0 ]]; then
echo -e "\\n ${CROSS} Unable to copy data from ${gravityDBfile} to ${gravityTEMPfile}\\n ${output}"
return 1
fi fi
echo -e "${OVER} ${TICK} ${str}"
# Swap databases and remove old database
rm "${gravityDBfile}"
mv "${gravityTEMPfile}" "${gravityDBfile}"
}
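# A condensed sketch of the swap performed above, using the script's own paths and assuming the
# temporary database has already been populated: gravity is rebuilt and indexed in the temporary
# file first, and only then moved over the live file, presumably so the resolver never reads a
# half-built gravity.db:
sqlite3 /etc/pihole/gravity_temp.db "CREATE INDEX idx_gravity ON gravity (domain, adlist_id);"
rm /etc/pihole/gravity.db
mv /etc/pihole/gravity_temp.db /etc/pihole/gravity.db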
# Update timestamp when the gravity table was last updated successfully
update_gravity_timestamp() {
output=$( { printf ".timeout 30000\\nINSERT OR REPLACE INTO info (property,value) values ('updated',cast(strftime('%%s', 'now') as int));" | sqlite3 "${gravityDBfile}"; } 2>&1 )
status="$?"
if [[ "${status}" -ne 0 ]]; then
echo -e "\\n ${CROSS} Unable to update gravity timestamp in database ${gravityDBfile}\\n ${output}"
return 1
fi
return 0
}
# Import domains from file and store them in the specified database table
database_table_from_file() {
# Define locals
local table source backup_path backup_file tmpFile type
table="${1}"
source="${2}"
backup_path="${piholeDir}/migration_backup"
backup_file="${backup_path}/$(basename "${2}")"
tmpFile="$(mktemp -p "/tmp" --suffix=".gravity")"
local timestamp
timestamp="$(date --utc +'%s')"
local rowid
declare -i rowid
rowid=1
# Special handling for domains to be imported into the common domainlist table
if [[ "${table}" == "whitelist" ]]; then
type="0"
table="domainlist"
elif [[ "${table}" == "blacklist" ]]; then
type="1"
table="domainlist"
elif [[ "${table}" == "regex" ]]; then
type="3"
table="domainlist"
fi
# Get MAX(id) from domainlist when INSERTing into this table
if [[ "${table}" == "domainlist" ]]; then
rowid="$(sqlite3 "${gravityDBfile}" "SELECT MAX(id) FROM domainlist;")"
if [[ -z "$rowid" ]]; then
rowid=0
fi
rowid+=1
fi
# Loop over all domains in ${source} file
# Read file line by line
grep -v '^ *#' < "${source}" | while IFS= read -r domain
do
# Only add non-empty lines
if [[ -n "${domain}" ]]; then
if [[ "${table}" == "domain_audit" ]]; then
# domain_audit table format (no enable or modified fields)
echo "${rowid},\"${domain}\",${timestamp}" >> "${tmpFile}"
elif [[ "${table}" == "adlist" ]]; then
# Adlist table format
echo "${rowid},\"${domain}\",1,${timestamp},${timestamp},\"Migrated from ${source}\"" >> "${tmpFile}"
else
# White-, black-, and regexlist table format
echo "${rowid},${type},\"${domain}\",1,${timestamp},${timestamp},\"Migrated from ${source}\"" >> "${tmpFile}"
fi
rowid+=1
fi
done
# Store domains in database table specified by ${table}
# Use printf as .mode and .import need to be on separate lines
# see https://unix.stackexchange.com/a/445615/83260
output=$( { printf ".timeout 30000\\n.mode csv\\n.import \"%s\" %s\\n" "${tmpFile}" "${table}" | sqlite3 "${gravityDBfile}"; } 2>&1 )
status="$?"
if [[ "${status}" -ne 0 ]]; then
echo -e "\\n ${CROSS} Unable to fill table ${table}${type} in database ${gravityDBfile}\\n ${output}"
gravity_Cleanup "error"
fi
# Move source file to backup directory, create directory if not existing
mkdir -p "${backup_path}"
mv "${source}" "${backup_file}" 2> /dev/null || \
echo -e " ${CROSS} Unable to backup ${source} to ${backup_path}"
# Delete tmpFile
rm "${tmpFile}" > /dev/null 2>&1 || \
echo -e " ${CROSS} Unable to remove ${tmpFile}"
}
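# Illustration (not part of this diff): a minimal sketch of the same CSV-import idiom
# with a hypothetical table and throwaway database; the real call sites pass the
# migrated list files and ${gravityDBfile}.
sqlite3 /tmp/example.db "CREATE TABLE IF NOT EXISTS example_table (id INTEGER, domain TEXT);"
tmpFile="$(mktemp --suffix=".csv")"
printf '%s\n' "1,example.com" "2,ads.example.net" > "${tmpFile}"
printf ".timeout 30000\\n.mode csv\\n.import \"%s\" example_table\\n" "${tmpFile}" | sqlite3 /tmp/example.db
rm "${tmpFile}"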
# Migrate pre-v5.0 list files to database-based Pi-hole versions
migrate_to_database() {
# Create database file only if not present
if [ ! -e "${gravityDBfile}" ]; then
# Create new database file - note that this will be created in version 1
echo -e " ${INFO} Creating new gravity database"
generate_gravity_database "${gravityDBfile}"
# Check if gravity database needs to be updated
upgrade_gravityDB "${gravityDBfile}" "${piholeDir}"
# Migrate list files to new database
if [ -e "${adListFile}" ]; then
# Store adlist domains in database
echo -e " ${INFO} Migrating content of ${adListFile} into new database"
database_table_from_file "adlist" "${adListFile}"
fi
if [ -e "${blacklistFile}" ]; then
# Store blacklisted domains in database
echo -e " ${INFO} Migrating content of ${blacklistFile} into new database"
database_table_from_file "blacklist" "${blacklistFile}"
fi
if [ -e "${whitelistFile}" ]; then
# Store whitelisted domains in database
echo -e " ${INFO} Migrating content of ${whitelistFile} into new database"
database_table_from_file "whitelist" "${whitelistFile}"
fi
if [ -e "${regexFile}" ]; then
# Store regex domains in database
# Important note: We need to add the domains to the "regex" table
# as it will only later be renamed to "regex_blacklist"!
echo -e " ${INFO} Migrating content of ${regexFile} into new database"
database_table_from_file "regex" "${regexFile}"
fi
fi
# Check if gravity database needs to be updated
upgrade_gravityDB "${gravityDBfile}" "${piholeDir}"
}

# Determine if DNS resolution is available before proceeding
gravity_CheckDNSResolutionAvailable() {
local lookupDomain="pi.hole"

# Determine if $localList does not exist
# Determine if $localList does not exist, and ensure it is not empty
if [[ ! -e "${localList}" ]]; then
if [[ ! -e "${localList}" ]] || [[ -s "${localList}" ]]; then
lookupDomain="raw.githubusercontent.com"
fi

# Determine if $lookupDomain is resolvable
if timeout 1 getent hosts "${lookupDomain}" &> /dev/null; then
if timeout 4 getent hosts "${lookupDomain}" &> /dev/null; then
# Print confirmation of resolvability if it had previously failed
if [[ -n "${secs:-}" ]]; then
echo -e "${OVER} ${TICK} DNS resolution is now available\\n"
@ -121,7 +273,7 @@ gravity_CheckDNSResolutionAvailable() {
# If the /etc/resolv.conf contains resolvers other than 127.0.0.1 then the local dnsmasq will not be queried and pi.hole is NXDOMAIN.
# This means that even though name resolution is working, the getent hosts check fails and the holddown timer keeps ticking and eventually fails
# So we check the output of the last command and if it failed, attempt to use dig +short as a fallback
if timeout 1 dig +short "${lookupDomain}" &> /dev/null; then
if timeout 4 dig +short "${lookupDomain}" &> /dev/null; then
if [[ -n "${secs:-}" ]]; then
echo -e "${OVER} ${TICK} DNS resolution is now available\\n"
fi
@ -153,19 +305,14 @@ gravity_CheckDNSResolutionAvailable() {
gravity_CheckDNSResolutionAvailable
}
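# Illustration (not part of this diff): the two probes used by the holddown loop above
# can be run by hand when debugging name resolution.
timeout 4 getent hosts pi.hole
timeout 4 dig +short pi.hole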
# --- removed in v5.0 ---
# Retrieve blocklist URLs and parse domains from adlists.list
gravity_GetBlocklistUrls() {
echo -e " ${INFO} ${COL_BOLD}Neutrino emissions detected${COL_NC}..."
if [[ -f "${adListDefault}" ]] && [[ -f "${adListFile}" ]]; then
# Remove superseded $adListDefault file
rm "${adListDefault}" 2> /dev/null || \
echo -e " ${CROSS} Unable to remove ${adListDefault}"
fi
# Retrieve source URLs from $adListFile
# Logic: Remove comments and empty lines
mapfile -t sources <<< "$(grep -v -E "^(#|$)" "${adListFile}" 2> /dev/null)"

# --- added in v5.0 ---
# Retrieve blocklist URLs and parse domains from adlist.list
gravity_DownloadBlocklists() {
echo -e " ${INFO} ${COL_BOLD}Neutrino emissions detected${COL_NC}..."
# Retrieve source URLs from gravity database
# We source only enabled adlists, sqlite3 stores boolean values as 0 (false) or 1 (true)
mapfile -t sources <<< "$(sqlite3 "${gravityDBfile}" "SELECT address FROM vw_adlist;" 2> /dev/null)"
mapfile -t sourceIDs <<< "$(sqlite3 "${gravityDBfile}" "SELECT id FROM vw_adlist;" 2> /dev/null)"

# Parse source domains from $sources
mapfile -t sourceDomains <<< "$(
@ -186,16 +333,28 @@ gravity_GetBlocklistUrls() {
echo -e "${OVER} ${CROSS} ${str}"
echo -e " ${INFO} No source list found, or it is empty"
echo ""
haveSourceUrls=false
return 1
fi
}

# Define options for when retrieving blocklists
gravity_SetDownloadOptions() {
local url domain agent cmd_ext str
local url domain agent cmd_ext str target
echo ""
# Prepare new gravity database
str="Preparing new gravity database"
echo -ne " ${INFO} ${str}..."
rm "${gravityTEMPfile}" > /dev/null 2>&1
output=$( { sqlite3 "${gravityTEMPfile}" < "${gravityDBschema}"; } 2>&1 )
status="$?"
if [[ "${status}" -ne 0 ]]; then
echo -e "\\n ${CROSS} Unable to create new database ${gravityTEMPfile}\\n ${output}"
gravity_Cleanup "error"
else
echo -e "${OVER} ${TICK} ${str}"
fi
target="$(mktemp -p "/tmp" --suffix=".gravity")"
# Loop through $sources and download each one # Loop through $sources and download each one
for ((i = 0; i < "${#sources[@]}"; i++)); do for ((i = 0; i < "${#sources[@]}"; i++)); do
url="${sources[$i]}" url="${sources[$i]}"
@ -214,18 +373,90 @@ gravity_SetDownloadOptions() {
*) cmd_ext="";; *) cmd_ext="";;
esac esac
if [[ "${skipDownload}" == false ]]; then echo -e " ${INFO} Target: ${url}"
echo -e " ${INFO} Target: ${domain} (${url##*/})" local regex
gravity_DownloadBlocklistFromUrl "${url}" "${cmd_ext}" "${agent}" # Check for characters NOT allowed in URLs
echo "" regex="[^a-zA-Z0-9:/?&%=~._()-]"
if [[ "${url}" =~ ${regex} ]]; then
echo -e " ${CROSS} Invalid Target"
else
gravity_DownloadBlocklistFromUrl "${url}" "${cmd_ext}" "${agent}" "${sourceIDs[$i]}" "${saveLocation}" "${target}"
fi fi
echo ""
done done
str="Storing downloaded domains in new gravity database"
echo -ne " ${INFO} ${str}..."
output=$( { printf ".timeout 30000\\n.mode csv\\n.import \"%s\" gravity\\n" "${target}" | sqlite3 "${gravityTEMPfile}"; } 2>&1 )
status="$?"
if [[ "${status}" -ne 0 ]]; then
echo -e "\\n ${CROSS} Unable to fill gravity table in database ${gravityTEMPfile}\\n ${output}"
gravity_Cleanup "error"
else
echo -e "${OVER} ${TICK} ${str}"
fi
if [[ "${status}" -eq 0 && -n "${output}" ]]; then
echo -e " Encountered non-critical SQL warnings. Please check the suitability of the lists you're using!\\n\\n SQL warnings:"
local warning file line lineno
while IFS= read -r line; do
echo " - ${line}"
warning="$(grep -oh "^[^:]*:[0-9]*" <<< "${line}")"
file="${warning%:*}"
lineno="${warning#*:}"
if [[ -n "${file}" && -n "${lineno}" ]]; then
echo -n " Line contains: "
awk "NR==${lineno}" < "${file}"
fi
done <<< "${output}"
echo ""
fi
rm "${target}" > /dev/null 2>&1 || \
echo -e " ${CROSS} Unable to remove ${target}"
gravity_Blackbody=true
}
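# Illustration (not part of this diff): the enabled sources consumed above can be
# listed by hand with the same view and columns (default DB path assumed).
sqlite3 /etc/pihole/gravity.db "SELECT id, address FROM vw_adlist;"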
total_num=0
parseList() {
local adlistID="${1}" src="${2}" target="${3}" incorrect_lines
# This sed does the following things:
# 1. Remove all domains containing invalid characters. Valid are: a-z, A-Z, 0-9, dot (.), minus (-), underscore (_)
# 2. Append ,adlistID to every line
# 3. Ensures there is a newline on the last line
sed -e "/[^a-zA-Z0-9.\_-]/d;s/$/,${adlistID}/;/.$/a\\" "${src}" >> "${target}"
# Find (up to) five domains containing invalid characters (see above)
incorrect_lines="$(sed -e "/[^a-zA-Z0-9.\_-]/!d" "${src}" | head -n 5)"
local num_lines num_target_lines num_correct_lines num_invalid
# Get number of lines in source file
num_lines="$(grep -c "^" "${src}")"
# Get number of lines in destination file
num_target_lines="$(grep -c "^" "${target}")"
num_correct_lines="$(( num_target_lines-total_num ))"
total_num="$num_target_lines"
num_invalid="$(( num_lines-num_correct_lines ))"
if [[ "${num_invalid}" -eq 0 ]]; then
echo " ${INFO} Received ${num_lines} domains"
else
echo " ${INFO} Received ${num_lines} domains, ${num_invalid} domains invalid!"
fi
# Display sample of invalid lines if we found some
if [[ -n "${incorrect_lines}" ]]; then
echo " Sample of invalid domains:"
while IFS= read -r line; do
echo " - ${line}"
done <<< "${incorrect_lines}"
fi
}
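# Illustration (not part of this diff): running the same sed filter standalone with a
# made-up adlist ID of 5 shows the CSV rows parseList appends; valid domains come out
# as "domain,5" and the line containing a space is dropped as invalid.
printf 'example.com\nads.example.net\nbad domain\n' | \
sed -e "/[^a-zA-Z0-9.\_-]/d;s/$/,5/;/.$/a\\"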
# Download specified URL and perform checks on HTTP status and file content # Download specified URL and perform checks on HTTP status and file content
gravity_DownloadBlocklistFromUrl() { gravity_DownloadBlocklistFromUrl() {
local url="${1}" cmd_ext="${2}" agent="${3}" heisenbergCompensator="" patternBuffer str httpCode success="" local url="${1}" cmd_ext="${2}" agent="${3}" adlistID="${4}" saveLocation="${5}" target="${6}"
local heisenbergCompensator="" patternBuffer str httpCode success=""
# Create temp file to store content on disk instead of RAM # Create temp file to store content on disk instead of RAM
patternBuffer=$(mktemp -p "/tmp" --suffix=".phgpb") patternBuffer=$(mktemp -p "/tmp" --suffix=".phgpb")
@ -306,11 +537,14 @@ gravity_DownloadBlocklistFromUrl() {
# Determine if the blocklist was downloaded and saved correctly # Determine if the blocklist was downloaded and saved correctly
if [[ "${success}" == true ]]; then if [[ "${success}" == true ]]; then
if [[ "${httpCode}" == "304" ]]; then if [[ "${httpCode}" == "304" ]]; then
: # Do not attempt to re-parse file # Add domains to database table file
parseList "${adlistID}" "${saveLocation}" "${target}"
# Check if $patternbuffer is a non-zero length file # Check if $patternbuffer is a non-zero length file
elif [[ -s "${patternBuffer}" ]]; then elif [[ -s "${patternBuffer}" ]]; then
# Determine if blocklist is non-standard and parse as appropriate # Determine if blocklist is non-standard and parse as appropriate
gravity_ParseFileIntoDomains "${patternBuffer}" "${saveLocation}" gravity_ParseFileIntoDomains "${patternBuffer}" "${saveLocation}"
# Add domains to database table file
parseList "${adlistID}" "${saveLocation}" "${target}"
else else
# Fall back to previously cached list if $patternBuffer is empty # Fall back to previously cached list if $patternBuffer is empty
echo -e " ${INFO} Received empty file: ${COL_LIGHT_GREEN}using previously cached list${COL_NC}" echo -e " ${INFO} Received empty file: ${COL_LIGHT_GREEN}using previously cached list${COL_NC}"
@ -319,6 +553,8 @@ gravity_DownloadBlocklistFromUrl() {
# Determine if cached list has read permission # Determine if cached list has read permission
if [[ -r "${saveLocation}" ]]; then if [[ -r "${saveLocation}" ]]; then
echo -e " ${CROSS} List download failed: ${COL_LIGHT_GREEN}using previously cached list${COL_NC}" echo -e " ${CROSS} List download failed: ${COL_LIGHT_GREEN}using previously cached list${COL_NC}"
# Add domains to database table file
parseList "${adlistID}" "${saveLocation}" "${target}"
else else
echo -e " ${CROSS} List download failed: ${COL_LIGHT_RED}no cached list available${COL_NC}" echo -e " ${CROSS} List download failed: ${COL_LIGHT_RED}no cached list available${COL_NC}"
fi fi
@ -327,24 +563,29 @@ gravity_DownloadBlocklistFromUrl() {
# Parse source files into domains format
gravity_ParseFileIntoDomains() {
local source="${1}" destination="${2}" firstLine abpFilter
local source="${1}" destination="${2}" firstLine

# Determine if we are parsing a consolidated list
if [[ "${source}" == "${piholeDir}/${matterAndLight}" ]]; then
#if [[ "${source}" == "${piholeDir}/${matterAndLight}" ]]; then
# Remove comments and print only the domain name
# Most of the lists downloaded are already in hosts file format but the spacing/formatting is not contiguous
# This helps with that and makes it easier to read
# It also helps with debugging so each stage of the script can be researched more in depth

# --- removed in v5.0 ---
# Awk -F splits on given IFS, we grab the right hand side (chops trailing #comments and /'s to grab the domain only.
# Last awk command takes non-commented lines and if they have 2 fields, take the right field (the domain) and leave
# the left (IP address), otherwise grab the single field.
< ${source} awk -F '#' '{print $1}' | \
awk -F '/' '{print $1}' | \
awk '($1 !~ /^#/) { if (NF>1) {print $2} else {print $1}}' | \
sed -nr -e 's/\.{2,}/./g' -e '/\./p' > ${destination}

# --- added in v5.0 ---
# 1) Remove carriage returns
# 2) Convert all characters to lowercase
# 3) Remove comments (text starting with "#", include possible spaces before the hash sign)
# 4) Remove lines containing "/"
# 5) Remove leading tabs, spaces, etc.
# 6) Delete lines not matching domain names
< "${source}" tr -d '\r' | \
tr '[:upper:]' '[:lower:]' | \
sed 's/\s*#.*//g' | \
sed -r '/(\/).*$/d' | \
sed -r 's/^.*\s+//g' | \
sed -r '/([^\.]+\.)+[^\.]{2,}/!d' > "${destination}"
chmod 644 "${destination}"

return 0
fi
#fi
# Individual file parsing: Keep comments, while parsing domains from each line # Individual file parsing: Keep comments, while parsing domains from each line
# We keep comments to respect the list maintainer's licensing # We keep comments to respect the list maintainer's licensing
@ -374,11 +615,13 @@ gravity_ParseFileIntoDomains() {
# Print if nonempty # Print if nonempty
length { print } length { print }
' "${source}" 2> /dev/null > "${destination}" ' "${source}" 2> /dev/null > "${destination}"
chmod 644 "${destination}"
echo -e "${OVER} ${TICK} Format: URL" echo -e "${OVER} ${TICK} Format: URL"
else else
# Default: Keep hosts/domains file in same format as it was downloaded # Default: Keep hosts/domains file in same format as it was downloaded
output=$( { mv "${source}" "${destination}"; } 2>&1 ) output=$( { mv "${source}" "${destination}"; } 2>&1 )
chmod 644 "${destination}"
if [[ ! -e "${destination}" ]]; then if [[ ! -e "${destination}" ]]; then
echo -e "\\n ${CROSS} Unable to move tmp file to ${piholeDir} echo -e "\\n ${CROSS} Unable to move tmp file to ${piholeDir}
@ -388,103 +631,29 @@ gravity_ParseFileIntoDomains() {
fi fi
} }
# Create (unfiltered) "Matter and Light" consolidated list # Report number of entries in a table
gravity_ConsolidateDownloadedBlocklists() { gravity_Table_Count() {
local str lastLine local table="${1}"
local str="${2}"
str="Consolidating blocklists" local num
if [[ "${haveSourceUrls}" == true ]]; then num="$(sqlite3 "${gravityDBfile}" "SELECT COUNT(*) FROM ${table};")"
echo -ne " ${INFO} ${str}..." if [[ "${table}" == "vw_gravity" ]]; then
fi local unique
unique="$(sqlite3 "${gravityDBfile}" "SELECT COUNT(DISTINCT domain) FROM ${table};")"
# Empty $matterAndLight if it already exists, otherwise, create it echo -e " ${INFO} Number of ${str}: ${num} (${COL_BOLD}${unique} unique domains${COL_NC})"
: > "${piholeDir}/${matterAndLight}" sqlite3 "${gravityDBfile}" "INSERT OR REPLACE INTO info (property,value) VALUES ('gravity_count',${unique});"
else
# Loop through each *.domains file echo -e " ${INFO} Number of ${str}: ${num}"
for i in "${activeDomains[@]}"; do
# Determine if file has read permissions, as download might have failed
if [[ -r "${i}" ]]; then
# Remove windows CRs from file, convert list to lower case, and append into $matterAndLight
tr -d '\r' < "${i}" | tr '[:upper:]' '[:lower:]' >> "${piholeDir}/${matterAndLight}"
# Ensure that the first line of a new list is on a new line
lastLine=$(tail -1 "${piholeDir}/${matterAndLight}")
if [[ "${#lastLine}" -gt 0 ]]; then
echo "" >> "${piholeDir}/${matterAndLight}"
fi
fi
done
if [[ "${haveSourceUrls}" == true ]]; then
echo -e "${OVER} ${TICK} ${str}"
fi
}
# Parse consolidated list into (filtered, unique) domains-only format
gravity_SortAndFilterConsolidatedList() {
local str num
str="Extracting domains from blocklists"
if [[ "${haveSourceUrls}" == true ]]; then
echo -ne " ${INFO} ${str}..."
fi
# Parse into hosts file
gravity_ParseFileIntoDomains "${piholeDir}/${matterAndLight}" "${piholeDir}/${parsedMatter}"
# Format $parsedMatter line total as currency
num=$(printf "%'.0f" "$(wc -l < "${piholeDir}/${parsedMatter}")")
if [[ "${haveSourceUrls}" == true ]]; then
echo -e "${OVER} ${TICK} ${str}"
fi
echo -e " ${INFO} Number of domains being pulled in by gravity: ${COL_BLUE}${num}${COL_NC}"
str="Removing duplicate domains"
if [[ "${haveSourceUrls}" == true ]]; then
echo -ne " ${INFO} ${str}..."
fi
sort -u "${piholeDir}/${parsedMatter}" > "${piholeDir}/${preEventHorizon}"
if [[ "${haveSourceUrls}" == true ]]; then
echo -e "${OVER} ${TICK} ${str}"
# Format $preEventHorizon line total as currency
num=$(printf "%'.0f" "$(wc -l < "${piholeDir}/${preEventHorizon}")")
echo -e " ${INFO} Number of unique domains trapped in the Event Horizon: ${COL_BLUE}${num}${COL_NC}"
fi
}
# Whitelist user-defined domains
gravity_Whitelist() {
local num str
if [[ ! -f "${whitelistFile}" ]]; then
echo -e " ${INFO} Nothing to whitelist!"
return 0
fi fi
num=$(wc -l < "${whitelistFile}")
str="Number of whitelisted domains: ${num}"
echo -ne " ${INFO} ${str}..."
# Print everything from preEventHorizon into whitelistMatter EXCEPT domains in $whitelistFile
comm -23 "${piholeDir}/${preEventHorizon}" <(sort "${whitelistFile}") > "${piholeDir}/${whitelistMatter}"
echo -e "${OVER} ${INFO} ${str}"
} }
# --- removed in v5.0 ---
# Output count of blacklisted domains and regex filters
gravity_ShowBlockCount() {
local num

if [[ -f "${blacklistFile}" ]]; then
num=$(printf "%'.0f" "$(wc -l < "${blacklistFile}")")
echo -e " ${INFO} Number of blacklisted domains: ${num}"
fi

if [[ -f "${regexFile}" ]]; then
num=$(grep -cv "^#" "${regexFile}")
echo -e " ${INFO} Number of regex filters: ${num}"
fi
}

# --- added in v5.0 ---
# Output count of blacklisted domains and regex filters
gravity_ShowCount() {
gravity_Table_Count "vw_gravity" "gravity domains" ""
gravity_Table_Count "vw_blacklist" "exact blacklisted domains"
gravity_Table_Count "vw_regex_blacklist" "regex blacklist filters"
gravity_Table_Count "vw_whitelist" "exact whitelisted domains"
gravity_Table_Count "vw_regex_whitelist" "regex whitelist filters"
}
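# Illustration (not part of this diff): the same counters can be reproduced directly
# with the sqlite3 shell (default DB path assumed).
sqlite3 /etc/pihole/gravity.db "SELECT COUNT(*) FROM vw_gravity;"
sqlite3 /etc/pihole/gravity.db "SELECT COUNT(DISTINCT domain) FROM vw_gravity;"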
# Parse list of domains into hosts format # Parse list of domains into hosts format
@ -504,7 +673,7 @@ gravity_ParseDomainsIntoHosts() {
} }
# Create "localhost" entries into hosts format # Create "localhost" entries into hosts format
gravity_ParseLocalDomains() { gravity_generateLocalList() {
local hostname local hostname
if [[ -s "/etc/hostname" ]]; then if [[ -s "/etc/hostname" ]]; then
@ -520,6 +689,7 @@ gravity_ParseLocalDomains() {
# Empty $localList if it already exists, otherwise, create it # Empty $localList if it already exists, otherwise, create it
: > "${localList}" : > "${localList}"
chmod 644 "${localList}"
gravity_ParseDomainsIntoHosts "${localList}.tmp" "${localList}" gravity_ParseDomainsIntoHosts "${localList}.tmp" "${localList}"
@ -529,40 +699,6 @@ gravity_ParseLocalDomains() {
fi fi
} }
# Create primary blacklist entries
gravity_ParseBlacklistDomains() {
local output status
# Empty $accretionDisc if it already exists, otherwise, create it
: > "${piholeDir}/${accretionDisc}"
if [[ -f "${piholeDir}/${whitelistMatter}" ]]; then
mv "${piholeDir}/${whitelistMatter}" "${piholeDir}/${accretionDisc}"
else
# There was no whitelist file, so use preEventHorizon instead of whitelistMatter.
cp "${piholeDir}/${preEventHorizon}" "${piholeDir}/${accretionDisc}"
fi
# Move the file over as /etc/pihole/gravity.list so dnsmasq can use it
output=$( { mv "${piholeDir}/${accretionDisc}" "${adList}"; } 2>&1 )
status="$?"
if [[ "${status}" -ne 0 ]]; then
echo -e "\\n ${CROSS} Unable to move ${accretionDisc} from ${piholeDir}\\n ${output}"
gravity_Cleanup "error"
fi
}
# Create user-added blacklist entries
gravity_ParseUserDomains() {
if [[ ! -f "${blacklistFile}" ]]; then
return 0
fi
# Copy the file over as /etc/pihole/black.list so dnsmasq can use it
cp "${blacklistFile}" "${blackList}" 2> /dev/null || \
echo -e "\\n ${CROSS} Unable to move ${blacklistFile##*/} to ${piholeDir}"
}
# Trap Ctrl-C # Trap Ctrl-C
gravity_Trap() { gravity_Trap() {
trap '{ echo -e "\\n\\n ${INFO} ${COL_LIGHT_RED}User-abort detected${COL_NC}"; gravity_Cleanup "error"; }' INT trap '{ echo -e "\\n\\n ${INFO} ${COL_LIGHT_RED}User-abort detected${COL_NC}"; gravity_Cleanup "error"; }' INT
@ -583,7 +719,7 @@ gravity_Cleanup() {
# Ensure this function only runs when gravity_SetDownloadOptions() has completed # Ensure this function only runs when gravity_SetDownloadOptions() has completed
if [[ "${gravity_Blackbody:-}" == true ]]; then if [[ "${gravity_Blackbody:-}" == true ]]; then
# Remove any unused .domains files # Remove any unused .domains files
for file in ${piholeDir}/*.${domainsExtension}; do for file in "${piholeDir}"/*."${domainsExtension}"; do
# If list is not in active array, then remove it # If list is not in active array, then remove it
if [[ ! "${activeDomains[*]}" == *"${file}"* ]]; then if [[ ! "${activeDomains[*]}" == *"${file}"* ]]; then
rm -f "${file}" 2> /dev/null || \ rm -f "${file}" 2> /dev/null || \
@ -594,6 +730,21 @@ gravity_Cleanup() {
echo -e "${OVER} ${TICK} ${str}" echo -e "${OVER} ${TICK} ${str}"
if ${optimize_database} ; then
str="Optimizing domains database"
echo -ne " ${INFO} ${str}..."
# Run VACUUM command on database to optimize it
output=$( { sqlite3 "${gravityDBfile}" "VACUUM;"; } 2>&1 )
status="$?"
if [[ "${status}" -ne 0 ]]; then
echo -e "\\n ${CROSS} Unable to optimize gravity database ${gravityDBfile}\\n ${output}"
error="error"
else
echo -e "${OVER} ${TICK} ${str}"
fi
fi
# Only restart DNS service if offline # Only restart DNS service if offline
if ! pidof ${resolver} &> /dev/null; then if ! pidof ${resolver} &> /dev/null; then
"${PIHOLE_COMMAND}" restartdns "${PIHOLE_COMMAND}" restartdns
@ -620,17 +771,28 @@ Options:
for var in "$@"; do for var in "$@"; do
case "${var}" in case "${var}" in
"-f" | "--force" ) forceDelete=true;; "-f" | "--force" ) forceDelete=true;;
"-o" | "--optimize" ) optimize_database=true;;
"-r" | "--recreate" ) recreate_database=true;;
"-h" | "--help" ) helpFunc;; "-h" | "--help" ) helpFunc;;
"-sd" | "--skip-download" ) skipDownload=true;;
"-b" | "--blacklist-only" ) listType="blacklist";;
"-w" | "--whitelist-only" ) listType="whitelist";;
"-wild" | "--wildcard-only" ) listType="wildcard"; dnsRestartType="restart";;
esac esac
done done
# Trap Ctrl-C # Trap Ctrl-C
gravity_Trap gravity_Trap
if [[ "${recreate_database:-}" == true ]]; then
str="Restoring from migration backup"
echo -ne "${INFO} ${str}..."
rm "${gravityDBfile}"
pushd "${piholeDir}" > /dev/null || exit
cp migration_backup/* .
popd > /dev/null || exit
echo -e "${OVER} ${TICK} ${str}"
fi
# Move possibly existing legacy files to the gravity database
migrate_to_database
if [[ "${forceDelete:-}" == true ]]; then if [[ "${forceDelete:-}" == true ]]; then
str="Deleting existing list cache" str="Deleting existing list cache"
echo -ne "${INFO} ${str}..." echo -ne "${INFO} ${str}..."
@ -639,56 +801,32 @@ if [[ "${forceDelete:-}" == true ]]; then
echo -e "${OVER} ${TICK} ${str}" echo -e "${OVER} ${TICK} ${str}"
fi fi
# --- removed in v5.0 ---
detect_pihole_blocking_status

# Determine which functions to run
if [[ "${skipDownload}" == false ]]; then
# Gravity needs to download blocklists
gravity_CheckDNSResolutionAvailable
gravity_GetBlocklistUrls
if [[ "${haveSourceUrls}" == true ]]; then
gravity_SetDownloadOptions
fi
gravity_ConsolidateDownloadedBlocklists
gravity_SortAndFilterConsolidatedList
else
# Gravity needs to modify Blacklist/Whitelist/Wildcards
echo -e " ${INFO} Using cached Event Horizon list..."
numberOf=$(printf "%'.0f" "$(wc -l < "${piholeDir}/${preEventHorizon}")")
echo -e " ${INFO} ${COL_BLUE}${numberOf}${COL_NC} unique domains trapped in the Event Horizon"
fi

# Perform when downloading blocklists, or modifying the whitelist
if [[ "${skipDownload}" == false ]] || [[ "${listType}" == "whitelist" ]]; then
gravity_Whitelist
fi

convert_wildcard_to_regex
gravity_ShowBlockCount

# Perform when downloading blocklists, or modifying the white/blacklist (not wildcards)
if [[ "${skipDownload}" == false ]] || [[ "${listType}" == *"list" ]]; then
str="Parsing domains into hosts format"
echo -ne " ${INFO} ${str}..."
gravity_ParseUserDomains
# Perform when downloading blocklists
if [[ ! "${listType:-}" == "blacklist" ]]; then
gravity_ParseLocalDomains
gravity_ParseBlacklistDomains
fi
echo -e "${OVER} ${TICK} ${str}"
fi

gravity_Cleanup
echo ""

# Determine if DNS has been restarted by this instance of gravity
if [[ -z "${dnsWasOffline:-}" ]]; then
# Use "force-reload" when restarting dnsmasq for everything but Wildcards
"${PIHOLE_COMMAND}" restartdns "${dnsRestartType:-force-reload}"
fi
"${PIHOLE_COMMAND}" status

# --- added in v5.0 ---
# Gravity downloads blocklists next
gravity_CheckDNSResolutionAvailable
gravity_DownloadBlocklists

# Create local.list
gravity_generateLocalList

# Migrate rest of the data from old to new database
gravity_swap_databases

# Update gravity timestamp
update_gravity_timestamp

# Ensure proper permissions are set for the database
chown pihole:pihole "${gravityDBfile}"
chmod g+w "${piholeDir}" "${gravityDBfile}"

# Compute numbers to be displayed
gravity_ShowCount

# Determine if DNS has been restarted by this instance of gravity
if [[ -z "${dnsWasOffline:-}" ]]; then
"${PIHOLE_COMMAND}" restartdns reload
fi

gravity_Cleanup
echo ""

"${PIHOLE_COMMAND}" status

@ -1,4 +1,4 @@
.TH "Pi-hole" "8" "Pi-hole" "Pi-hole" "May 2018" .TH "Pi-hole" "8" "Pi-hole" "Pi-hole" "April 2020"
.SH "NAME" .SH "NAME"
Pi-hole : A black-hole for internet advertisements Pi-hole : A black-hole for internet advertisements
@ -11,8 +11,6 @@ Pi-hole : A black-hole for internet advertisements
.br .br
\fBpihole -a\fR (\fB-c|-f|-k\fR) \fBpihole -a\fR (\fB-c|-f|-k\fR)
.br .br
\fBpihole -a\fR [\fB-r\fR hostrecord]
.br
\fBpihole -a -e\fR email \fBpihole -a -e\fR email
.br .br
\fBpihole -a -i\fR interface \fBpihole -a -i\fR interface
@ -43,7 +41,7 @@ pihole -g\fR
.br .br
pihole status pihole status
.br .br
pihole restartdns\fR pihole restartdns\fR [options]
.br .br
\fBpihole\fR (\fBenable\fR|\fBdisable\fR [time]) \fBpihole\fR (\fBenable\fR|\fBdisable\fR [time])
.br .br
@ -66,14 +64,24 @@ Available commands and options:
Adds or removes specified domain or domains to the blacklist Adds or removes specified domain or domains to the blacklist
.br .br
\fB--regex, regex\fR [options] [<regex1> <regex2 ...>]
.br
Adds or removes specified regex filter to the regex blacklist
.br
\fB--white-regex\fR [options] [<regex1> <regex2 ...>]
.br
Adds or removes specified regex filter to the regex whitelist
.br
\fB--wild, wildcard\fR [options] [<domain1> <domain2 ...>]
.br
Adds or removes specified domain to the wildcard blacklist
.br
\fB--white-wild\fR [options] [<domain1> <domain2 ...>]
.br
Adds or removes specified domain to the wildcard whitelist
.br
(Whitelist/Blacklist manipulation options):
@ -124,9 +132,6 @@ Available commands and options:
-f, fahrenheit Set Fahrenheit as preferred temperature unit -f, fahrenheit Set Fahrenheit as preferred temperature unit
.br .br
-k, kelvin Set Kelvin as preferred temperature unit -k, kelvin Set Kelvin as preferred temperature unit
.br
-r, hostrecord Add a name to the DNS associated to an
IPv4/IPv6 address
.br .br
-e, email Set an administrative contact address for the -e, email Set an administrative contact address for the
Block Page Block Page
@ -250,9 +255,16 @@ Available commands and options:
#m Disable Pi-hole functionality for # minute(s) #m Disable Pi-hole functionality for # minute(s)
.br .br
\fBrestartdns\fR [options]
.br
Full restart of Pi-hole subsystems
.br
(restart options):
.br
reload Updates the lists and flushes the DNS cache
.br
reload-lists Updates the lists WITHOUT flushing the DNS cache
.br
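Illustrative invocations of the three modes described above (shell examples, not part of the man-page source):
pihole restartdns              # full restart of the Pi-hole DNS subsystem (pihole-FTL)
pihole restartdns reload       # flush the DNS cache and reload the lists
pihole restartdns reload-lists # reload the lists without flushing the cache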
\fBcheckout\fR [repo] [branch] \fBcheckout\fR [repo] [branch]
@ -351,6 +363,12 @@ Switching Pi-hole subsystem branches
.br .br
Switch to core development branch Switch to core development branch
.br .br
\fBpihole arpflush\fR
.br
Flush information stored in Pi-hole's network tables
.br
.SH "SEE ALSO" .SH "SEE ALSO"
\fBlighttpd\fR(8), \fBpihole-FTL\fR(8) \fBlighttpd\fR(8), \fBpihole-FTL\fR(8)

@ -1,4 +1,4 @@
#!/bin/bash #!/usr/bin/env bash
# Pi-hole: A black hole for Internet advertisements # Pi-hole: A black hole for Internet advertisements
# (c) 2017 Pi-hole, LLC (https://pi-hole.net) # (c) 2017 Pi-hole, LLC (https://pi-hole.net)
@ -10,11 +10,9 @@
# Please see LICENSE file for your rights under this license. # Please see LICENSE file for your rights under this license.
readonly PI_HOLE_SCRIPT_DIR="/opt/pihole" readonly PI_HOLE_SCRIPT_DIR="/opt/pihole"
readonly gravitylist="/etc/pihole/gravity.list"
readonly blacklist="/etc/pihole/black.list"
# setupVars and PI_HOLE_BIN_DIR are not readonly here because in some funcitons (checkout), # setupVars and PI_HOLE_BIN_DIR are not readonly here because in some functions (checkout),
# it might get set again when the installer is sourced. This causes an # they might get set again when the installer is sourced. This causes an
# error due to modifying a readonly variable. # error due to modifying a readonly variable.
setupVars="/etc/pihole/setupVars.conf" setupVars="/etc/pihole/setupVars.conf"
PI_HOLE_BIN_DIR="/usr/local/bin" PI_HOLE_BIN_DIR="/usr/local/bin"
@ -57,6 +55,11 @@ flushFunc() {
exit 0 exit 0
} }
arpFunc() {
"${PI_HOLE_SCRIPT_DIR}"/piholeARPTable.sh "$@"
exit 0
}
updatePiholeFunc() { updatePiholeFunc() {
shift shift
"${PI_HOLE_SCRIPT_DIR}"/update.sh "$@" "${PI_HOLE_SCRIPT_DIR}"/update.sh "$@"
@ -102,17 +105,25 @@ restartDNS() {
svcOption="${1:-restart}" svcOption="${1:-restart}"
# --- removed in v5.0 ---
# Determine if we should reload or restart
if [[ "${svcOption}" =~ "reload" ]]; then
# Reload has been requested
# Note: This will NOT re-read any *.conf files
svc="killall -s SIGHUP ${resolver}"
else
# A full restart has been requested
svc="service ${resolver} restart"
fi
# Print output to Terminal, but not to Web Admin
str="${svcOption^}ing DNS service"

# --- added in v5.0 ---
# Determine if we should reload or restart
if [[ "${svcOption}" =~ "reload-lists" ]]; then
# Reloading of the lists has been requested
# Note: This will NOT re-read any *.conf files
# Note 2: We cannot use killall here as it does
# not know about real-time signals
svc="kill -SIGRTMIN $(pidof ${resolver})"
str="Reloading DNS lists"
elif [[ "${svcOption}" =~ "reload" ]]; then
# Reloading of the DNS cache has been requested
# Note: This will NOT re-read any *.conf files
svc="killall -s SIGHUP ${resolver}"
str="Flushing DNS cache"
else
# A full restart has been requested
svc="service ${resolver} restart"
str="Restarting DNS server"
fi
# Print output to Terminal, but not to Web Admin
[[ -t 1 ]] && echo -ne " ${INFO} ${str}..." [[ -t 1 ]] && echo -ne " ${INFO} ${str}..."
output=$( { ${svc}; } 2>&1 ) output=$( { ${svc}; } 2>&1 )
@ -145,14 +156,6 @@ Time:
echo -e " ${INFO} Blocking already disabled, nothing to do" echo -e " ${INFO} Blocking already disabled, nothing to do"
exit 0 exit 0
fi fi
if [[ -e "${gravitylist}" ]]; then
mv "${gravitylist}" "${gravitylist}.bck"
echo "" > "${gravitylist}"
fi
if [[ -e "${blacklist}" ]]; then
mv "${blacklist}" "${blacklist}.bck"
echo "" > "${blacklist}"
fi
if [[ $# > 1 ]]; then if [[ $# > 1 ]]; then
local error=false local error=false
if [[ "${2}" == *"s" ]]; then if [[ "${2}" == *"s" ]]; then
@ -201,12 +204,6 @@ Time:
echo -e " ${INFO} Enabling blocking" echo -e " ${INFO} Enabling blocking"
local str="Pi-hole Enabled" local str="Pi-hole Enabled"
if [[ -e "${gravitylist}.bck" ]]; then
mv "${gravitylist}.bck" "${gravitylist}"
fi
if [[ -e "${blacklist}.bck" ]]; then
mv "${blacklist}.bck" "${blacklist}"
fi
sed -i "/BLOCKING_ENABLED=/d" "${setupVars}" sed -i "/BLOCKING_ENABLED=/d" "${setupVars}"
echo "BLOCKING_ENABLED=true" >> "${setupVars}" echo "BLOCKING_ENABLED=true" >> "${setupVars}"
fi fi
@ -309,8 +306,8 @@ tailFunc() {
# Colour A/AAAA/DHCP strings as white # Colour A/AAAA/DHCP strings as white
# Colour everything else as gray # Colour everything else as gray
tail -f /var/log/pihole.log | sed -E \ tail -f /var/log/pihole.log | sed -E \
-e "s,($(date +'%b %d ')| dnsmasq[.*[0-9]]),,g" \ -e "s,($(date +'%b %d ')| dnsmasq\[[0-9]*\]),,g" \
-e "s,(.*(gravity.list|black.list|regex.list| config ).* is (0.0.0.0|::|NXDOMAIN|${IPV4_ADDRESS%/*}|${IPV6_ADDRESS:-NULL}).*),${COL_RED}&${COL_NC}," \ -e "s,(.*(blacklisted |gravity blocked ).* is (0.0.0.0|::|NXDOMAIN|${IPV4_ADDRESS%/*}|${IPV6_ADDRESS:-NULL}).*),${COL_RED}&${COL_NC}," \
-e "s,.*(query\\[A|DHCP).*,${COL_NC}&${COL_NC}," \ -e "s,.*(query\\[A|DHCP).*,${COL_NC}&${COL_NC}," \
-e "s,.*,${COL_GRAY}&${COL_NC}," -e "s,.*,${COL_GRAY}&${COL_NC},"
exit 0 exit 0
@ -383,8 +380,10 @@ Add '-h' after specific commands for more information on usage
Whitelist/Blacklist Options: Whitelist/Blacklist Options:
-w, whitelist Whitelist domain(s)
-b, blacklist Blacklist domain(s)
--regex, regex Regex blacklist domain(s)
--white-regex Regex whitelist domain(s)
--wild, wildcard Wildcard blacklist domain(s)
--white-wild Wildcard whitelist domain(s)
Add '-h' for more info on whitelist/blacklist usage
Debugging Options:
@ -414,9 +413,12 @@ Options:
enable Enable Pi-hole subsystems
disable Disable Pi-hole subsystems
Add '-h' for more info on disable usage
restartdns Full restart of Pi-hole subsystems
Add 'reload' to update the lists and flush the cache without restarting the DNS server
Add 'reload-lists' to only update the lists WITHOUT flushing the cache or restarting the DNS server
checkout Switch Pi-hole subsystems to a different Github branch
Add '-h' for more info on checkout usage
arpflush Flush information stored in Pi-hole's network tables";
exit 0 exit 0
} }
@ -443,8 +445,10 @@ fi
case "${1}" in case "${1}" in
"-w" | "whitelist" ) listFunc "$@";; "-w" | "whitelist" ) listFunc "$@";;
"-b" | "blacklist" ) listFunc "$@";; "-b" | "blacklist" ) listFunc "$@";;
"--wild" | "wildcard" ) listFunc "$@";; "--wild" | "wildcard" ) listFunc "$@";;
"--regex" | "regex" ) listFunc "$@";; "--regex" | "regex" ) listFunc "$@";;
"--white-regex" | "white-regex" ) listFunc "$@";;
"--white-wild" | "white-wild" ) listFunc "$@";;
"-d" | "debug" ) debugFunc "$@";; "-d" | "debug" ) debugFunc "$@";;
"-f" | "flush" ) flushFunc "$@";; "-f" | "flush" ) flushFunc "$@";;
"-up" | "updatePihole" ) updatePiholeFunc "$@";; "-up" | "updatePihole" ) updatePiholeFunc "$@";;
@ -465,5 +469,6 @@ case "${1}" in
"checkout" ) piholeCheckoutFunc "$@";; "checkout" ) piholeCheckoutFunc "$@";;
"tricorder" ) tricorderFunc;; "tricorder" ) tricorderFunc;;
"updatechecker" ) updateCheckFunc "$@";; "updatechecker" ) updateCheckFunc "$@";;
"arpflush" ) arpFunc "$@";;
* ) helpFunc;; * ) helpFunc;;
esac esac

@ -92,235 +92,16 @@ def test_setupVars_saved_to_file(Pihole):
assert "{}={}".format(k, v) in output assert "{}={}".format(k, v) in output
def test_configureFirewall_firewalld_running_no_errors(Pihole): def test_selinux_not_detected(Pihole):
''' '''
confirms firewalld rules are applied when firewallD is running confirms installer continues when SELinux configuration file does not exist
''' '''
# firewallD returns 'running' as status
mock_command('firewall-cmd', {'*': ('running', 0)}, Pihole)
# Whiptail dialog returns Ok for user prompt
mock_command('whiptail', {'*': ('', 0)}, Pihole)
configureFirewall = Pihole.run('''
source /opt/pihole/basic-install.sh
configureFirewall
''')
expected_stdout = 'Configuring FirewallD for httpd and pihole-FTL'
assert expected_stdout in configureFirewall.stdout
firewall_calls = Pihole.run('cat /var/log/firewall-cmd').stdout
assert 'firewall-cmd --state' in firewall_calls
assert ('firewall-cmd '
'--permanent '
'--add-service=http '
'--add-service=dns') in firewall_calls
assert 'firewall-cmd --reload' in firewall_calls
def test_configureFirewall_firewalld_disabled_no_errors(Pihole):
'''
confirms firewalld rules are not applied when firewallD is not running
'''
# firewallD returns non-running status
mock_command('firewall-cmd', {'*': ('not running', '1')}, Pihole)
configureFirewall = Pihole.run('''
source /opt/pihole/basic-install.sh
configureFirewall
''')
expected_stdout = ('No active firewall detected.. '
'skipping firewall configuration')
assert expected_stdout in configureFirewall.stdout
def test_configureFirewall_firewalld_enabled_declined_no_errors(Pihole):
'''
confirms firewalld rules are not applied when firewallD is running, user
declines ruleset
'''
# firewallD returns running status
mock_command('firewall-cmd', {'*': ('running', 0)}, Pihole)
# Whiptail dialog returns Cancel for user prompt
mock_command('whiptail', {'*': ('', 1)}, Pihole)
configureFirewall = Pihole.run('''
source /opt/pihole/basic-install.sh
configureFirewall
''')
expected_stdout = 'Not installing firewall rulesets.'
assert expected_stdout in configureFirewall.stdout
def test_configureFirewall_no_firewall(Pihole):
''' confirms firewall skipped no daemon is running '''
configureFirewall = Pihole.run('''
source /opt/pihole/basic-install.sh
configureFirewall
''')
expected_stdout = 'No active firewall detected'
assert expected_stdout in configureFirewall.stdout
def test_configureFirewall_IPTables_enabled_declined_no_errors(Pihole):
'''
confirms IPTables rules are not applied when IPTables is running, user
declines ruleset
'''
# iptables command exists
mock_command('iptables', {'*': ('', '0')}, Pihole)
# modinfo returns always true (ip_tables module check)
mock_command('modinfo', {'*': ('', '0')}, Pihole)
# Whiptail dialog returns Cancel for user prompt
mock_command('whiptail', {'*': ('', '1')}, Pihole)
configureFirewall = Pihole.run('''
source /opt/pihole/basic-install.sh
configureFirewall
''')
expected_stdout = 'Not installing firewall rulesets.'
assert expected_stdout in configureFirewall.stdout
def test_configureFirewall_IPTables_enabled_rules_exist_no_errors(Pihole):
'''
confirms IPTables rules are not applied when IPTables is running and rules
exist
'''
# iptables command exists and returns 0 on calls
# (should return 0 on iptables -C)
mock_command('iptables', {'-S': ('-P INPUT DENY', '0')}, Pihole)
# modinfo returns always true (ip_tables module check)
mock_command('modinfo', {'*': ('', '0')}, Pihole)
# Whiptail dialog returns Cancel for user prompt
mock_command('whiptail', {'*': ('', '0')}, Pihole)
configureFirewall = Pihole.run('''
source /opt/pihole/basic-install.sh
configureFirewall
''')
expected_stdout = 'Installing new IPTables firewall rulesets'
assert expected_stdout in configureFirewall.stdout
firewall_calls = Pihole.run('cat /var/log/iptables').stdout
# General call type occurrences
assert len(re.findall(r'iptables -S', firewall_calls)) == 1
assert len(re.findall(r'iptables -C', firewall_calls)) == 4
assert len(re.findall(r'iptables -I', firewall_calls)) == 0
# Specific port call occurrences
assert len(re.findall(r'tcp --dport 80', firewall_calls)) == 1
assert len(re.findall(r'tcp --dport 53', firewall_calls)) == 1
assert len(re.findall(r'udp --dport 53', firewall_calls)) == 1
assert len(re.findall(r'tcp --dport 4711:4720', firewall_calls)) == 1
def test_configureFirewall_IPTables_enabled_not_exist_no_errors(Pihole):
'''
confirms IPTables rules are applied when IPTables is running and rules do
not exist
'''
# iptables command and returns 0 on calls (should return 1 on iptables -C)
mock_command(
'iptables',
{
'-S': (
'-P INPUT DENY',
'0'
),
'-C': (
'',
1
),
'-I': (
'',
0
)
},
Pihole
)
# modinfo returns always true (ip_tables module check)
mock_command('modinfo', {'*': ('', '0')}, Pihole)
# Whiptail dialog returns Cancel for user prompt
mock_command('whiptail', {'*': ('', '0')}, Pihole)
configureFirewall = Pihole.run('''
source /opt/pihole/basic-install.sh
configureFirewall
''')
expected_stdout = 'Installing new IPTables firewall rulesets'
assert expected_stdout in configureFirewall.stdout
firewall_calls = Pihole.run('cat /var/log/iptables').stdout
# General call type occurrences
assert len(re.findall(r'iptables -S', firewall_calls)) == 1
assert len(re.findall(r'iptables -C', firewall_calls)) == 4
assert len(re.findall(r'iptables -I', firewall_calls)) == 4
# Specific port call occurrences
assert len(re.findall(r'tcp --dport 80', firewall_calls)) == 2
assert len(re.findall(r'tcp --dport 53', firewall_calls)) == 2
assert len(re.findall(r'udp --dport 53', firewall_calls)) == 2
assert len(re.findall(r'tcp --dport 4711:4720', firewall_calls)) == 2
def test_selinux_enforcing_default_exit(Pihole):
'''
confirms installer prompts to exit when SELinux is Enforcing by default
'''
# getenforce returns the running state of SELinux
mock_command('getenforce', {'*': ('Enforcing', '0')}, Pihole)
# Whiptail dialog returns Cancel for user prompt
mock_command('whiptail', {'*': ('', '1')}, Pihole)
check_selinux = Pihole.run('''
source /opt/pihole/basic-install.sh
checkSelinux
''')
expected_stdout = info_box + ' SELinux mode detected: Enforcing'
assert expected_stdout in check_selinux.stdout
expected_stdout = 'SELinux Enforcing detected, exiting installer'
assert expected_stdout in check_selinux.stdout
assert check_selinux.rc == 1
def test_selinux_enforcing_continue(Pihole):
'''
confirms installer prompts to continue with custom policy warning
'''
# getenforce returns the running state of SELinux
mock_command('getenforce', {'*': ('Enforcing', '0')}, Pihole)
# Whiptail dialog returns Continue for user prompt
mock_command('whiptail', {'*': ('', '0')}, Pihole)
check_selinux = Pihole.run('''
source /opt/pihole/basic-install.sh
checkSelinux
''')
expected_stdout = info_box + ' SELinux mode detected: Enforcing'
assert expected_stdout in check_selinux.stdout
expected_stdout = info_box + (' Continuing installation with SELinux '
'Enforcing')
assert expected_stdout in check_selinux.stdout
expected_stdout = info_box + (' Please refer to official SELinux '
'documentation to create a custom policy')
assert expected_stdout in check_selinux.stdout
assert check_selinux.rc == 0
def test_selinux_permissive(Pihole):
'''
confirms installer continues when SELinux is Permissive
'''
# getenforce returns the running state of SELinux
mock_command('getenforce', {'*': ('Permissive', '0')}, Pihole)
check_selinux = Pihole.run('''
source /opt/pihole/basic-install.sh
checkSelinux
''')
expected_stdout = info_box + ' SELinux mode detected: Permissive'
assert expected_stdout in check_selinux.stdout
assert check_selinux.rc == 0
def test_selinux_disabled(Pihole):
'''
confirms installer continues when SELinux is Disabled
'''
mock_command('getenforce', {'*': ('Disabled', '0')}, Pihole)
check_selinux = Pihole.run(''' check_selinux = Pihole.run('''
rm -f /etc/selinux/config
source /opt/pihole/basic-install.sh source /opt/pihole/basic-install.sh
checkSelinux checkSelinux
''') ''')
expected_stdout = info_box + ' SELinux mode detected: Disabled' expected_stdout = info_box + ' SELinux not detected'
assert expected_stdout in check_selinux.stdout assert expected_stdout in check_selinux.stdout
assert check_selinux.rc == 0 assert check_selinux.rc == 0
@ -338,7 +119,7 @@ def test_installPiholeWeb_fresh_install_no_errors(Pihole):
expected_stdout = tick_box + (' Creating directory for blocking page, ' expected_stdout = tick_box + (' Creating directory for blocking page, '
'and copying files') 'and copying files')
assert expected_stdout in installWeb.stdout assert expected_stdout in installWeb.stdout
expected_stdout = cross_box + ' Backing up index.lighttpd.html' expected_stdout = info_box + ' Backing up index.lighttpd.html'
assert expected_stdout in installWeb.stdout assert expected_stdout in installWeb.stdout
expected_stdout = ('No default index.lighttpd.html file found... ' expected_stdout = ('No default index.lighttpd.html file found... '
'not backing up') 'not backing up')
@ -668,3 +449,42 @@ def test_IPv6_ULA_GUA_test(Pihole):
''') ''')
expected_stdout = 'Found IPv6 ULA address, using it for blocking IPv6 ads' expected_stdout = 'Found IPv6 ULA address, using it for blocking IPv6 ads'
assert expected_stdout in detectPlatform.stdout assert expected_stdout in detectPlatform.stdout
def test_validate_ip_valid(Pihole):
'''
Given a valid IP address, valid_ip returns success
'''
output = Pihole.run('''
source /opt/pihole/basic-install.sh
valid_ip "192.168.1.1"
''')
assert output.rc == 0
def test_validate_ip_invalid_octet(Pihole):
'''
Given an invalid IP address (large octet), valid_ip returns an error
'''
output = Pihole.run('''
source /opt/pihole/basic-install.sh
valid_ip "1092.168.1.1"
''')
assert output.rc == 1
def test_validate_ip_invalid_letters(Pihole):
'''
Given an invalid IP address (contains letters), valid_ip returns an error
'''
output = Pihole.run('''
source /opt/pihole/basic-install.sh
valid_ip "not an IP"
''')
assert output.rc == 1

@ -8,6 +8,69 @@ from conftest import (
) )
def mock_selinux_config(state, Pihole):
'''
Creates a mock SELinux config file with expected content
'''
# validate state string
valid_states = ['enforcing', 'permissive', 'disabled']
assert state in valid_states
# getenforce returns the running state of SELinux
mock_command('getenforce', {'*': (state.capitalize(), '0')}, Pihole)
# create mock configuration with desired content
Pihole.run('''
mkdir /etc/selinux
echo "SELINUX={state}" > /etc/selinux/config
'''.format(state=state.lower()))
@pytest.mark.parametrize("tag", [('centos'), ('fedora'), ])
def test_selinux_enforcing_exit(Pihole):
'''
confirms installer prompts to exit when SELinux is Enforcing by default
'''
mock_selinux_config("enforcing", Pihole)
check_selinux = Pihole.run('''
source /opt/pihole/basic-install.sh
checkSelinux
''')
expected_stdout = cross_box + ' Current SELinux: Enforcing'
assert expected_stdout in check_selinux.stdout
expected_stdout = 'SELinux Enforcing detected, exiting installer'
assert expected_stdout in check_selinux.stdout
assert check_selinux.rc == 1
@pytest.mark.parametrize("tag", [('centos'), ('fedora'), ])
def test_selinux_permissive(Pihole):
'''
confirms installer continues when SELinux is Permissive
'''
mock_selinux_config("permissive", Pihole)
check_selinux = Pihole.run('''
source /opt/pihole/basic-install.sh
checkSelinux
''')
expected_stdout = tick_box + ' Current SELinux: Permissive'
assert expected_stdout in check_selinux.stdout
assert check_selinux.rc == 0
@pytest.mark.parametrize("tag", [('centos'), ('fedora'), ])
def test_selinux_disabled(Pihole):
'''
confirms installer continues when SELinux is Disabled
'''
mock_selinux_config("disabled", Pihole)
check_selinux = Pihole.run('''
source /opt/pihole/basic-install.sh
checkSelinux
''')
expected_stdout = tick_box + ' Current SELinux: Disabled'
assert expected_stdout in check_selinux.stdout
assert check_selinux.rc == 0
@pytest.mark.parametrize("tag", [('fedora'), ]) @pytest.mark.parametrize("tag", [('fedora'), ])
def test_epel_and_remi_not_installed_fedora(Pihole): def test_epel_and_remi_not_installed_fedora(Pihole):
''' '''
