Merge pull request #3152 from pi-hole/release/v5.0

Update development with latest release/v5.0 changes
Adam Warner committed 4 years ago (via GitHub)
commit e6bfb0fe17

@@ -1,4 +0,0 @@
-# These are supported funding model platforms
-
-patreon: pihole
-custom: https://pi-hole.net/donate

@@ -87,4 +87,21 @@ upgrade_gravityDB(){
     sqlite3 "${database}" < "${scriptPath}/8_to_9.sql"
     version=9
   fi
+  if [[ "$version" == "9" ]]; then
+    # This migration drops unused tables and creates triggers to remove
+    # obsolete group assignments when the linked items are deleted
+    echo -e " ${INFO} Upgrading gravity database from version 9 to 10"
+    sqlite3 "${database}" < "${scriptPath}/9_to_10.sql"
+    version=10
+  fi
+  if [[ "$version" == "10" ]]; then
+    # This adds timestamp and an optional comment field to the client table.
+    # These fields are only temporary and will be replaced by the columns
+    # defined in gravity.db.sql during gravity swapping. We add them here
+    # to keep the copying process generic (it needs the same columns in both
+    # the source and the destination databases).
+    echo -e " ${INFO} Upgrading gravity database from version 10 to 11"
+    sqlite3 "${database}" < "${scriptPath}/10_to_11.sql"
+    version=11
+  fi
 }
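Each upgrade step above is plain SQL applied with sqlite3, so a stuck upgrade can be inspected or replayed by hand — a sketch, assuming the stock /etc/pihole location and the same ${scriptPath} the function uses:

    # Inspect the schema version the upgrade loop dispatches on
    sqlite3 /etc/pihole/gravity.db "SELECT value FROM info WHERE property = 'version';"
    # Replay a single step manually, e.g. 9 -> 10
    sqlite3 /etc/pihole/gravity.db < "${scriptPath}/9_to_10.sql"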

@@ -0,0 +1,16 @@
+.timeout 30000
+
+BEGIN TRANSACTION;
+
+ALTER TABLE client ADD COLUMN date_added INTEGER;
+ALTER TABLE client ADD COLUMN date_modified INTEGER;
+ALTER TABLE client ADD COLUMN comment TEXT;
+
+CREATE TRIGGER tr_client_update AFTER UPDATE ON client
+    BEGIN
+      UPDATE client SET date_modified = (cast(strftime('%s', 'now') as int)) WHERE id = NEW.id;
+    END;
+
+UPDATE info SET value = 11 WHERE property = 'version';
+
+COMMIT;
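The new tr_client_update trigger stamps date_modified on every client update; a quick hypothetical check (assumes a client row with id 1 exists):

    sqlite3 /etc/pihole/gravity.db "UPDATE client SET comment = 'test edit' WHERE id = 1;"
    sqlite3 /etc/pihole/gravity.db "SELECT datetime(date_modified,'unixepoch','localtime') FROM client WHERE id = 1;"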

@@ -0,0 +1,29 @@
+.timeout 30000
+
+PRAGMA FOREIGN_KEYS=OFF;
+
+BEGIN TRANSACTION;
+
+DROP TABLE IF EXISTS whitelist;
+DROP TABLE IF EXISTS blacklist;
+DROP TABLE IF EXISTS regex_whitelist;
+DROP TABLE IF EXISTS regex_blacklist;
+
+CREATE TRIGGER tr_domainlist_delete AFTER DELETE ON domainlist
+    BEGIN
+      DELETE FROM domainlist_by_group WHERE domainlist_id = OLD.id;
+    END;
+
+CREATE TRIGGER tr_adlist_delete AFTER DELETE ON adlist
+    BEGIN
+      DELETE FROM adlist_by_group WHERE adlist_id = OLD.id;
+    END;
+
+CREATE TRIGGER tr_client_delete AFTER DELETE ON client
+    BEGIN
+      DELETE FROM client_by_group WHERE client_id = OLD.id;
+    END;
+
+UPDATE info SET value = 10 WHERE property = 'version';
+
+COMMIT;
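These delete triggers take over the cleanup of group assignments when a linked row disappears; a sketch with a made-up row id:

    sqlite3 /etc/pihole/gravity.db "DELETE FROM domainlist WHERE id = 42;"
    sqlite3 /etc/pihole/gravity.db "SELECT COUNT(*) FROM domainlist_by_group WHERE domainlist_id = 42;"  # expect 0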

@@ -662,19 +662,21 @@ ping_internet() {
 }

 compare_port_to_service_assigned() {
-    local service_name="${1}"
-    # The programs we use may change at some point, so they are in a variable here
-    local resolver="pihole-FTL"
-    local web_server="lighttpd"
-    local ftl="pihole-FTL"
+    local service_name
+    local expected_service
+    local port
+
+    service_name="${2}"
+    expected_service="${1}"
+    port="${3}"

     # If the service is a Pi-hole service, highlight it in green
-    if [[ "${service_name}" == "${resolver}" ]] || [[ "${service_name}" == "${web_server}" ]] || [[ "${service_name}" == "${ftl}" ]]; then
-        log_write "[${COL_GREEN}${port_number}${COL_NC}] is in use by ${COL_GREEN}${service_name}${COL_NC}"
+    if [[ "${service_name}" == "${expected_service}" ]]; then
+        log_write "[${COL_GREEN}${port}${COL_NC}] is in use by ${COL_GREEN}${service_name}${COL_NC}"
     # Otherwise,
     else
         # Show the service name in red since it's non-standard
-        log_write "[${COL_RED}${port_number}${COL_NC}] is in use by ${COL_RED}${service_name}${COL_NC} (${FAQ_HARDWARE_REQUIREMENTS_PORTS})"
+        log_write "[${COL_RED}${port}${COL_NC}] is in use by ${COL_RED}${service_name}${COL_NC} (${FAQ_HARDWARE_REQUIREMENTS_PORTS})"
     fi
 }
@@ -708,11 +710,11 @@ check_required_ports() {
         fi
         # Use a case statement to determine if the right services are using the right ports
         case "$(echo "$port_number" | rev | cut -d: -f1 | rev)" in
-            53) compare_port_to_service_assigned "${resolver}"
+            53) compare_port_to_service_assigned "${resolver}" "${service_name}" 53
                 ;;
-            80) compare_port_to_service_assigned "${web_server}"
+            80) compare_port_to_service_assigned "${web_server}" "${service_name}" 80
                 ;;
-            4711) compare_port_to_service_assigned "${ftl}"
+            4711) compare_port_to_service_assigned "${ftl}" "${service_name}" 4711
                 ;;
             # If it's not a default port that Pi-hole needs, just print it out for the user to see
             *) log_write "${port_number} ${service_name} (${protocol_type})";
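After the refactor, the caller passes the expected service, the detected service, and the port, and the helper simply compares the first two — hypothetical calls for illustration:

    compare_port_to_service_assigned "pihole-FTL" "pihole-FTL" 53  # match: port logged in green
    compare_port_to_service_assigned "pihole-FTL" "dnsmasq" 53     # mismatch: port logged in red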
@@ -1105,7 +1107,7 @@ show_db_entries() {
 }

 show_groups() {
-    show_db_entries "Groups" "SELECT * FROM \"group\"" "4 4 30 50"
+    show_db_entries "Groups" "SELECT id,name,enabled,datetime(date_added,'unixepoch','localtime') date_added,datetime(date_modified,'unixepoch','localtime') date_modified,description FROM \"group\"" "4 50 7 19 19 50"
 }

 show_adlists() {
@@ -1113,18 +1115,14 @@ show_adlists() {
     show_db_entries "Adlist groups" "SELECT * FROM adlist_by_group" "4 4"
 }

-show_whitelist() {
-    show_db_entries "Exact whitelist" "SELECT id,domain,enabled,datetime(date_added,'unixepoch','localtime') date_added,datetime(date_modified,'unixepoch','localtime') date_modified,comment FROM whitelist" "4 100 7 19 19 50"
-    show_db_entries "Exact whitelist groups" "SELECT * FROM whitelist_by_group" "4 4"
-    show_db_entries "Regex whitelist" "SELECT id,domain,enabled,datetime(date_added,'unixepoch','localtime') date_added,datetime(date_modified,'unixepoch','localtime') date_modified,comment FROM regex_whitelist" "4 100 7 19 19 50"
-    show_db_entries "Regex whitelist groups" "SELECT * FROM regex_whitelist_by_group" "4 4"
-}
-
-show_blacklist() {
-    show_db_entries "Exact blacklist" "SELECT id,domain,enabled,datetime(date_added,'unixepoch','localtime') date_added,datetime(date_modified,'unixepoch','localtime') date_modified,comment FROM blacklist" "4 100 7 19 19 50"
-    show_db_entries "Exact blacklist groups" "SELECT * FROM blacklist_by_group" "4 4"
-    show_db_entries "Regex blacklist" "SELECT id,domain,enabled,datetime(date_added,'unixepoch','localtime') date_added,datetime(date_modified,'unixepoch','localtime') date_modified,comment FROM regex_blacklist" "4 100 7 19 19 50"
-    show_db_entries "Regex blacklist groups" "SELECT * FROM regex_blacklist_by_group" "4 4"
-}
+show_domainlist() {
+    show_db_entries "Domainlist (0/1 = exact white-/blacklist, 2/3 = regex white-/blacklist)" "SELECT id,type,domain,enabled,datetime(date_added,'unixepoch','localtime') date_added,datetime(date_modified,'unixepoch','localtime') date_modified,comment FROM domainlist" "4 4 100 7 19 19 50"
+    show_db_entries "Domainlist groups" "SELECT * FROM domainlist_by_group" "10 10"
+}
+
+show_clients() {
+    show_db_entries "Clients" "SELECT id,ip,datetime(date_added,'unixepoch','localtime') date_added,datetime(date_modified,'unixepoch','localtime') date_modified,comment FROM client" "4 100 19 19 50"
+    show_db_entries "Client groups" "SELECT * FROM client_by_group" "10 10"
+}

 analyze_gravity_list() {
@@ -1134,16 +1132,17 @@ analyze_gravity_list() {
     gravity_permissions=$(ls -ld "${PIHOLE_GRAVITY_DB_FILE}")
     log_write "${COL_GREEN}${gravity_permissions}${COL_NC}"

-    local gravity_size
-    gravity_size=$(sqlite3 "${PIHOLE_GRAVITY_DB_FILE}" "SELECT COUNT(*) FROM vw_gravity")
-    log_write "   Size (excluding blacklist): ${COL_CYAN}${gravity_size}${COL_NC} entries"
+    show_db_entries "Info table" "SELECT property,value FROM info" "20 40"
+    gravity_updated_raw="$(sqlite3 "${PIHOLE_GRAVITY_DB_FILE}" "SELECT value FROM info where property = 'updated'")"
+    gravity_updated="$(date -d @"${gravity_updated_raw}")"
+    log_write "   Last gravity run finished at: ${COL_CYAN}${gravity_updated}${COL_NC}"
     log_write ""

     OLD_IFS="$IFS"
     IFS=$'\r\n'
     local gravity_sample=()
     mapfile -t gravity_sample < <(sqlite3 "${PIHOLE_GRAVITY_DB_FILE}" "SELECT domain FROM vw_gravity LIMIT 10")
-    log_write "   ${COL_CYAN}----- First 10 Domains -----${COL_NC}"
+    log_write "   ${COL_CYAN}----- First 10 Gravity Domains -----${COL_NC}"

     for line in "${gravity_sample[@]}"; do
         log_write "   ${line}"
@@ -1301,9 +1300,9 @@ parse_setup_vars
 check_x_headers
 analyze_gravity_list
 show_groups
+show_domainlist
+show_clients
 show_adlists
-show_whitelist
-show_blacklist
 show_content_of_pihole_files
 parse_locale
 analyze_pihole_log

@@ -198,6 +198,14 @@ main() {
             ${PI_HOLE_FILES_DIR}/automated\ install/basic-install.sh --reconfigure --unattended || \
                 echo -e "${basicError}" && exit 1
     fi

+    if [[ "${FTL_update}" == true || "${core_update}" == true || "${web_update}" == true ]]; then
+        # Force an update of the updatechecker
+        /opt/pihole/updatecheck.sh
+        /opt/pihole/updatecheck.sh x remote
+        echo -e " ${INFO} Local version file information updated."
+    fi
+
     echo ""
     exit 0
 }
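updatecheck.sh refreshes the cached local and remote version information that tools such as pihole -v report; a sketch of verifying the effect after an update:

    /opt/pihole/updatecheck.sh           # refresh local version cache
    /opt/pihole/updatecheck.sh x remote  # refresh remote version cache
    pihole -v                            # should now report the freshly installed versions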

@ -1,16 +1,21 @@
PRAGMA FOREIGN_KEYS=ON; PRAGMA foreign_keys=OFF;
BEGIN TRANSACTION;
CREATE TABLE "group" CREATE TABLE "group"
( (
id INTEGER PRIMARY KEY AUTOINCREMENT, id INTEGER PRIMARY KEY AUTOINCREMENT,
enabled BOOLEAN NOT NULL DEFAULT 1, enabled BOOLEAN NOT NULL DEFAULT 1,
name TEXT NOT NULL, name TEXT UNIQUE NOT NULL,
date_added INTEGER NOT NULL DEFAULT (cast(strftime('%s', 'now') as int)),
date_modified INTEGER NOT NULL DEFAULT (cast(strftime('%s', 'now') as int)),
description TEXT description TEXT
); );
INSERT INTO "group" (id,enabled,name) VALUES (0,1,'Unassociated');
CREATE TABLE whitelist CREATE TABLE domainlist
( (
id INTEGER PRIMARY KEY AUTOINCREMENT, id INTEGER PRIMARY KEY AUTOINCREMENT,
type INTEGER NOT NULL DEFAULT 0,
domain TEXT UNIQUE NOT NULL, domain TEXT UNIQUE NOT NULL,
enabled BOOLEAN NOT NULL DEFAULT 1, enabled BOOLEAN NOT NULL DEFAULT 1,
date_added INTEGER NOT NULL DEFAULT (cast(strftime('%s', 'now') as int)), date_added INTEGER NOT NULL DEFAULT (cast(strftime('%s', 'now') as int)),
@@ -18,125 +23,166 @@ CREATE TABLE whitelist
     comment TEXT
 );

-CREATE TABLE whitelist_by_group
-(
-    whitelist_id INTEGER NOT NULL REFERENCES whitelist (id),
-    group_id INTEGER NOT NULL REFERENCES "group" (id),
-    PRIMARY KEY (whitelist_id, group_id)
-);
-
-CREATE TABLE blacklist
+CREATE TABLE adlist
 (
     id INTEGER PRIMARY KEY AUTOINCREMENT,
-    domain TEXT UNIQUE NOT NULL,
+    address TEXT UNIQUE NOT NULL,
     enabled BOOLEAN NOT NULL DEFAULT 1,
     date_added INTEGER NOT NULL DEFAULT (cast(strftime('%s', 'now') as int)),
     date_modified INTEGER NOT NULL DEFAULT (cast(strftime('%s', 'now') as int)),
     comment TEXT
 );

-CREATE TABLE blacklist_by_group
+CREATE TABLE adlist_by_group
 (
-    blacklist_id INTEGER NOT NULL REFERENCES blacklist (id),
+    adlist_id INTEGER NOT NULL REFERENCES adlist (id),
     group_id INTEGER NOT NULL REFERENCES "group" (id),
-    PRIMARY KEY (blacklist_id, group_id)
+    PRIMARY KEY (adlist_id, group_id)
 );

-CREATE TABLE regex
+CREATE TABLE gravity
+(
+    domain TEXT NOT NULL,
+    adlist_id INTEGER NOT NULL REFERENCES adlist (id)
+);
+
+CREATE TABLE info
+(
+    property TEXT PRIMARY KEY,
+    value TEXT NOT NULL
+);
+
+INSERT INTO "info" VALUES('version','11');
+
+CREATE TABLE domain_audit
 (
     id INTEGER PRIMARY KEY AUTOINCREMENT,
     domain TEXT UNIQUE NOT NULL,
-    enabled BOOLEAN NOT NULL DEFAULT 1,
-    date_added INTEGER NOT NULL DEFAULT (cast(strftime('%s', 'now') as int)),
-    date_modified INTEGER NOT NULL DEFAULT (cast(strftime('%s', 'now') as int)),
-    comment TEXT
+    date_added INTEGER NOT NULL DEFAULT (cast(strftime('%s', 'now') as int))
 );

-CREATE TABLE regex_by_group
+CREATE TABLE domainlist_by_group
 (
-    regex_id INTEGER NOT NULL REFERENCES regex (id),
+    domainlist_id INTEGER NOT NULL REFERENCES domainlist (id),
     group_id INTEGER NOT NULL REFERENCES "group" (id),
-    PRIMARY KEY (regex_id, group_id)
+    PRIMARY KEY (domainlist_id, group_id)
 );

-CREATE TABLE adlist
+CREATE TABLE client
 (
     id INTEGER PRIMARY KEY AUTOINCREMENT,
-    address TEXT UNIQUE NOT NULL,
-    enabled BOOLEAN NOT NULL DEFAULT 1,
+    ip TEXT NOT NULL UNIQUE,
     date_added INTEGER NOT NULL DEFAULT (cast(strftime('%s', 'now') as int)),
     date_modified INTEGER NOT NULL DEFAULT (cast(strftime('%s', 'now') as int)),
     comment TEXT
 );

-CREATE TABLE adlist_by_group
+CREATE TABLE client_by_group
 (
-    adlist_id INTEGER NOT NULL REFERENCES adlist (id),
+    client_id INTEGER NOT NULL REFERENCES client (id),
     group_id INTEGER NOT NULL REFERENCES "group" (id),
-    PRIMARY KEY (adlist_id, group_id)
+    PRIMARY KEY (client_id, group_id)
 );

-CREATE TABLE gravity
-(
-    domain TEXT PRIMARY KEY
-);
-
-CREATE TABLE info
-(
-    property TEXT PRIMARY KEY,
-    value TEXT NOT NULL
-);
-
-INSERT INTO info VALUES("version","1");
+CREATE TRIGGER tr_adlist_update AFTER UPDATE ON adlist
+    BEGIN
+      UPDATE adlist SET date_modified = (cast(strftime('%s', 'now') as int)) WHERE address = NEW.address;
+    END;
+
+CREATE TRIGGER tr_client_update AFTER UPDATE ON client
+    BEGIN
+      UPDATE client SET date_modified = (cast(strftime('%s', 'now') as int)) WHERE ip = NEW.ip;
+    END;
+
+CREATE TRIGGER tr_domainlist_update AFTER UPDATE ON domainlist
+    BEGIN
+      UPDATE domainlist SET date_modified = (cast(strftime('%s', 'now') as int)) WHERE domain = NEW.domain;
+    END;

-CREATE VIEW vw_whitelist AS SELECT DISTINCT domain
-    FROM whitelist
-    LEFT JOIN whitelist_by_group ON whitelist_by_group.whitelist_id = whitelist.id
-    LEFT JOIN "group" ON "group".id = whitelist_by_group.group_id
-    WHERE whitelist.enabled = 1 AND (whitelist_by_group.group_id IS NULL OR "group".enabled = 1)
-    ORDER BY whitelist.id;
+CREATE VIEW vw_whitelist AS SELECT domain, domainlist.id AS id, domainlist_by_group.group_id AS group_id
+    FROM domainlist
+    LEFT JOIN domainlist_by_group ON domainlist_by_group.domainlist_id = domainlist.id
+    LEFT JOIN "group" ON "group".id = domainlist_by_group.group_id
+    WHERE domainlist.enabled = 1 AND (domainlist_by_group.group_id IS NULL OR "group".enabled = 1)
+    AND domainlist.type = 0
+    ORDER BY domainlist.id;
+
+CREATE VIEW vw_blacklist AS SELECT domain, domainlist.id AS id, domainlist_by_group.group_id AS group_id
+    FROM domainlist
+    LEFT JOIN domainlist_by_group ON domainlist_by_group.domainlist_id = domainlist.id
+    LEFT JOIN "group" ON "group".id = domainlist_by_group.group_id
+    WHERE domainlist.enabled = 1 AND (domainlist_by_group.group_id IS NULL OR "group".enabled = 1)
+    AND domainlist.type = 1
+    ORDER BY domainlist.id;
+
+CREATE VIEW vw_regex_whitelist AS SELECT domain, domainlist.id AS id, domainlist_by_group.group_id AS group_id
+    FROM domainlist
+    LEFT JOIN domainlist_by_group ON domainlist_by_group.domainlist_id = domainlist.id
+    LEFT JOIN "group" ON "group".id = domainlist_by_group.group_id
+    WHERE domainlist.enabled = 1 AND (domainlist_by_group.group_id IS NULL OR "group".enabled = 1)
+    AND domainlist.type = 2
+    ORDER BY domainlist.id;
+
+CREATE VIEW vw_regex_blacklist AS SELECT domain, domainlist.id AS id, domainlist_by_group.group_id AS group_id
+    FROM domainlist
+    LEFT JOIN domainlist_by_group ON domainlist_by_group.domainlist_id = domainlist.id
+    LEFT JOIN "group" ON "group".id = domainlist_by_group.group_id
+    WHERE domainlist.enabled = 1 AND (domainlist_by_group.group_id IS NULL OR "group".enabled = 1)
+    AND domainlist.type = 3
+    ORDER BY domainlist.id;
+
+CREATE VIEW vw_gravity AS SELECT domain, adlist_by_group.group_id AS group_id
+    FROM gravity
+    LEFT JOIN adlist_by_group ON adlist_by_group.adlist_id = gravity.adlist_id
+    LEFT JOIN adlist ON adlist.id = gravity.adlist_id
+    LEFT JOIN "group" ON "group".id = adlist_by_group.group_id
+    WHERE adlist.enabled = 1 AND (adlist_by_group.group_id IS NULL OR "group".enabled = 1);

-CREATE TRIGGER tr_whitelist_update AFTER UPDATE ON whitelist
+CREATE VIEW vw_adlist AS SELECT DISTINCT address, adlist.id AS id
+    FROM adlist
+    LEFT JOIN adlist_by_group ON adlist_by_group.adlist_id = adlist.id
+    LEFT JOIN "group" ON "group".id = adlist_by_group.group_id
+    WHERE adlist.enabled = 1 AND (adlist_by_group.group_id IS NULL OR "group".enabled = 1)
+    ORDER BY adlist.id;
+
+CREATE TRIGGER tr_domainlist_add AFTER INSERT ON domainlist
     BEGIN
-      UPDATE whitelist SET date_modified = (cast(strftime('%s', 'now') as int)) WHERE domain = NEW.domain;
+      INSERT INTO domainlist_by_group (domainlist_id, group_id) VALUES (NEW.id, 0);
     END;

-CREATE VIEW vw_blacklist AS SELECT DISTINCT domain
-    FROM blacklist
-    LEFT JOIN blacklist_by_group ON blacklist_by_group.blacklist_id = blacklist.id
-    LEFT JOIN "group" ON "group".id = blacklist_by_group.group_id
-    WHERE blacklist.enabled = 1 AND (blacklist_by_group.group_id IS NULL OR "group".enabled = 1)
-    ORDER BY blacklist.id;
+CREATE TRIGGER tr_client_add AFTER INSERT ON client
+    BEGIN
+      INSERT INTO client_by_group (client_id, group_id) VALUES (NEW.id, 0);
+    END;

-CREATE TRIGGER tr_blacklist_update AFTER UPDATE ON blacklist
+CREATE TRIGGER tr_adlist_add AFTER INSERT ON adlist
     BEGIN
-      UPDATE blacklist SET date_modified = (cast(strftime('%s', 'now') as int)) WHERE domain = NEW.domain;
+      INSERT INTO adlist_by_group (adlist_id, group_id) VALUES (NEW.id, 0);
     END;

-CREATE VIEW vw_regex AS SELECT DISTINCT domain
-    FROM regex
-    LEFT JOIN regex_by_group ON regex_by_group.regex_id = regex.id
-    LEFT JOIN "group" ON "group".id = regex_by_group.group_id
-    WHERE regex.enabled = 1 AND (regex_by_group.group_id IS NULL OR "group".enabled = 1)
-    ORDER BY regex.id;
+CREATE TRIGGER tr_group_update AFTER UPDATE ON "group"
+    BEGIN
+      UPDATE "group" SET date_modified = (cast(strftime('%s', 'now') as int)) WHERE id = NEW.id;
+    END;

-CREATE TRIGGER tr_regex_update AFTER UPDATE ON regex
+CREATE TRIGGER tr_group_zero AFTER DELETE ON "group"
     BEGIN
-      UPDATE regex SET date_modified = (cast(strftime('%s', 'now') as int)) WHERE domain = NEW.domain;
+      INSERT OR IGNORE INTO "group" (id,enabled,name) VALUES (0,1,'Unassociated');
     END;

-CREATE VIEW vw_adlist AS SELECT DISTINCT address
-    FROM adlist
-    LEFT JOIN adlist_by_group ON adlist_by_group.adlist_id = adlist.id
-    LEFT JOIN "group" ON "group".id = adlist_by_group.group_id
-    WHERE adlist.enabled = 1 AND (adlist_by_group.group_id IS NULL OR "group".enabled = 1)
-    ORDER BY adlist.id;
+CREATE TRIGGER tr_domainlist_delete AFTER DELETE ON domainlist
+    BEGIN
+      DELETE FROM domainlist_by_group WHERE domainlist_id = OLD.id;
+    END;

-CREATE TRIGGER tr_adlist_update AFTER UPDATE ON adlist
+CREATE TRIGGER tr_adlist_delete AFTER DELETE ON adlist
     BEGIN
-      UPDATE adlist SET date_modified = (cast(strftime('%s', 'now') as int)) WHERE address = NEW.address;
+      DELETE FROM adlist_by_group WHERE adlist_id = OLD.id;
     END;

-CREATE VIEW vw_gravity AS SELECT domain
-    FROM gravity
-    WHERE domain NOT IN (SELECT domain from vw_whitelist);
+CREATE TRIGGER tr_client_delete AFTER DELETE ON client
+    BEGIN
+      DELETE FROM client_by_group WHERE client_id = OLD.id;
+    END;
+
+COMMIT;
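The single domainlist table replaces the four per-list tables; the type column selects the list (0 = exact whitelist, 1 = exact blacklist, 2 = regex whitelist, 3 = regex blacklist) and the per-type views expose the old names. A sketch of adding an exact whitelist entry by hand (normally pihole -w does this):

    # type 0 = exact whitelist; tr_domainlist_add assigns group 0 automatically
    sqlite3 /etc/pihole/gravity.db \
      "INSERT INTO domainlist (type, domain, comment) VALUES (0, 'example.com', 'added by hand');"
    sqlite3 /etc/pihole/gravity.db "SELECT * FROM vw_whitelist WHERE domain = 'example.com';"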

@@ -0,0 +1,42 @@
+.timeout 30000
+
+ATTACH DATABASE '/etc/pihole/gravity.db' AS OLD;
+
+BEGIN TRANSACTION;
+
+DROP TRIGGER tr_domainlist_add;
+DROP TRIGGER tr_client_add;
+DROP TRIGGER tr_adlist_add;
+
+INSERT OR REPLACE INTO "group" SELECT * FROM OLD."group";
+INSERT OR REPLACE INTO domain_audit SELECT * FROM OLD.domain_audit;
+INSERT OR REPLACE INTO domainlist SELECT * FROM OLD.domainlist;
+INSERT OR REPLACE INTO domainlist_by_group SELECT * FROM OLD.domainlist_by_group;
+INSERT OR REPLACE INTO adlist SELECT * FROM OLD.adlist;
+INSERT OR REPLACE INTO adlist_by_group SELECT * FROM OLD.adlist_by_group;
+INSERT OR REPLACE INTO info SELECT * FROM OLD.info;
+INSERT OR REPLACE INTO client SELECT * FROM OLD.client;
+INSERT OR REPLACE INTO client_by_group SELECT * FROM OLD.client_by_group;
+
+CREATE TRIGGER tr_domainlist_add AFTER INSERT ON domainlist
+    BEGIN
+      INSERT INTO domainlist_by_group (domainlist_id, group_id) VALUES (NEW.id, 0);
+    END;
+
+CREATE TRIGGER tr_client_add AFTER INSERT ON client
+    BEGIN
+      INSERT INTO client_by_group (client_id, group_id) VALUES (NEW.id, 0);
+    END;
+
+CREATE TRIGGER tr_adlist_add AFTER INSERT ON adlist
+    BEGIN
+      INSERT INTO adlist_by_group (adlist_id, group_id) VALUES (NEW.id, 0);
+    END;
+
+COMMIT;
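gravity_copy.sql runs against the freshly built database and pulls the user's data across via ATTACH; the whole swap that gravity.sh performs boils down to (a sketch using the paths defined in this PR):

    sqlite3 /etc/pihole/gravity_temp.db < /etc/.pihole/advanced/Templates/gravity.db.sql    # fresh schema
    sqlite3 /etc/pihole/gravity_temp.db < /etc/.pihole/advanced/Templates/gravity_copy.sql  # copy user data from the old DB
    mv /etc/pihole/gravity_temp.db /etc/pihole/gravity.db                                   # replace the live database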

@@ -96,12 +96,6 @@ if ($serverName === "pi.hole") {
 // Define admin email address text based off $svEmail presence
 $bpAskAdmin = !empty($svEmail) ? '<a href="mailto:'.$svEmail.'?subject=Site Blocked: '.$serverName.'"></a>' : "<span/>";

-// Determine if at least one block list has been generated
-$blocklistglob = glob("/etc/pihole/list.0.*.domains");
-if ($blocklistglob === array()) {
-    die("[ERROR] There are no domain lists generated lists within <code>/etc/pihole/</code>! Please update gravity by running <code>pihole -g</code>, or repair Pi-hole using <code>pihole -r</code>.");
-}
-
 // Get possible non-standard location of FTL's database
 $FTLsettings = parse_ini_file("/etc/pihole/pihole-FTL.conf");
 if (isset($FTLsettings["GRAVITYDB"])) {

@@ -247,7 +247,7 @@ if is_command apt-get ; then
     PIHOLE_DEPS=(cron curl dnsutils iputils-ping lsof netcat psmisc sudo unzip wget idn2 sqlite3 libcap2-bin dns-root-data resolvconf libcap2)
     # The Web dashboard has some that also need to be installed
     # It's useful to separate the two since our repos are also setup as "Core" code and "Web" code
-    PIHOLE_WEB_DEPS=(lighttpd "${phpVer}-common" "${phpVer}-cgi" "${phpVer}-${phpSqlite}" "${phpVer}-xml" "${phpVer}-intl")
+    PIHOLE_WEB_DEPS=(lighttpd "${phpVer}-common" "${phpVer}-cgi" "${phpVer}-${phpSqlite}" "${phpVer}-xml" "php-intl")
     # The Web server user,
     LIGHTTPD_USER="www-data"
     # group,
@@ -427,11 +427,11 @@ make_repo() {
     # Clone the repo and return the return code from this command
     git clone -q --depth 20 "${remoteRepo}" "${directory}" &> /dev/null || return $?
     # Data in the repositories is public anyway so we can make it readable by everyone (+r to keep executable permission if already set by git)
     chmod -R a+rX "${directory}"
     # Move into the directory that was passed as an argument
     pushd "${directory}" &> /dev/null || return 1
     # Check current branch. If it is master, then reset to the latest available tag.
     # In case extra commits have been added after tagging/release (i.e. in case of metadata updates/README.MD tweaks)
     curBranch=$(git rev-parse --abbrev-ref HEAD)
     if [[ "${curBranch}" == "master" ]]; then # If we're calling make_repo() then it should always be master; we may not need to check.
         git reset --hard "$(git describe --abbrev=0 --tags)" || return $?
@@ -457,7 +457,7 @@ update_repo() {
     # Again, it's useful to store these in variables in case we need to reuse or change the message;
     # we only need to make one change here
     local str="Update repo in ${1}"
     # Move into the directory that was passed as an argument
     pushd "${directory}" &> /dev/null || return 1
     # Let the user know what's happening
     printf " %b %s..." "${INFO}" "${str}"
@@ -467,7 +467,7 @@ update_repo() {
     # Pull the latest commits
     git pull --quiet &> /dev/null || return $?
     # Check current branch. If it is master, then reset to the latest available tag.
     # In case extra commits have been added after tagging/release (i.e. in case of metadata updates/README.MD tweaks)
     curBranch=$(git rev-parse --abbrev-ref HEAD)
     if [[ "${curBranch}" == "master" ]]; then
         git reset --hard "$(git describe --abbrev=0 --tags)" || return $?
@@ -529,7 +529,7 @@ resetRepo() {
     printf "%b %b %s\\n" "${OVER}" "${TICK}" "${str}"
     # Return to where we came from
     popd &> /dev/null || return 1
     # Returning success anyway?
     return 0
 }
@@ -1232,7 +1232,7 @@ appendToListsFile() {
     case $1 in
         StevenBlack   ) echo "https://raw.githubusercontent.com/StevenBlack/hosts/master/hosts" >> "${adlistFile}";;
         MalwareDom    ) echo "https://mirror1.malwaredomains.com/files/justdomains" >> "${adlistFile}";;
-        Cameleon      ) echo "http://sysctl.org/cameleon/hosts" >> "${adlistFile}";;
+        Cameleon      ) echo "https://sysctl.org/cameleon/hosts" >> "${adlistFile}";;
         DisconTrack   ) echo "https://s3.amazonaws.com/lists.disconnect.me/simple_tracking.txt" >> "${adlistFile}";;
         DisconAd      ) echo "https://s3.amazonaws.com/lists.disconnect.me/simple_ad.txt" >> "${adlistFile}";;
         HostsFile     ) echo "https://hosts-file.net/ad_servers.txt" >> "${adlistFile}";;
@@ -2228,15 +2228,6 @@ FTLinstall() {
     local str="Downloading and Installing FTL"
     printf " %b %s..." "${INFO}" "${str}"

-    # Find the latest version tag for FTL
-    latesttag=$(curl -sI https://github.com/pi-hole/FTL/releases/latest | grep "Location" | awk -F '/' '{print $NF}')
-    # Tags should always start with v, check for that.
-    if [[ ! "${latesttag}" == v* ]]; then
-        printf "%b %b %s\\n" "${OVER}" "${CROSS}" "${str}"
-        printf " %bError: Unable to get latest release location from GitHub%b\\n" "${COL_LIGHT_RED}" "${COL_NC}"
-        return 1
-    fi
-
     # Move into the temp ftl directory
     pushd "$(mktemp -d)" > /dev/null || { printf "Unable to make temporary directory for FTL binary download\\n"; return 1; }
@@ -2257,7 +2248,7 @@ FTLinstall() {
     # Determine which version of FTL to download
     if [[ "${ftlBranch}" == "master" ]];then
-        url="https://github.com/pi-hole/FTL/releases/download/${latesttag%$'\r'}"
+        url="https://github.com/pi-hole/ftl/releases/latest/download"
     else
         url="https://ftl.pi-hole.net/${ftlBranch}"
     fi
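Using GitHub's stable latest/download redirect avoids scraping the Location header for a tag at install time; the binary name is appended to this base URL elsewhere in the function. A hypothetical fetch of one build (asset name assumed):

    curl -sSL -o pihole-FTL "https://github.com/pi-hole/ftl/releases/latest/download/pihole-FTL-linux-x86_64"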
@@ -2468,17 +2459,14 @@ FTLcheckUpdate() {
     if [[ ${ftlLoc} ]]; then
         local FTLversion
         FTLversion=$(/usr/bin/pihole-FTL tag)
-        local FTLreleaseData
         local FTLlatesttag

-        if ! FTLreleaseData=$(curl -sI https://github.com/pi-hole/FTL/releases/latest); then
+        if ! FTLlatesttag=$(curl -sI https://github.com/pi-hole/FTL/releases/latest | grep --color=never -i Location | awk -F / '{print $NF}' | tr -d '[:cntrl:]'); then
             # There was an issue while retrieving the latest version
             printf " %b Failed to retrieve latest FTL release metadata" "${CROSS}"
             return 3
         fi
-        FTLlatesttag=$(grep 'Location' <<< "${FTLreleaseData}" | awk -F '/' '{print $NF}' | tr -d '\r\n')

         if [[ "${FTLversion}" != "${FTLlatesttag}" ]]; then
             return 0
         else

@@ -36,7 +36,9 @@ VPNList="/etc/openvpn/ipp.txt"
 piholeGitDir="/etc/.pihole"
 gravityDBfile="${piholeDir}/gravity.db"
+gravityTEMPfile="${piholeDir}/gravity_temp.db"
 gravityDBschema="${piholeGitDir}/advanced/Templates/gravity.db.sql"
+gravityDBcopy="${piholeGitDir}/advanced/Templates/gravity_copy.sql"

 optimize_database=false

 domainsExtension="domains"
@@ -80,31 +82,49 @@ fi

 # Generate new sqlite3 file from schema template
 generate_gravity_database() {
-    sqlite3 "${gravityDBfile}" < "${gravityDBschema}"
+    sqlite3 "${1}" < "${gravityDBschema}"
 }

-update_gravity_timestamp() {
-    # Update timestamp when the gravity table was last updated successfully
-    output=$( { sqlite3 "${gravityDBfile}" <<< "INSERT OR REPLACE INTO info (property,value) values ('updated',cast(strftime('%s', 'now') as int));"; } 2>&1 )
+# Copy data from old to new database file and swap them
+gravity_swap_databases() {
+    local str
+    str="Building tree"
+    echo -ne " ${INFO} ${str}..."
+
+    # The index is intentionally not UNIQUE as poor quality adlists may contain domains more than once
+    output=$( { sqlite3 "${gravityTEMPfile}" "CREATE INDEX idx_gravity ON gravity (domain, adlist_id);"; } 2>&1 )
     status="$?"

     if [[ "${status}" -ne 0 ]]; then
-        echo -e "\\n ${CROSS} Unable to update gravity timestamp in database ${gravityDBfile}\\n ${output}"
+        echo -e "\\n ${CROSS} Unable to build gravity tree in ${gravityTEMPfile}\\n ${output}"
         return 1
     fi
-    return 0
-}
+    echo -e "${OVER} ${TICK} ${str}"

-database_truncate_table() {
-    local table
-    table="${1}"
+    str="Swapping databases"
+    echo -ne " ${INFO} ${str}..."

-    output=$( { sqlite3 "${gravityDBfile}" <<< "DELETE FROM ${table};"; } 2>&1 )
+    output=$( { sqlite3 "${gravityTEMPfile}" < "${gravityDBcopy}"; } 2>&1 )
     status="$?"

     if [[ "${status}" -ne 0 ]]; then
-        echo -e "\\n ${CROSS} Unable to truncate ${table} database ${gravityDBfile}\\n ${output}"
-        gravity_Cleanup "error"
+        echo -e "\\n ${CROSS} Unable to copy data from ${gravityDBfile} to ${gravityTEMPfile}\\n ${output}"
+        return 1
+    fi
+    echo -e "${OVER} ${TICK} ${str}"
+
+    # Swap databases and remove old database
+    rm "${gravityDBfile}"
+    mv "${gravityTEMPfile}" "${gravityDBfile}"
+}
+
+# Update timestamp when the gravity table was last updated successfully
+update_gravity_timestamp() {
+    output=$( { printf ".timeout 30000\\nINSERT OR REPLACE INTO info (property,value) values ('updated',cast(strftime('%%s', 'now') as int));" | sqlite3 "${gravityDBfile}"; } 2>&1 )
+    status="$?"
+
+    if [[ "${status}" -ne 0 ]]; then
+        echo -e "\\n ${CROSS} Unable to update gravity timestamp in database ${gravityDBfile}\\n ${output}"
         return 1
     fi
     return 0
@@ -113,73 +133,80 @@ database_truncate_table() {
 # Import domains from file and store them in the specified database table
 database_table_from_file() {
     # Define locals
-    local table source backup_path backup_file arg
+    local table source backup_path backup_file tmpFile type
     table="${1}"
     source="${2}"
-    arg="${3}"
     backup_path="${piholeDir}/migration_backup"
     backup_file="${backup_path}/$(basename "${2}")"
-
-    # Truncate table only if not gravity (we add multiple times to this table)
-    if [[ "${table}" != "gravity" ]]; then
-        database_truncate_table "${table}"
-    fi
-
-    local tmpFile
     tmpFile="$(mktemp -p "/tmp" --suffix=".gravity")"
+
     local timestamp
     timestamp="$(date --utc +'%s')"
-    local inputfile
-
-    # Apply format for white-, blacklist, regex, and adlist tables
-    # Read file line by line
+
     local rowid
     declare -i rowid
     rowid=1
-    if [[ "${table}" == "gravity" ]]; then
-        #Append ,${arg} to every line and then remove blank lines before import
-        sed -e "s/$/,${arg}/" "${source}" > "${tmpFile}"
-        sed -i '/^$/d' "${tmpFile}"
-    else
-        grep -v '^ *#' < "${source}" | while IFS= read -r domain
-        do
-            # Only add non-empty lines
-            if [[ -n "${domain}" ]]; then
-                if [[ "${table}" == "domain_audit" ]]; then
-                    # domain_audit table format (no enable or modified fields)
-                    echo "${rowid},\"${domain}\",${timestamp}" >> "${tmpFile}"
-                else
-                    # White-, black-, and regexlist format
-                    echo "${rowid},\"${domain}\",1,${timestamp},${timestamp},\"Migrated from ${source}\"" >> "${tmpFile}"
-                fi
-                rowid+=1
-            fi
-        done
-    fi
-    inputfile="${tmpFile}"
-
-    # Remove possible duplicates found in lower-quality adlists
-    sort -u -o "${inputfile}" "${inputfile}"
+
+    # Special handling for domains to be imported into the common domainlist table
+    if [[ "${table}" == "whitelist" ]]; then
+        type="0"
+        table="domainlist"
+    elif [[ "${table}" == "blacklist" ]]; then
+        type="1"
+        table="domainlist"
+    elif [[ "${table}" == "regex" ]]; then
+        type="3"
+        table="domainlist"
+    fi
+
+    # Get MAX(id) from domainlist when INSERTing into this table
+    if [[ "${table}" == "domainlist" ]]; then
+        rowid="$(sqlite3 "${gravityDBfile}" "SELECT MAX(id) FROM domainlist;")"
+        if [[ -z "$rowid" ]]; then
+            rowid=0
+        fi
+        rowid+=1
+    fi
+
+    # Loop over all domains in ${source} file
+    # Read file line by line
+    grep -v '^ *#' < "${source}" | while IFS= read -r domain
+    do
+        # Only add non-empty lines
+        if [[ -n "${domain}" ]]; then
+            if [[ "${table}" == "domain_audit" ]]; then
+                # domain_audit table format (no enable or modified fields)
+                echo "${rowid},\"${domain}\",${timestamp}" >> "${tmpFile}"
+            elif [[ "${table}" == "adlist" ]]; then
+                # Adlist table format
+                echo "${rowid},\"${domain}\",1,${timestamp},${timestamp},\"Migrated from ${source}\"" >> "${tmpFile}"
+            else
+                # White-, black-, and regexlist table format
+                echo "${rowid},${type},\"${domain}\",1,${timestamp},${timestamp},\"Migrated from ${source}\"" >> "${tmpFile}"
+            fi
+            rowid+=1
+        fi
+    done

     # Store domains in database table specified by ${table}
     # Use printf as .mode and .import need to be on separate lines
     # see https://unix.stackexchange.com/a/445615/83260
-    output=$( { printf ".timeout 10000\\n.mode csv\\n.import \"%s\" %s\\n" "${inputfile}" "${table}" | sqlite3 "${gravityDBfile}"; } 2>&1 )
+    output=$( { printf ".timeout 30000\\n.mode csv\\n.import \"%s\" %s\\n" "${tmpFile}" "${table}" | sqlite3 "${gravityDBfile}"; } 2>&1 )
     status="$?"

     if [[ "${status}" -ne 0 ]]; then
-        echo -e "\\n ${CROSS} Unable to fill table ${table} in database ${gravityDBfile}\\n ${output}"
+        echo -e "\\n ${CROSS} Unable to fill table ${table}${type} in database ${gravityDBfile}\\n ${output}"
         gravity_Cleanup "error"
     fi

-    # Delete tmpfile
-    rm "${tmpFile}" > /dev/null 2>&1 || \
-        echo -e " ${CROSS} Unable to remove ${tmpFile}"
-
     # Move source file to backup directory, create directory if not existing
     mkdir -p "${backup_path}"
     mv "${source}" "${backup_file}" 2> /dev/null || \
         echo -e " ${CROSS} Unable to backup ${source} to ${backup_path}"
+
+    # Delete tmpFile
+    rm "${tmpFile}" > /dev/null 2>&1 || \
+        echo -e " ${CROSS} Unable to remove ${tmpFile}"
 }
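The import idiom above exists because the sqlite3 shell wants each dot-command on its own line, which a single argument string can't provide (see the linked answer). A minimal sketch with an assumed staging file:

    # /tmp/example.gravity would hold CSV rows matching domainlist's columns:
    # id,type,domain,enabled,date_added,date_modified,comment
    printf ".timeout 30000\n.mode csv\n.import \"/tmp/example.gravity\" domainlist\n" \
      | sqlite3 /etc/pihole/gravity.db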
 # Migrate pre-v5.0 list files to database-based Pi-hole versions
@@ -188,7 +215,10 @@ migrate_to_database() {
     if [ ! -e "${gravityDBfile}" ]; then
         # Create new database file - note that this will be created in version 1
         echo -e " ${INFO} Creating new gravity database"
-        generate_gravity_database
+        generate_gravity_database "${gravityDBfile}"
     fi
+
+    # Check if gravity database needs to be updated
+    upgrade_gravityDB "${gravityDBfile}" "${piholeDir}"

     # Migrate list files to new database
     if [ -e "${adListFile}" ]; then
@@ -306,16 +336,25 @@ gravity_DownloadBlocklists() {
         return 1
     fi

-    local url domain agent cmd_ext str
+    local url domain agent cmd_ext str target
     echo ""

-    # Flush gravity table once before looping over sources
-    str="Flushing gravity table"
+    # Prepare new gravity database
+    str="Preparing new gravity database"
     echo -ne " ${INFO} ${str}..."
-    if database_truncate_table "gravity"; then
+    rm "${gravityTEMPfile}" > /dev/null 2>&1
+    output=$( { sqlite3 "${gravityTEMPfile}" < "${gravityDBschema}"; } 2>&1 )
+    status="$?"
+
+    if [[ "${status}" -ne 0 ]]; then
+        echo -e "\\n ${CROSS} Unable to create new database ${gravityTEMPfile}\\n ${output}"
+        gravity_Cleanup "error"
+    else
         echo -e "${OVER} ${TICK} ${str}"
     fi

+    target="$(mktemp -p "/tmp" --suffix=".gravity")"
+
     # Loop through $sources and download each one
     for ((i = 0; i < "${#sources[@]}"; i++)); do
         url="${sources[$i]}"
@@ -335,15 +374,82 @@ gravity_DownloadBlocklists() {
         esac

         echo -e " ${INFO} Target: ${url}"
-        gravity_DownloadBlocklistFromUrl "${url}" "${cmd_ext}" "${agent}" "${sourceIDs[$i]}"
+        gravity_DownloadBlocklistFromUrl "${url}" "${cmd_ext}" "${agent}" "${sourceIDs[$i]}" "${saveLocation}" "${target}"
         echo ""
     done

+    str="Storing downloaded domains in new gravity database"
+    echo -ne " ${INFO} ${str}..."
+    output=$( { printf ".timeout 30000\\n.mode csv\\n.import \"%s\" gravity\\n" "${target}" | sqlite3 "${gravityTEMPfile}"; } 2>&1 )
+    status="$?"
+
+    if [[ "${status}" -ne 0 ]]; then
+        echo -e "\\n ${CROSS} Unable to fill gravity table in database ${gravityTEMPfile}\\n ${output}"
+        gravity_Cleanup "error"
+    else
+        echo -e "${OVER} ${TICK} ${str}"
+    fi
+
+    if [[ "${status}" -eq 0 && -n "${output}" ]]; then
+        echo -e " Encountered non-critical SQL warnings. Please check the suitability of the lists you're using!\\n\\n SQL warnings:"
+        local warning file line lineno
+        while IFS= read -r line; do
+            echo " - ${line}"
+            warning="$(grep -oh "^[^:]*:[0-9]*" <<< "${line}")"
+            file="${warning%:*}"
+            lineno="${warning#*:}"
+            if [[ -n "${file}" && -n "${lineno}" ]]; then
+                echo -n "   Line contains: "
+                awk "NR==${lineno}" < "${file}"
+            fi
+        done <<< "${output}"
+        echo ""
+    fi
+
+    rm "${target}" > /dev/null 2>&1 || \
+        echo -e " ${CROSS} Unable to remove ${target}"
+
     gravity_Blackbody=true
 }

+total_num=0
+parseList() {
+    local adlistID="${1}" src="${2}" target="${3}" incorrect_lines
+    # This sed does the following things:
+    # 1. Remove all domains containing invalid characters. Valid are: a-z, A-Z, 0-9, dot (.), minus (-), underscore (_)
+    # 2. Append ,adlistID to every line
+    # 3. Ensures there is a newline on the last line
+    sed -e "/[^a-zA-Z0-9.\_-]/d;s/$/,${adlistID}/;/.$/a\\" "${src}" >> "${target}"
+    # Find (up to) five domains containing invalid characters (see above)
+    incorrect_lines="$(sed -e "/[^a-zA-Z0-9.\_-]/!d" "${src}" | head -n 5)"
+
+    local num_lines num_target_lines num_correct_lines num_invalid
+    # Get number of lines in source file
+    num_lines="$(grep -c "^" "${src}")"
+    # Get number of lines in destination file
+    num_target_lines="$(grep -c "^" "${target}")"
+    num_correct_lines="$(( num_target_lines-total_num ))"
+    total_num="$num_target_lines"
+    num_invalid="$(( num_lines-num_correct_lines ))"
+    if [[ "${num_invalid}" -eq 0 ]]; then
+        echo " ${INFO} Received ${num_lines} domains"
+    else
+        echo " ${INFO} Received ${num_lines} domains, ${num_invalid} domains invalid!"
+    fi
+
+    # Display sample of invalid lines if we found some
+    if [[ -n "${incorrect_lines}" ]]; then
+        echo " Sample of invalid domains:"
+        while IFS= read -r line; do
+            echo " - ${line}"
+        done <<< "${incorrect_lines}"
+    fi
+}
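A quick way to see what the parseList() sed accepts and rejects — a demo with made-up input and an assumed adlist ID of 7:

    printf 'good.example.com\nbad!domain.com\nalso-ok.example.net\n' > /tmp/demo.list
    sed -e "/[^a-zA-Z0-9.\_-]/d;s/$/,7/;/.$/a\\" /tmp/demo.list
    # -> good.example.com,7
    # -> also-ok.example.net,7   (the line containing "!" is dropped)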
 # Download specified URL and perform checks on HTTP status and file content
 gravity_DownloadBlocklistFromUrl() {
-    local url="${1}" cmd_ext="${2}" agent="${3}" adlistID="${4}" heisenbergCompensator="" patternBuffer str httpCode success=""
+    local url="${1}" cmd_ext="${2}" agent="${3}" adlistID="${4}" saveLocation="${5}" target="${6}"
+    local heisenbergCompensator="" patternBuffer str httpCode success=""

     # Create temp file to store content on disk instead of RAM
     patternBuffer=$(mktemp -p "/tmp" --suffix=".phgpb")
@@ -424,20 +530,14 @@ gravity_DownloadBlocklistFromUrl() {
     # Determine if the blocklist was downloaded and saved correctly
     if [[ "${success}" == true ]]; then
         if [[ "${httpCode}" == "304" ]]; then
-            # Add domains to database table
-            str="Adding adlist with ID ${adlistID} to database table"
-            echo -ne " ${INFO} ${str}..."
-            database_table_from_file "gravity" "${saveLocation}" "${adlistID}"
-            echo -e "${OVER} ${TICK} ${str}"
+            # Add domains to database table file
+            parseList "${adlistID}" "${saveLocation}" "${target}"
         # Check if $patternbuffer is a non-zero length file
         elif [[ -s "${patternBuffer}" ]]; then
             # Determine if blocklist is non-standard and parse as appropriate
             gravity_ParseFileIntoDomains "${patternBuffer}" "${saveLocation}"
-            # Add domains to database table
-            str="Adding adlist with ID ${adlistID} to database table"
-            echo -ne " ${INFO} ${str}..."
-            database_table_from_file "gravity" "${saveLocation}" "${adlistID}"
-            echo -e "${OVER} ${TICK} ${str}"
+            # Add domains to database table file
+            parseList "${adlistID}" "${saveLocation}" "${target}"
         else
             # Fall back to previously cached list if $patternBuffer is empty
             echo -e " ${INFO} Received empty file: ${COL_LIGHT_GREEN}using previously cached list${COL_NC}"
@@ -446,11 +546,8 @@ gravity_DownloadBlocklistFromUrl() {
         # Determine if cached list has read permission
         if [[ -r "${saveLocation}" ]]; then
             echo -e " ${CROSS} List download failed: ${COL_LIGHT_GREEN}using previously cached list${COL_NC}"
-            # Add domains to database table
-            str="Adding to database table"
-            echo -ne " ${INFO} ${str}..."
-            database_table_from_file "gravity" "${saveLocation}" "${adlistID}"
-            echo -e "${OVER} ${TICK} ${str}"
+            # Add domains to database table file
+            parseList "${adlistID}" "${saveLocation}" "${target}"
         else
             echo -e " ${CROSS} List download failed: ${COL_LIGHT_RED}no cached list available${COL_NC}"
         fi
@@ -535,6 +632,7 @@ gravity_Table_Count() {
         local unique
         unique="$(sqlite3 "${gravityDBfile}" "SELECT COUNT(DISTINCT domain) FROM ${table};")"
         echo -e " ${INFO} Number of ${str}: ${num} (${unique} unique domains)"
+        sqlite3 "${gravityDBfile}" "INSERT OR REPLACE INTO info (property,value) VALUES ('gravity_count',${unique});"
     else
         echo -e " ${INFO} Number of ${str}: ${num}"
     fi
@@ -686,10 +784,6 @@ fi
 # Move possibly existing legacy files to the gravity database
 migrate_to_database

-# Ensure proper permissions are set for the newly created database
-chown pihole:pihole "${gravityDBfile}"
-chmod g+w "${piholeDir}" "${gravityDBfile}"
-
 if [[ "${forceDelete:-}" == true ]]; then
     str="Deleting existing list cache"
     echo -ne "${INFO} ${str}..."
@@ -704,15 +798,26 @@ gravity_DownloadBlocklists

 # Create local.list
 gravity_generateLocalList
-gravity_ShowCount

+# Migrate rest of the data from old to new database
+gravity_swap_databases
+
+# Update gravity timestamp
 update_gravity_timestamp

-gravity_Cleanup
-echo ""
+# Ensure proper permissions are set for the database
+chown pihole:pihole "${gravityDBfile}"
+chmod g+w "${piholeDir}" "${gravityDBfile}"
+
+# Compute numbers to be displayed
+gravity_ShowCount

 # Determine if DNS has been restarted by this instance of gravity
 if [[ -z "${dnsWasOffline:-}" ]]; then
     "${PIHOLE_COMMAND}" restartdns reload
 fi
+
+gravity_Cleanup
+echo ""
+
 "${PIHOLE_COMMAND}" status

@@ -306,8 +306,8 @@ tailFunc() {
     # Colour A/AAAA/DHCP strings as white
    # Colour everything else as gray
     tail -f /var/log/pihole.log | sed -E \
-        -e "s,($(date +'%b %d ')| dnsmasq[.*[0-9]]),,g" \
-        -e "s,(.*(gravity |black |regex | config ).* is (0.0.0.0|::|NXDOMAIN|${IPV4_ADDRESS%/*}|${IPV6_ADDRESS:-NULL}).*),${COL_RED}&${COL_NC}," \
+        -e "s,($(date +'%b %d ')| dnsmasq\[[0-9]*\]),,g" \
+        -e "s,(.*(blacklisted |gravity blocked ).* is (0.0.0.0|::|NXDOMAIN|${IPV4_ADDRESS%/*}|${IPV6_ADDRESS:-NULL}).*),${COL_RED}&${COL_NC}," \
         -e "s,.*(query\\[A|DHCP).*,${COL_NC}&${COL_NC}," \
         -e "s,.*,${COL_GRAY}&${COL_NC},"
     exit 0
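The old first expression used an invalid bracket pattern (dnsmasq[.*[0-9]]); the corrected dnsmasq\[[0-9]*\] actually strips the process tag. A sketch with a fabricated log line:

    echo "Apr 10 12:00:00 dnsmasq[1234]: query[A] example.com from 192.168.1.2" \
      | sed -E -e "s,($(date +'%b %d ')| dnsmasq\[[0-9]*\]),,g"
    # -> "12:00:00: query[A] example.com from 192.168.1.2"  (when run on Apr 10)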
