These scripts gather, process, and sort data for analytics and for work on the reverse elliptic-curve problem. They make it possible to generate millions of data points for reversing the functions, with the aim of locating the exact variables for controlled shifts depending on where the midrange falls on the generators.
Legacy Wallet (Uncompressed) Data Collector:
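The collector script itself is not reproduced on this page. As a point of reference, a minimal sketch that emits records in the format the analyzer below expects might look like the following. It assumes each checksum is the first four bytes of double SHA-256 over the usual preimage: 0x80 + private key for the WIF checksum (uncompressed), 0x00 + HASH160(pubkey) for the address checksum, and, purely as an assumption, the raw uncompressed public key for pubkey_checksum. The function names are illustrative, not the original collector's, and the sketch depends on the third-party ecdsa package.

import hashlib
import os

from ecdsa import SigningKey, SECP256k1  # pip install ecdsa

def sha256d(data: bytes) -> bytes:
    """Double SHA-256, the hash used for Base58Check checksums."""
    return hashlib.sha256(hashlib.sha256(data).digest()).digest()

def collect_line(priv: bytes) -> str:
    """Emit one record in the format the analyzer's regex expects."""
    # WIF checksum (uncompressed): SHA-256d over 0x80 + 32-byte private key.
    wif_cs = sha256d(b"\x80" + priv)[:4]
    # Uncompressed public key: 0x04 + X + Y.
    sk = SigningKey.from_string(priv, curve=SECP256k1)
    pub = b"\x04" + sk.verifying_key.to_string()
    # P2PKH address checksum: SHA-256d over 0x00 + RIPEMD160(SHA256(pubkey)).
    h160 = hashlib.new("ripemd160", hashlib.sha256(pub).digest()).digest()
    addr_cs = sha256d(b"\x00" + h160)[:4]
    # Assumption: pubkey_checksum is SHA-256d over the raw public key bytes.
    pub_cs = sha256d(pub)[:4]
    return (f"hex:{priv.hex()} wif_checksum:{wif_cs.hex()} "
            f"address_checksum:{addr_cs.hex()} pubkey_checksum:{pub_cs.hex()}")

if __name__ == "__main__":
    # Generate a few random keys as a demonstration.
    for _ in range(5):
        print(collect_line(os.urandom(32)))

Each emitted line matches the analyzer's regex: a 64-character hex key followed by three 8-character hex checksums.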
Data Analyzer (Checks Hashes over time):
import os
import re
from collections import defaultdict

# ------------------ CONFIGURATION ------------------ #

# Input folder where your generated output files reside.
INPUT_FOLDER = r"d:\DATA"

# Base output folder for our new organized results.
BASE_OUTPUT_FOLDER = r"d:\Checksum"
TOTAL_FOLDER = os.path.join(BASE_OUTPUT_FOLDER, "Total")
COUNT_FOLDER = os.path.join(BASE_OUTPUT_FOLDER, "Count")

# Create output folders if they don't exist.
os.makedirs(TOTAL_FOLDER, exist_ok=True)
os.makedirs(COUNT_FOLDER, exist_ok=True)

# Maximum number of lines per output file in the Total folder.
MAX_LINES_PER_FILE = 1_000_000

# Regular expression to extract:
#   - 64-character hex key,
#   - 8-character wif_checksum,
#   - 8-character address_checksum,
#   - 8-character pubkey_checksum.
# Assumes fields are separated by whitespace.
regex_pattern = r"hex:(\w{64})\s+wif_checksum:(\w{8})\s+address_checksum:(\w{8})\s+pubkey_checksum:(\w{8})"

# ------------------ SCRIPT FUNCTIONS ------------------ #

def process_files():
    """
    Process all text files in INPUT_FOLDER, extracting hex keys and the three checksums.
    Returns a list of tuples (hex_key, wif_checksum, address_checksum, pubkey_checksum).
    """
    data = []
    total_checked = 0
    # Process each file in the INPUT_FOLDER.
    for file_name in os.listdir(INPUT_FOLDER):
        file_path = os.path.join(INPUT_FOLDER, file_name)
        # Consider only .txt files and skip anything containing "recovery".
        # (The .txt filter already excludes .log files.)
        if os.path.isfile(file_path) and file_name.endswith(".txt") and "recovery" not in file_name.lower():
            try:
                with open(file_path, "r", encoding="utf-8") as f:
                    for line in f:
                        m = re.search(regex_pattern, line)
                        if m:
                            hex_key, wif_cs, addr_cs, pub_cs = m.groups()
                            data.append((hex_key, wif_cs, addr_cs, pub_cs))
                            total_checked += 1
            except Exception as e:
                print(f"Error reading {file_name}: {e}")
    print(f"Total lines processed: {total_checked}")
    return data

def save_total_output(data):
    """
    Save the extracted data to the TOTAL_FOLDER.
    The data is split into sequential files, each containing at most MAX_LINES_PER_FILE lines.
    Each line has: hex, wif_checksum, address_checksum, pubkey_checksum.
    """
    file_index = 1
    line_count = 0
    current_file_lines = []
    for entry in data:
        hex_key, wif_cs, addr_cs, pub_cs = entry
        # Format a line with all data:
        line = f"hex:{hex_key} wif_checksum:{wif_cs} address_checksum:{addr_cs} pubkey_checksum:{pub_cs}\n"
        current_file_lines.append(line)
        line_count += 1
        if line_count >= MAX_LINES_PER_FILE:
            file_path = os.path.join(TOTAL_FOLDER, f"keys{file_index}.txt")
            with open(file_path, "w", encoding="utf-8") as f_out:
                f_out.writelines(current_file_lines)
            print(f"Saved {file_path} with {line_count} lines.")
            file_index += 1
            line_count = 0
            current_file_lines = []
    # Write remaining lines, if any.
    if current_file_lines:
        file_path = os.path.join(TOTAL_FOLDER, f"keys{file_index}.txt")
        with open(file_path, "w", encoding="utf-8") as f_out:
            f_out.writelines(current_file_lines)
        print(f"Saved {file_path} with {line_count} lines.")

def save_count_outputs(data):
    """
    Count occurrences of each unique checksum for each of the three checksum types.
    Write the counts to three separate files in the COUNT_FOLDER.
    """
    wif_counts = defaultdict(int)
    addr_counts = defaultdict(int)
    pub_counts = defaultdict(int)
    for entry in data:
        _, wif_cs, addr_cs, pub_cs = entry
        wif_counts[wif_cs] += 1
        addr_counts[addr_cs] += 1
        pub_counts[pub_cs] += 1
    # Save each dictionary to its file.
    with open(os.path.join(COUNT_FOLDER, "wif_counts.txt"), "w", encoding="utf-8") as f:
        for cs, count in sorted(wif_counts.items()):
            f.write(f"{cs}:{count}\n")
    with open(os.path.join(COUNT_FOLDER, "address_counts.txt"), "w", encoding="utf-8") as f:
        for cs, count in sorted(addr_counts.items()):
            f.write(f"{cs}:{count}\n")
    with open(os.path.join(COUNT_FOLDER, "pubkey_counts.txt"), "w", encoding="utf-8") as f:
        for cs, count in sorted(pub_counts.items()):
            f.write(f"{cs}:{count}\n")
    print("Checksum counts saved.")

def main():
    # Gather all data from input files.
    data = process_files()
    # Save total output (each file with at most MAX_LINES_PER_FILE lines).
    save_total_output(data)
    # Save count outputs for each checksum type.
    save_count_outputs(data)

if __name__ == "__main__":
    main()
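Each file written to the Count folder holds one checksum:count pair per line, sorted by checksum, which makes successive runs easy to diff when watching how the checksum counts move over time. The format looks like this (values here are illustrative only, not real output):

0000a1f3:2
0000c47b:1
fffe9d02:3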
Retrieving the Checksum from a BTC Address:
import base58

def get_address_checksum(address: str) -> tuple[str, str]:
    """
    Decodes a Base58Check-encoded Bitcoin address.
    Returns a tuple (payload_hex, checksum_hex), where:
      - payload_hex is the version byte + hash160 (in hex),
      - checksum_hex is the 4-byte checksum (in hex).
    """
    try:
        decoded = base58.b58decode(address)
    except Exception as e:
        raise ValueError(f"Error decoding address: {e}")
    if len(decoded) < 5:
        raise ValueError("Decoded data is too short to contain a checksum.")
    # The last 4 bytes are the checksum.
    payload = decoded[:-4]
    checksum = decoded[-4:]
    return payload.hex(), checksum.hex()

def main():
    btc_address = input("Enter a Bitcoin address: ").strip()
    try:
        payload_hex, checksum_hex = get_address_checksum(btc_address)
        print("Decoded Payload (hex):", payload_hex)
        print("Checksum (hex):", checksum_hex)
    except Exception as e:
        print(e)

if __name__ == "__main__":
    main()
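Base58Check computes the checksum as the first four bytes of a double SHA-256 over the payload, so the value extracted above can also be verified by recomputing it. A minimal sketch (verify_checksum is an added helper, not part of the script above):

import hashlib

def verify_checksum(payload_hex: str, checksum_hex: str) -> bool:
    # Base58Check checksum: first 4 bytes of SHA256(SHA256(payload)).
    payload = bytes.fromhex(payload_hex)
    digest = hashlib.sha256(hashlib.sha256(payload).digest()).digest()
    return digest[:4].hex() == checksum_hex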