
11-04 23:47 - 'DON'T USE THIS' (self.linux) by /u/CreeperTyE removed from /r/linux within 6-16min

'''
This is for cyberpatriots, pls don't use this.
#!/bin/bash
# CyberPatriot Ubuntu (Trusty Tahr) Script v0.3.5
# Root is required to run this script, but chmod should not be used on script or run as root.
# User running script must be in group 'sudo'.
#
# Not everything is covered in this script. Please make sure to review checklist and the Securing Debian Manual.
# This script is only meant to be used for whichever team Keita Susuki is on.
# CHANGES: sed is now more often used to find and replace instead of append to config files
function main {
kernel_info=$(uname -a)
time=$(date)
display_info=$(whoami)
sshd="/etc/ssh/sshd_config"
apache_s="/etc/apache2/apache2.conf"
vsftpd_s="/etc/vsftpd.conf"
echo "---------------------------------------------------------"
echo "Script version: v0.3.5"
echo "Current User: $display_info"
echo "Team: Binary Bros"
echo "Current Time: $time"
echo "Kernel info: $kernel_info"
echo "Now, what can I do for you today?"
echo "---------------------------------------------------------"
echo -en '\n'
read -p "Press ENTER to continue."
echo -en '\n'
echo "WARNING: IF YOU HAVE NEGLECTED TO COMPLETE THE FORENSICS QUESTIONS, IMMEDIATELY CTRL+C THIS SCRIPT."
echo "HAVE YOU COMPLETED ALL THE FORENSICS QUESTIONS? [Y/N]"
read -r forensic_questions
if [[ $forensic_questions == "y" || $forensic_questions == "Y" ]]; then
clear
echo "Good. Now let's start working."
elif [[ $forensic_questions == "n" || $forensic_questions == "N" ]]; then
echo "Finish the forensics questions and come back."
exit
else
echo "Error: bad input."
fi
echo "Before using apt, we need to check to see if sources.list hasn't been tampered with."
echo "Redirecting you to /etc/apt/sources.list in 5 seconds..."
sleep 5
sudo gedit /etc/apt/sources.list
echo "Securing /run/shm."
echo "r-- is dangerous, only on servers if there is no reason for /run/shm."
echo "Read only /run/shm can cause many programs to break. Be cautious."
echo -en '\n'
echo "Options:"
echo "Mount /run/shm r-- (read-only) [r]"
echo "Mount /run/shm rw- (read-write) [w]"
echo "Skip this method. [x]"
read -r shared_memory
if [[ $shared_memory == "r" || $shared_memory == "R" ]]; then
echo "none /run/shm tmpfs defaults,ro 0 0" | sudo tee -a /etc/fstab
echo "Done. Restart box after script has run its course."
elif [[ $shared_memory == "w" || $shared_memory == "W" ]]; then
echo "none /run/shm tmpfs rw,noexec,nosuid,nodev 0 0" | sudo tee -a /etc/fstab
echo "Done. Restart box after script has run its course."
elif [[ $shared_memory == "x" || $shared_memory == "X" ]]; then
echo "Understood. Check UnsafeDefaults page on Ubuntu's website."
fi
echo -en '\n'
echo "Next, we will check hosts file. Make sure nothing looks amiss (default config)."
echo "Redirecting you to hosts file in 5 seconds..."
sleep 5
sudo gedit /etc/hosts
echo -en '\n'
echo "See if nameserver is unfamiliar, if it is, change to google public (8.8.8.8)."
echo "Redirecting you in 3 seconds..."
sudo gedit /etc/resolv.conf
echo -en '\n'
echo "I will now install packages necessary for the security of the system."
echo -en '\n'
sudo apt-get -y -qq install rkhunter clamav clamtk gufw ufw libpam-cracklib vim nmap sysv-rc-conf bum unattended-upgrades logcheck lynis members auditd chkrootkit fail2ban
echo -en '\n'
echo "Configuring automatic upgrades.."
sudo dpkg-reconfigure --priority=low unattended-upgrades
echo "Would you like to manually use gufw or have the script automatically use ufw and close off ports?"
echo -en '\n'
echo "Options:"
echo "g: gufw"
echo "a: auto ufw"
echo "ga: ufw then manual gufw"
read -r firewall_config
if [[ $firewall_config == "g" || $firewall_config == "G" ]]; then
echo "Opening gufw in 5 seconds..."
sleep 5
sudo gufw
elif [[ $firewall_config == "a" || $firewall_config == "A" ]]; then
sudo ufw enable
sudo ufw deny 23
sudo ufw deny 2049
sudo ufw deny 515
sudo ufw deny 111
sudo ufw deny 9051
sudo ufw deny 31337
sudo ufw status
echo "Automatic configuration of firewall completed. I recommend that you look over this again."
sleep 10
elif [[ $firewall_config == "ga" || $firewall_config == "GA" ]]; then
sudo ufw enable
sudo ufw deny 23
sudo ufw deny 2049
sudo ufw deny 515
sudo ufw deny 111
sudo ufw deny 9051
sudo ufw deny 31337
sudo gufw
else
echo "Error: bad input."
fi
clear
echo -en '\n'
echo "Running nmap on 127.0.0.1 to display open ports..." # nmap isn't considered a "hacking tool"
echo "Would you also like to save output to nmap_output.txt [y/n]?"
echo -en '\n'
read -r nmap_input
if [[ $nmap_input == "y" || $nmap_input == "Y" ]]; then
echo "Sending output to nmap_output.txt.."
touch nmap_output.txt
echo "Running nmap on localhost again so you can see the output."
nmap -sV 127.0.0.1 | tee nmap_output.txt
sleep 10
echo -en '\n'
elif [[ $nmap_input == "n" || $nmap_input == "N" ]]; then
echo "Understood. Running nmap on localhost.."
nmap -sV 127.0.0.1
sleep 10
echo -en '\n'
else
echo "Error: bad input."
echo -en '\n'
fi
echo "Now please disable unneeded processes keeping ports open."
sleep 5
sudo sysv-rc-conf # preferred tool for this
echo -en '\n'
echo "Please make sure there is nothing besides exit 0 and some comments."
sleep 5
sudo vim /etc/rc.local
echo -en '\n'
echo "Checking for sshd_config file"
if [ -f "$sshd" ]; then
echo "sshd is present on this system."
echo "Is sshd a critical service on this machine? [y/n]"
echo "note: selecting N will remove sshd from this system. Proceed with caution."
read -r sshd_critical
if [[ $sshd_critical == "y" || $sshd_critical == "Y" ]]; then
sshd_secure_config
elif [[ $sshd_critical == "n" || $sshd_critical == "N" ]]; then
echo "Understood, moving on."
else
echo "Error: bad input."
fi
echo -en '\n'
echo "Would you like to restart sshd? [y/n]"
read -r sshd_restart_uinput
if [[ $sshd_restart_uinput == "Y" || $sshd_restart_uinput == "y" ]]; then # may take points and then give back
echo "Restarting sshd..."
sudo service sshd restart
elif [[ $sshd_restart_uinput == "n" || $sshd_restart_uinput == "N" ]]; then
echo "Understood. Remember that changes will not happen until sshd is restarted."
else
echo "Error: bad input."
fi
fi
clear
echo -en '\n'
echo "Disabling guest user and turning off autologin. Editing /etc/lightdm/lightdm.conf"
echo "Checklist reference: GENERAL/8 Alpha, Bravo"
echo "Remember to restart lightdm or restart box later on."
echo "I will direct you there in 5 seconds."
sleep 5
sudo vim /etc/lightdm/lightdm.conf
echo -en '\n'
printf "Now, would you like for me to add some better settings for /etc/sysctl.conf? [y\n]"
read -r secure_sysctl
if [[ $secure_sysctl == "y" || $secure_sysctl == "Y" ]]; then
sysctl_secure_config
elif [[ $secure_sysctl == "n" || $secure_sysctl == "N" ]]; then
echo -en '\n'
echo "Understood, I recommend you do this manually however."
else
echo -en '\n'
echo "Error: bad input"
fi
echo -en '\n'
echo "Lock the root account? [y/n]"
read -r disable_root
echo -en '\n'
if [[ $disable_root == "y" || $disable_root == "Y" ]]; then
sudo passwd -l root
echo "Root account locked."
elif [[ $disable_root == "n" || $disable_root == "N" ]]; then
echo "Understood, manually lock please."
else
echo "Bad input."
fi
clear
echo -en '\n'
echo "Limit access to su to all users but the ones in group wheel? [y/n]"
echo -en '\n'
read -r lim_su
if [[ $lim_su == "y" || $lim_su == "Y" ]]; then
sudo chown root:admin /bin/su
sudo chmod 04750 /bin/su
echo "Done."
elif [[ $lim_su == "n" || $lim_su == "N" ]]; then
echo "Remember to manually limit access to su! All it takes is a single uncomment..."
else
echo "Bad input."
fi
clear
if [[ -f "$apache_s" ]]; then
echo "Is apache2 supposed to be installed on this system? [y/n]"
echo "If you choose N then you will subsequently uninstall apache2. Be careful."
read -r apache2_que
if [[ $apache2_que == "y" || $apache2_que == "Y" ]]; then
echo "Understood, moving on to securing apache2."
apache2_secure
elif [[ $apache2_que == "n" || $apache2_que == "N" ]]; then
echo "Uninstalling apache2..."
sudo service apache2 stop
sudo apt-get purge apache2
else
echo "Bad input."
fi
else
echo "Apache2 is not installed, moving on."
fi
if [[ -f "$vsftpd_s" ]]; then
echo "vsftpd configuration file detected."
echo "Is vsftpd a critical service on this machine? [y/n]"
echo "If you choose N then you will subsequently uninstall vsftpd. Be careful."
read -r vsftpd_choice
if [[ $vsftpd_choice == "y" || $vsftpd_choice == "Y" ]]; then
echo "Understood, moving on to securing vsftpd."
vsftpd_secure
elif [[ $vsftpd_choice == "n" || $vsftpd_choice == "N" ]]; then
sudo service vsftpd stop
sudo apt-get purge vsftpd
else
echo "Bad input."
fi
else
echo "vsftpd is not installed on this machine, moving on."
fi
clear
echo "Check apparmor? [y/n]"
read -r apparmor_check
if [[ $apparmor_check == "y" || $apparmor_check == "Y" ]]; then
apparmor_fix
elif [[ $apparmor_check == "n" || $apparmor_check == "N" ]]; then
echo "Understood, moving on."
echo -en '\n'
else
echo "Error: bad input."
fi
echo -en '\n'
echo "Deny su to non admins? [y/n]"
echo -en '\n'
read -r deny_su
if [[ $deny_su == "y" || $deny_su == "Y" ]]; then
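# dpkg-statoverride records a permanent permission override (owner root, group sudo, mode 4750), so only members of group sudo can run su and the change survives package upgrades.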
sudo dpkg-statoverride --update --add root sudo 4750 /bin/su
echo "Done."
elif [[ $deny_su == "n" || $deny_su == "N" ]]; then
sudo "Understood, moving on."
else
echo "Error: bad input."
fi
echo -en '\n'
echo "Secure home directory? [y/n]"
echo "NOTE: potentially dangerous."
echo -en '\n'
read -r home_secure
if [[ $home_secure == "y" || $home_secure == "Y" ]]; then
echo "What is your username?"
echo "I need it so I can chmod 0700 your home directory."
read -r username_uinput
sudo chmod 0700 /home/"$username_uinput"
echo "Thanks!."
elif [[ $home_secure == "n" || $home_secure == "N" ]]; then
echo "Understood, moving on."
else
echo "Error: bad input."
fi
clear
echo -en '\n'
echo "Prevent IP spoofing? [y/n]"
echo "(/etc/host.conf)"
read -r ip_spoof
echo -en '\n'
if [[ $ip_spoof == "y" || $ip_spoof == "Y" ]]; then
echo "order bind,hosts" | sudo tee -a /etc/host.conf
echo "nospoof on" | sudo tee -a /etc/host.conf
echo "IP spoofing disabled."
elif [[ $ip_spoof == "n" || $ip_spoof == "N" ]]; then
echo "Understood, skipping disabling ip spoofing."
else
echo "Error: bad input."
fi
echo "Would you like to edit /etc/pam.d? [y/n]"
read -r pam_secure
if [[ $pam_secure == "y" || $pam_secure == "Y" ]]; then
echo "Use subroutine pam_secure? [y/n]"
read -r choose_pam_secure
if [[ $choose_pam_secure == "y" || $choose_pam_secure == "Y" ]]; then
pam_secure
elif [[ $choose_pam_secure == "n" || $choose_pam_secure == "N" ]]; then
echo "Understood, moving on."
else
echo "Error: bad input."
fi
echo "Redirecting you to /etc/pam.d/common-password. Use checklist."
echo "Checklist reference: GENERAL/10 ALPHA"
echo -en '\n'
sleep 5
sudo vim /etc/pam.d/common-password
echo -en '\n'
echo "Redirecting you to /etc/pam.d/common-auth. Use checklist."
echo "Checklist reference: GENERAL/10 BRAVO"
sleep 5
sudo vim /etc/pam.d/common-auth
echo -en '\n'
echo "Redirecting you to /etc/login.defs. Use checklist."
echo "Checklist reference: GENERAL/10 CHARLIE"
sleep 5
sudo vim /etc/login.defs
elif [[ $pam_secure == "n" || $pam_secure == "N" ]]; then
echo "Understood, will skip securing pam.d. Make sure to use the checklist and do so manually."
else
echo "Sorry, bad input."
fi
clear
echo -en '\n'
echo "Would you like to delete media files? [y/n]"
echo "Warning: Feature untested due to obvious reasons."
echo -en '\n'
read -r media_input
if [[ $media_input == "y" || $media_input == "Y" ]]; then
sudo find / -name '*.mp3' -type f -delete
sudo find / -name '*.mov' -type f -delete
sudo find / -name '*.mp4' -type f -delete
sudo find / -name '*.avi' -type f -delete
sudo find / -name '*.mpg' -type f -delete
sudo find / -name '*.mpeg' -type f -delete
sudo find / -name '*.flac' -type f -delete
sudo find / -name '*.m4a' -type f -delete
sudo find / -name '*.flv' -type f -delete
sudo find / -name '*.ogg' -type f -delete
sudo find /home -name '*.gif' -type f -delete
sudo find /home -name '*.png' -type f -delete
sudo find /home -name '*.jpg' -type f -delete
sudo find /home -name '*.jpeg' -type f -delete
elif [[ $media_input == "n" || $media_input == "N" ]]; then
echo "Understood, manually search and destroy media files."
else
echo "Error: bad input."
fi
echo -en '\n'
clear
echo "Would you like to install updates? [y/n]"
read -r update_input
if [[ $update_input == "y" || $update_input == "Y" ]]; then
sudo apt-get -qq -y update
sudo apt-get -qq -y upgrade
sudo apt-get -qq -y dist-upgrade
sudo apt-get -qq -y autoremove
elif [[ $update_input == "n" || $update_input == "N" ]]; then
echo "Understood, moving on."
echo -en '\n'
else
echo "Error: bad input."
echo -en '\n'
fi
sudo freshclam
clear
echo "Run chkrootkit and rkhunter? [y/n]"
read -r rootkit_chk
if [[ $rootkit_chk == "y" || $rootkit_chk == "Y" ]]; then
touch rkhunter_output.txt
echo "Rkhunter output file created as rkhunter_output.txt."
touch chkrootkit_output.txt
echo "chkrootkit output file created as chkrootkit_output.txt."
sudo chkrootkit | tee chkrootkit_output.txt
sudo rkhunter -c | tee rkhunter_output.txt
elif [[ $rootkit_chk == "n" || $rootkit_chk == "N" ]]; then
echo "Understood, moving on."
else
echo "Error: bad input."
fi
sudo clamscan -r /
clear
echo -en '\n'
sleep 5
touch lynis_output.txt
echo "Lynis output file created as lynis_output.txt."
sudo lynis -c | tee lynis_output.txt
echo "Enable apparmor? [y/n]"
read -r apparmor_enabling
if [[ $apparmor_enabling == "y" || $apparmor_enabling == "Y" ]]; then
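# Append "apparmor=1 security=apparmor" to the existing GRUB_CMDLINE_LINUX value in /etc/default/grub, then rebuild the grub config so AppArmor is enforced at boot.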
sudo perl -pi -e 's,GRUB_CMDLINE_LINUX="(.*)"$,GRUB_CMDLINE_LINUX="$1 apparmor=1 security=apparmor",' /etc/default/grub
sudo update-grub
elif [[ $apparmor_enabling == "n" || $apparmor_enabling == "N" ]]; then
echo "Understood, you should enable it however."
else
echo "Error: bad input."
fi
echo "The script has run it's course."
echo "Remember to manually check config files and finish any changes."
echo -en '\n'
echo "--------------------------------------------------------"
echo "INFORMATION"
echo "--------------------------------------------------------"
echo "Current User: $display_info"
echo "Current Time: $time"
echo "Kernel info: $kernel_info"
echo "--------------------------------------------------------"
echo -en '\n'
read -p "Press ENTER to reboot the system."
sudo reboot
}
function apache2_secure {
sudo apt-get -y install libapache2-modsecurity
sudo apt-get -y install libapache2-modevasive
sudo sed -i 's/^#?ServerSignature .*/ServerSignature Off/g' /etc/apache2/conf-enabled/security.conf
sudo sed -i 's/^#?ServerTokens .*/ServerTokens Off/g' /etc/apache2/conf-enabled/security.conf
sudo sed -i 's/^#?Options .*/Options None/g' /etc/apache2/apache2.conf
sudo sed -i 's/^#?AllowOverride .*/AllowOverride None/g' /etc/apache2/apache2.conf
sudo sed -i 's/^#?Require .*/Require all granted/g' /etc/apache2/apache2.conf
sudo sed -i 's/^#?LimitRequestBody .*/LimitRequestBody 204800/g' /etc/apache2/apache2.conf
echo "" | sudo tee -a /etc/apache2/apache2.conf
echo "Order deny, allow" | sudo tee -a /etc/apache2/apache2.conf
echo "Deny from all" | sudo tee -a /etc/apache2/apache2.conf
echo "Check if mod_security module is running..."
echo "
" | sudo tee -a /etc/apache2/apache2.conf
sudo sed -i 's/^#?Timeout .*/Timeout 15/g' /etc/apache2/apache2.conf
sudo sed -i 's/^#?LimitXMLRequestBody .*/LimitXMLRequestBody 204800/' /etc/apache2/apache2.conf
sudo apachectl -M | grep --color security
echo "Is mod_security on? It should say security2_module somewhere."
read -r security_a2_on
if [[ $security_a2_on == "y" || $security_a2_on == "Y" ]]; then
echo "Good. I will move on."
elif [[ $security_a2_on == "n" || $security_a2_on == "N" ]]; then
sudo mv /etc/modsecurity/modsecurity.conf-recommended /etc/modsecurity/modsecurity.conf
sudo sed -i 's/^#?SecRuleEngine .*/SecRuleEngine On/g' /etc/modsecurity/modsecurity.conf
sudo service apache2 restart
else
echo "Error: bad input."
fi
return 1
}
function pam_secure {
sudo sed -i 's/^#?PASS_MAX_DAYS .*/PASS_MAX_DAYS 90/g' /etc/login.defs
sudo sed -i 's/^#?PASS_MIN_DAYS .*/PASS_MIN_DAYS 7/g' /etc/login.defs
sudo sed -i 's/^#?PASS_WARN_AGE .*/PASS_WARN_AGE 7/g' /etc/login.defs
echo "Setup failed login attempts in /etc/pam.d/common-auth and add some config changes? [y/n]"
read -r fail_pamd_ca
if [[ $fail_pamd_ca == "y" || $fail_pamd_ca == "Y" ]]; then
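# pam_tally counts failed logins: after 5 failures (deny=5) the account, including root, is locked for 900 seconds (unlock_time=900).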
echo "auth optional pam_tally.so deny=5 unlock_time=900 onerr=fail audit even_deny_root_account silent" | sudo tee -a /etc/pam.d/common-auth
sudo sed -i 's/^#?pam_unix.so .*/password [success=1 default=ignore] pam_unix.so obscure use_authtok try_first_pass sha512 remember=10 minlen=8 difok=5/g' /etc/pam.d/common-password
elif [[ $fail_pamd_ca == "n" || $fail_pamd_ca == "N" ]]; then
echo "Understood, moving on."
else
echo "Error: bad input."
fi
echo "Create brutally paranoid configuration for /etc/pam.d/other? [y/n]"
echo "NOTE: IF PAM FILES ARE DELETED ACCIDENTALLY, SYSTEM FAILURE MAY OCCUR."
read -r other_paranoid
if [[ $other_paranoid == "y" || $other_paranoid == "Y" ]]; then
echo "auth required pam_deny.so" | sudo tee -a /etc/pam.d/other
echo "auth required pam_warn.so" | sudo tee -a /etc/pam.d/other
echo "account required pam_deny.so" | sudo tee -a /etc/pam.d/other
echo "account required pam_warn.so" | sudo tee -a /etc/pam.d/other
echo "password required pam_deny.so" | sudo tee -a /etc/pam.d/other
echo "password required pam_warn.so" | sudo tee -a /etc/pam.d/other
echo "session required pam_deny.so" | sudo tee -a /etc/pam.d/other
echo "session required pam_warn.so" | sudo tee -a /etc/pam.d/other
elif [[ $other_paranoid == "n" || $other_paranoid == "N" ]]; then
echo "Understood, moving on."
else
echo "Error: bad input."
fi
return 1
}
function vsftpd_secure {
sudo sed -i 's/^anonymous_enable=.*/anonymous_enable=NO/g' /etc/vsftpd.conf
echo "Anonymous FTP login disabled."
sudo sed -i 's/^chroot_local_user=.*/chroot_local_user=YES/g' /etc/vsftpd.conf
echo "Local users restricted to their home directories."
echo "Create SSL/TLS certificate and private key for vsftpd server? [y/n]"
read -r ssl_vsftpd
if [[ $ssl_vsftpd == "y" || $ssl_vsftpd == "Y" ]]; then
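# Generate a self-signed certificate and key (valid for 365 days), both written to /etc/vsftpd.pem.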
sudo openssl req -x509 -days 365 -newkey rsa:2048 -nodes -keyout /etc/vsftpd.pem -out /etc/vsftpd.pem
echo "Created."
echo "Making config changes..."
sudo sed -i 's/^#?ssl_enable=.*/ssl_enable=YES/g' /etc/vsftpd.conf #enable tls/ssl
echo "SSL enabled."
sudo sed -i 's/^#?allow_anon_ssl=.*/allow_anon_ssl=NO/g' /etc/vsftpd.conf
sudo sed -i 's/^#?force_local_data_ssl=.*/force_local_data_ssl=YES/g' /etc/vsftpd.conf
sudo sed -i 's/^#?force_local_logins_ssl=.*/force_local_logins_ssl=YES/g' /etc/vsftpd.conf
sudo sed -i 's/^#?ssl_tlsv1=.*/ssl_tlsv1=YES/g' /etc/vsftpd.conf
sudo sed -i 's/^#?ssl_sslv2=.*/ssl_sslv2=NO/g' /etc/vsftpd.conf
sudo sed -i 's/^#?ssl_sslv3=.*/ssl_sslv3=NO/g' /etc/vsftpd.conf
sudo sed -i 's/^#?require_ssl_reuse=.*/require_ssl_reuse=NO/g' /etc/vsftpd.conf
sudo sed -i 's/^#?ssl_ciphers=.*/ssl_ciphers=HIGH/g' /etc/vsftpd.conf
sudo sed -i 's|^#?rsa_cert_file=.*|rsa_cert_file=/etc/vsftpd.pem|g' /etc/vsftpd.conf
sudo sed -i 's|^#?rsa_private_key_file=.*|rsa_private_key_file=/etc/vsftpd.pem|g' /etc/vsftpd.conf
sudo sed -i 's/^#?pasv_max_port=.*/pasv_max_port=65535/g' /etc/vsftpd.conf
sudo sed -i 's/^#?pasv_min_port=.*/pasv_min_port=64000/g' /etc/vsftpd.conf
sudo sed -i 's/^#?local_max_rate=.*/local_max_rate=30000/g' /etc/vsftpd.conf
sudo sed -i 's/^#?idle_session_timeout=.*/idle_session_timeout=120/g' /etc/vsftpd.conf
sudo sed -i 's/^#?max_per_ip=.*/max_per_ip=15/g' /etc/vsftpd.conf
sudo sed -i 's/^#?xferlog_enable=.*/xferlog_enable=YES/g' /etc/vsftpd.conf
sudo sed -i 's/^#?xferlog_std_format=.*/xferlog_std_format=NO/g' /etc/vsftpd.conf
sudo sed -i 's|^#?xferlog_file=.*|xferlog_file=/var/log/vsftpd.log|g' /etc/vsftpd.conf
echo "Log file set at /var/log/vsftpd.log"
sudo sed -i 's/^#?log_ftp_protocol=.*/log_ftp_protocol=YES/g' /etc/vsftpd.conf
sudo sed -i 's/^#?debug_ssl=.*/debug_ssl=YES/g' /etc/vsftpd.conf
echo "Configuration changes complete. Check /etc/vsftpd.conf later to see if they have all been done."
echo -en '\n'
echo "[link]3 "
echo -en '\n'
echo "Adding firewall exceptions.."
sudo ufw allow 20
sudo ufw allow 21
sudo ufw allow 64000:65535/tcp
sudo iptables -I INPUT -p tcp --dport 64000:65535 -j ACCEPT
elif [[ $ssl_vsftpd == "n" || $ssl_vsftpd == "N" ]]; then
echo "Understood. However, this is recommended."
else
echo "Error: bad input."
fi
echo "Restart vsftpd? [y/n]"
read -r vsftpd_restart
if [[ $vsftpd_restart == "y" || $vsftpd_restart == "Y" ]]; then
sudo service vsftpd restart
elif [[ $vsftpd_restart == "n" || $vsftpd_restart == "N" ]]; then
echo "Understood, moving on."
else
echo "Error: bad input."
fi
return 1
}
function apparmor_fix {
if [ -f /usr/sbin/apparmor_status ]; then
echo "Apparmor already installed."
else
echo "Apparmor not installed, installing."
sudo apt-get install -y -qq apparmor apparmor-profiles apparmor-utils
echo "Apparmor will be enabled at the end of the script."
fi
return 1
}
function sshd_secure_config {
sudo sed -i 's/^#?PermitRootLogin .*/PermitRootLogin no/' /etc/ssh/sshd_config
sudo sed -i 's/^#?PermitEmptyPasswords .*/PermitEmptyPasswords no/' /etc/ssh/sshd_config
sudo sed -i 's/^#?Port .*/Port 2223/' /etc/ssh/sshd_config
sudo sed -i 's/^#?X11Forwarding .*/X11Forwarding no/' /etc/ssh/sshd_config
sudo ufw allow 2223
sudo sed -i 's/^#?Protocol .*/Protocol 2/' /etc/ssh/sshd_config
sudo sed -i 's/^#?PrintLastLog .*/PrintLastLog no/' /etc/ssh/sshd_config
sudo sed -i 's/^#?IgnoreRhosts .*/IgnoreRhosts yes/' /etc/ssh/sshd_config
sudo sed -i 's/^#?RhostsAuthentication .*/RhostsAuthentication no/' /etc/ssh/sshd_config
sudo sed -i 's/^#?RSAAuthentication .*/RSAAuthentication yes/' /etc/ssh/sshd_config
sudo sed -i 's/^#?HostbasedAuthentication .*/HostbasedAuthentication no/' /etc/ssh/sshd_config
sudo sed -i 's/^#?LoginGraceTime .*/LoginGraceTime 60/' /etc/ssh/sshd_config
sudo sed -i 's/^#?MaxStartups .*/MaxStartups 4/' /etc/ssh/sshd_config
echo "Automatic configuration complete."
sudo sed -i 's/^#?LogLevel .*/LogLevel VERBOSE/' /etc/ssh/sshd_config
echo "ClientAliveInterval 300" | sudo tee -a /etc/ssh/sshd_config
echo "ClientAliveCountMax 0" | sudo tee -a /etc/ssh/sshd_config
sudo sed -i 's/^#?StrictModes .*/StrictModes yes/' /etc/ssh/sshd_config
clear
echo "Use iptables to try to prevent bruteforcing? [y/n]"
read -r iptable_ssh
if [[ $iptable_ssh == "y" || $iptable_ssh == "Y" ]]; then
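# The iptables "recent" match tracks new connections to the SSH port: the first rule records the source address, the second rule only ACCEPTs a source that has made fewer than 4 new connections in the last 60 seconds.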
iptables -A INPUT -p tcp --dport 2223 -m state --state NEW -m recent --set --name ssh --rsource
iptables -A INPUT -p tcp --dport 2223 -m state --state NEW -m recent ! --rcheck --seconds 60 --hitcount 4 --name ssh --rsource -j ACCEPT
echo "Done."
elif [[ $iptable_ssh == "n" || $iptable_ssh == "N" ]]; then
echo "Understood, moving on."
else
echo "Error: bad input."
fi
echo "Use public/private keys for authentication instead of passwords? [y/n]"
read -r auth_private
if [[ $auth_private == "y" || $auth_private == "Y" ]]; then
sudo ssh-keygen -t rsa
clear
sudo chmod 700 ~/.ssh
sudo chmod 600 ~/.ssh/id_rsa
cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
sudo chmod 600 ~/.ssh/authorized_keys
restorecon -Rv ~/.ssh
sudo sed -i 's/^#?PasswordAuthentication .*/PasswordAuthentication no/' /etc/ssh/sshd_config
elif [[ $auth_private == "n" || $auth_private == "N" ]]; then
echo "Understood, moving on."
else
echo "Error: bad input."
fi
return 1
}
function sysctl_secure_config {
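# Kernel and network hardening: disable the SysRq key, drop source-routed packets and ICMP redirects, enable reverse-path filtering, log martian packets, ignore pings and bogus ICMP errors, and turn on TCP SYN cookies.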
echo "kernel.sysrq = 0" | sudo tee -a /etc/sysctl.conf
echo "net.ipv4.conf.all.accept_source_route = 0" | sudo tee -a /etc/sysctl.conf
echo "net.ipv4.conf.all.accept_redirects = 0" | sudo tee -a /etc/sysctl.conf
echo "net.ipv4.conf.all.rp_filter = 1" | sudo tee -a /etc/sysctl.conf
echo "net.ipv4.conf.all.log_martians = 1" | sudo tee -a /etc/sysctl.conf
echo "net.ipv4.icmp_ignore_bogus_error_responses = 1" | sudo tee -a /etc/sysctl.conf
echo "net.ipv4.icmp_echo_ignore_all = 1" | sudo tee -a /etc/sysctl.conf
echo "net.ipv4.icmp_echo_ignore_broadcasts = 1" | sudo tee -a /etc/sysctl.conf
echo "net.ipv4.tcp_syncookies=1" | sudo tee -a /etc/sysctl.conf
clear
echo -en '\n'
echo "Disable IPv6? [y/n]"
echo -en '\n'
read -r ipv6_disable
if [[ $ipv6_disable == "y" || $ipv6_disable == "Y" ]]; then
echo "net.ipv6.conf.all.disable_ipv6 = 1" | sudo tee -a /etc/sysctl.conf
echo "net.ipv6.conf.default.disable_ipv6 = 1" | sudo tee -a /etc/sysctl.conf
echo "net.ipv6.conf.lo.disable_ipv6 = 1" | sudo tee -a /etc/sysctl.conf
echo "IPv6 disabled."
elif [[ $ipv6_disable == "n" || $ipv6_disable == "N" ]]; then
echo "Understood, skipping disabling IPv6."
else
echo "Error: bad input."
fi
return 1
}
if [ "$(id -u)" != "0" ]; then
echo "Please run this script as root. I promise I won't dd /dev/urandom into /dev/sda..."
exit
else
main
fi
'''
DON'T USE THIS
Author: CreeperTyE
3: *modulo.co*/*e*u*e-ftp*s*rvi*e-v*ftp*-linux*ht*l
Unknown links are censored to prevent spreading illicit content.


Tutorial: Using Borg for backup your QNAP to other devices (Advanced - CLI only)

This tutorial will explain how to use Borg Backup to perform backups, specifically backups from our QNAP to another unit (another NAS in your LAN, an external hard drive, any off-site server, etc). Borg is also a great tool to back up your computers to your NAS, though. This tutorial is a little more technical than the previous one, so be patient :)
MASSIVE WALL OF TEXT AHEAD. You have been warned.
Why Borg instead of, let's say, HBS3? Well, Borg is one of the best -if not THE BEST- backup software available. It is very resilient to failure and corruption. Personally I'm in love with Borg. It is a command line based tool. That means that there is no GUI available (there are a couple of front-ends created by the community, though). I know that can be very intimidating at first when you are not accustomed to it, and that it looks ugly, but honestly, it is not so complicated, and if you are willing to give it a try, I can assure you that it is simple and easy. You might even like it over time!
https://www.borgbackup.org/
That aside, I have found that HBS3 can only perform incremental backups when doing QNAP-QNAP backups. It can use Rsync to save files to a non-QNAP device, but then you can't use incremental backups (and IIRC, neither deduplication nor encryption). It will even refuse to save to a mounted folder using hybrid mount. QNAP seems to be trying to subtly lock you down into their ecosystem. Borg has none of those limitations.

Main pros of Borg Backup:
- VERY efficient and powerful
- Space efficient thanks to deduplication and compression
- Allows encryption, deduplication, incremental, compression… you name it.
- Available in almost any OS (except Windows) and thanks to Docker, even in Windows. There are also ARM binaries, so it is Raspberry compatible, and even ARM based QNAPs that don’t support docker can use it!!!
- Since it’s available in most OS, you can use a single unified solution for all your backups.
- Can make backups in PUSH and PULL style. Either each machine with Borg pushes the files into the server, or a single server with Borg installed pulls the files from any device without needing to install Borg on those devices.
- It is backed by a huge community with tons of integration and wrapper tools (https://github.com/borgbackup/community)
- Supports Backup to local folders, LAN backups using NFS or SMB, and also remote backups using SFTP or mounting SSHFS.
- IT IS FOSS. Seriously, guys, whenever possible, choose FOSS.

Cons of Borg Backup:
- It is not tailored for backups to cloud services like Drive or Mega. You might want to take a look at Rclone or Restic for that.
- It lacks a GUI, so everything is CLI controlled. I know, it can be very intimidating, but once you have used it for a couple of days, you will notice how simple and comfortable it is to use.

The easiest way to run Borg is to just grab the appropriate prebuilt binary (https://github.com/borgbackup/borg/releases) and run it baremetal, but I’m going to show how to install Borg in a docker container so you can apply this solution to any other scenario where docker is available. If you want to skip the container creation, just proceed directly to step number 2.
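For reference, the baremetal route is little more than downloading the binary and making it executable. The asset name and version below are assumptions; check the releases page for the build that matches your architecture:

wget https://github.com/borgbackup/borg/releases/download/1.1.10/borg-linux64
chmod +x borg-linux64
./borg-linux64 --version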

**FIRST STEP: LET'S BUILD THE CONTAINER**
There is currently no official Borg prebuilt container (although there are non-official ones). Since it’s a CLI tool, you don’t really need a prebuilt container, you can just use your preferred one (Ubuntu, Debian, Alpine etc) and install Borg directly in your container. We are using a ubuntu:latest container because the available Borg version for ubuntu is up to date. For simplicity, all the directories we want to back up will be mounted inside the container under /output.
If you are already familiar with SSH and container creation through the CLI, just use this template, substituting your specific directory mounts.
docker run -it \
  --cap-add=NET_ADMIN \
  --net=bridge \
  --privileged \
  --cap-add SYS_ADMIN \
  --device /dev/fuse \
  --security-opt apparmor:unconfined \
  --name=borgbackup \
  -v /share/Movies:/output/Movies:ro \
  -v /share/Important/Documents:/output/Documents:ro \
  -v /share/Other:/output/Other:ro \
  -v /share/Container/borgbackup/persist:/persist \
  -v /etc/localtime:/etc/localtime:ro \
  ubuntu:latest
(REMEMBER: LINUX IS CASE SENSITIVE, SO CAPITALS MATTER!!)
Directories to be backed up are mounted as read only (:ro) for extra safety. I have also found that mounting another directory as a “persistent” directory makes it easy to create and edit the needed scripts directly from File Finder in QNAP, and also lets you keep them in case you need to destroy or recreate the container: this is the “/persist” directory. Use your favorite path.
If you are not familiar with SSH, first go here to learn how to activate and login into your QNAP using SSH (https://www.qnap.com/en/how-to/knowledge-base/article/how-to-access-qnap-nas-by-ssh/).
You can also use the GUI in Container Station to create the container and mount folders in advanced tab during container creation. Please, refer to QNAP’s tutorials about Docker.
GUI example
If done correctly, you will see that this container appears in the overview tab of Container Station. Click the name, and then click the two arrows. That will transport you to another tab inside the container to start working.
https://preview.redd.it/5y09skuxrvj41.jpg?width=1440&format=pjpg&auto=webp&s=19e4b22d6458d2c9a8143c9841f070828bcf5170

**SECOND STEP: INSTALLING BORG BACKUP INSIDE THE CONTAINER**
First check that the directory with all the data you want to backup (/output in our example) is mounted. If you can’t see anything, then you did something wrong in the first step when creating the container. If so, delete the container and try again. Now navigate to /persist using “cd /persist”
See how /output contains to-be-backup directories
Now, we are going to update ubuntu and install some dependencies and apps we need to work. Copy and paste this:
apt update && apt upgrade -y
apt install -y nano fuse software-properties-common nfs-common ssh
It will install a lot of things. Just let it work. When finished, install borgbackup using
add-apt-repository -y ppa:costamagnagianfranco/borgbackup
apt install -y borgbackup
When it’s finished, run “borg --version” and you will be shown the current installed version (at time of writing this current latest is 1.1.10). You already have Borg installed!!!!
1.1.10 is latest version at the time of this tutorial creation

**THIRD STEP: PREPARING THE BACKUP DEVICE USING NFS MOUNT**
Now, to init the repository, we first need to choose where we want to make the backup. Borg can easily make “local” backups, choosing a local folder, but that defeats the purpose of backups, right? We want to create remote repositories.
If you are making backups to a local (same network) device (another NAS, a computer, etc) then you can choose to use SFTP (SSH file transfer) or just NFS or SMB to mount a folder. If you want to backup to a remote repository outside your LAN (the internet) you HAVE to use SFTP or SSHFS. I’m explaining now how to mount folder using NFS, leaving SFTP for later.
Borg can work in two different ways: PUSH style or PULL style.
In PUSH style, each unit to be backed up has Borg installed and it “pushes” the files to a remote folder using NFS, SMB or SSHFS. The target unit does not need to have Borg installed.
PUSH style backup: The QNAP sends files to the backup device

In PULL style, the target unit that is going to receive the backups has Borg installed, and it “pulls” the files from the units to be backed up (and so, they don’t need Borg installed) using NFS, SMB or SSHFS. This is great if you have a powerful NAS unit and want to backup several computers.
PULL style backup: The backup device gets files from QNAP. Useful for multiple unit backups into the same backup server.

When using SFTP, the backup unit has Borg installed, opens a secure SSH connection to target unit, connects with Borg in target machine, and uploads the files. In SFTP style, BOTH units need Borg installed.
SFTP: Borg needs to be installed in both devices, and they "talk" to each other.

I’m assuming you have another device with IP “192.168.1.200” (in my example I’m using a VM with that IP) with a folder called “/backup” inside. I’m also assuming that you have correctly authorized NFS mounts with read/write permissions between both devices. If you don’t know how to, you’ll need to investigate. (https://www.qnap.com/en-us/how-to/knowledge-base/article/how-to-enable-and-setup-host-access-for-nfs-connection/)
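For reference, if the target is a plain Linux box rather than a QNAP, the server side of that permission setup can be a one-line export. This is only a sketch: the subnet below is a placeholder for whatever range your NAS sits in, and it assumes nfs-kernel-server is installed on the target:

echo "/backup 192.168.1.0/24(rw,sync,no_subtree_check)" >> /etc/exports
exportfs -ra    # reload the export table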
NFS mount means mirroring two folders from two different devices. So, mounting folder B from device Y into folder A from device X means that even if the folder B is “physically” stored on device Y, the device X can use it exactly as if it was folder A inside his local path. If you write something to folder A, folder B will automatically be updated with that new file and vice-versa.
Graphical example of what happens when mounting folders in Linux system.
Mount usage is: “mount [protocol] [targetIP]:/target/directory /local/directory” So, go to your container and write:
mount -t nfs 192.168.1.200:/backup /mnt 
Mount is the command to mount. “-t nfs” means using NFS, if you want to use SMB you would use “-t cifs”. 192.168.1.200 is the IP of the device where you are going to make backups. /backup is the directory in the target we want to save our backups to (remember you need to correctly enable permission for NFS server sharing in the target device). /mnt is the directory in the container where the /backup folder will be mounted.
OK, so now /mnt in container = /backup in target. If you drop a .txt file in one of those directories, it will immediately appear on the other. So… All we have to do now is make a borg repository on /mnt and wildly start making backups. /mnt will be our working directory.
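If you want to sanity-check the mount before going any further, a quick test (same paths as in this example) could be:

df -h /mnt             # should list 192.168.1.200:/backup as the filesystem
touch /mnt/test.txt    # the file should appear inside /backup on the target device
rm /mnt/test.txt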

**FOURTH STEP: ACTUALLY USING BORG** (congrats if you made it here)
Read the documentation
https://borgbackup.readthedocs.io/en/stable/usage/general.html
It’s madness, right? It’s OK. In fact we only need a few borg commands to make it work.
“borg init” creates a repository, that is, a place where the backup files are stored.
“borg create” makes a backup
“borg check” checks backup integrity
“borg prune” prunes the backup (deletes older files)
“borg extract” extract files from a backup
“borg mount” mounts a backup as if it was a directory and you can navigate it
“borg info” gives you info from the repository
“borg list” shows every backup inside the repository
But since we are later using pre-made scripts for backup, you will only need to actually use “init”, “info” and “list” and in case of recovery, “mount”.
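The scripts below only cover creating backups, so here is a minimal sketch of what a recovery could look like. It assumes the repository is still reachable at /mnt and uses the archive name from the example run further down; substitute your own archive name from “borg list”:

# browse a backup read-only through FUSE:
mkdir -p /tmp/restore
borg mount /mnt::QNAP-2020-01-26T01:05:36 /tmp/restore
ls /tmp/restore/output
borg umount /tmp/restore

# or extract part of an archive into the current directory
# (paths inside the archive are stored without the leading slash):
cd /tmp/restore
borg extract /mnt::QNAP-2020-01-26T01:05:36 output/Documents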
let’s create our repository using INIT
https://borgbackup.readthedocs.io/en/stable/usage/init.html
borg init -e [encryption] [options] /mnt 
So, if you want to encrypt the repository with a password (highly recommended) use “-e repokey” or “-e repokey-blake2”. If you want to use a keyfile instead, use “-e keyfile”. If you don’t want to encrypt, use “-e none”. If you want to set a maximum space quota, use “--storage-quota SIZE” to avoid excessive storage usage (i.e. “--storage-quota 500G” or “--storage-quota 2.5T”). Read the link above. OK, so in this example:
borg init -e repokey --storage-quota 200G /mnt 
You will be asked for a password. Keep this password safe. If you lose it, you lose your backups!!!! Once finished, we have our repository ready to create the first backup. If you use “ls /mnt” you will see that the /mnt directory is no longer empty, but contains several files. Those are the repository files, and they should now also be present in your backup device.
init performed successfully
Let’s talk about actually creating backups. Usually, you would create a backup using the “borg create” backup command, using something like this:
borg create -l -s /mnt::Backup01 /output --exclude '*.py' 
https://borgbackup.readthedocs.io/en/stable/usage/create.html
That would create a backup archive called “Backup01” of all files and directories in /output, but excluding every .py file. It will also list all files (-l) and show stats (-s) during the process. If you later run the same command but with “Backup02”, only newly added files will be saved (incremental), but deleted files will still be available in “Backup01”. So as new backups are made, you will eventually run out of storage space. To avoid this you would need to schedule pruning.
https://borgbackup.readthedocs.io/en/stable/usage/prune.html
borg prune [options] [path/to/repo] is used to delete old backups based on your specified options (i.e. “keep 4 yearly backups, 1 backup for each month of the last year, and 1 daily backup for the last month”).
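As a concrete sketch, that retention policy would translate to something like this (repository path as in this example):

borg prune --list --keep-yearly 4 --keep-monthly 12 --keep-daily 30 /mnt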
BUT. To make it simple, we just need to create a script that will automatically 1) create a new backup with a specified name and 2) run a prune with a specified retention policy.
Inside the container head to /persist using “cd /persist”, and create a file called backup.sh using
touch backup.sh
chmod 700 backup.sh
nano backup.sh
Then, copy the following and paste it inside nano using CTRL+V
#!/bin/sh

# Setting this, so the repo does not need to be given on the command line:
export BORG_REPO=/mnt

# Setting this, so you won't be asked for your repository passphrase:
export BORG_PASSPHRASE='YOURsecurePASS'
# or this to ask an external program to supply the passphrase:
# export BORG_PASSCOMMAND='pass show backup'

# some helpers and error handling:
info() { printf "\n%s %s\n\n" "$( date )" "$*" >&2; }
trap 'echo $( date ) Backup interrupted >&2; exit 2' INT TERM

info "Starting backup"

# Backup the most important directories into an archive named after
# the machine this script is currently running on:
borg create \
    --verbose \
    --filter AME \
    --list \
    --stats \
    --show-rc \
    --compression lz4 \
    --exclude-caches \
    --exclude '*@Recycle/*' \
    --exclude '*@Recently-Snapshot/*' \
    --exclude '*.@__thumb/*' \
    ::'QNAP-{now}' \
    /output

backup_exit=$?

info "Pruning repository"

# Use the `prune` subcommand to maintain 7 daily, 4 weekly and 6 monthly
# archives of THIS machine. The 'QNAP-' prefix is very important to
# limit prune's operation to this machine's archives and not apply to
# other machines' archives also:
borg prune \
    --list \
    --prefix 'QNAP-' \
    --show-rc \
    --keep-daily 7 \
    --keep-weekly 4 \
    --keep-monthly 6

prune_exit=$?

# use highest exit code as global exit code
global_exit=$(( backup_exit > prune_exit ? backup_exit : prune_exit ))

if [ ${global_exit} -eq 0 ]; then
    info "Backup and Prune finished successfully"
elif [ ${global_exit} -eq 1 ]; then
    info "Backup and/or Prune finished with warnings"
else
    info "Backup and/or Prune finished with errors"
fi

exit ${global_exit}
This script seems very complicated, but all it does is
  1. Define the backup location
  2. Define backup parameters, inclusions and exclusions and run backup
  3. Define pruning policy and run prune
  4. Show stats
You can freely modify it using the options you need (they are described in the documentation).
“export BORG_REPO=/mnt” is where the repository is located.
“export BORG_PASSPHRASE='YOURsecurePASS' is your repository password (between the single quotes)
After “borg create” some options are defined, like compression, file listing and stat showing. Then exclusions are defined (each --exclude defines one exclusion rule; in this example I have defined rules to avoid backing up thumbnails, recycle bin files, and snapshots). If you wish to exclude more directories or files, you do it by adding a new rule there.
::'QNAP-{now}' defines how backups will be named. Right now they will be named as QNAP-”current date and time”. In case you want only current date and not time used, you can use instead:
::'QNAP-{now:%Y-%m-%d}' \
Be aware that if you decide to do so, you will only be able to create a single backup each day, as subsequent backups the same day will fail, since Borg will find another backup with same name and skip the current one.
/output below is the directory to be backed up.
And finally, prune policy is at the end. This defines what backups will be kept and which ones will be deleted. Current defined policy is to keep 7 end of day, then 4 end of week and 6 end of month backups. Extra backups will be deleted. You can modify this depending on your needs. Follow the documentation for extra information and examples.
https://borgbackup.readthedocs.io/en/stable/usage/prune.html
Now save the script using CTRL+O. We are ready. Run the script using:
./backup.sh
It will show progress, including what files are being saved. After finishing, it will return backup name (in this example “QNAP-2020-01-26T01:05:36“ is the name of the backup archive), stats and will return two rc status, one for the backup, and another for pruning. “rc0” means success. “rc1” means finished, but with some errors. “rc2” means failed. You should be returned two rc0 status and the phrase “Backup and Prune finished successfully”. Congrats.
Backup completed. rc 0=good. rc 2=bad
You can use any borg command manually against your repository as needed. For example:
borg list /mnt                               # List your current backups inside the repository
borg list /mnt::QNAP-2020-01-26T01:05:36     # List all files inside this specific backup
borg info /mnt                               # List general stats of your repository
borg check -v --show-rc /mnt                 # Performs an integrity check and returns rc status (0, 1 or 2)
All that is left is to create the final running script and the cronjob in our QNAP to automate backups. You can skip the next step, as it describes the same process but using SFTP instead of NFS, and head directly to step number Six.

**FIFTH STEP: THE SAME AS STEP 4, BUT USING SFTP INSTEAD**
If you want to perform backups to an off-site machine, like another NAS located elsewhere, then you can’t use NFS or SMB, as they are not prepared to be used over the internet and are not safe. We must use SFTP. SFTP is NOT FTP over SSL (that is FTPS). SFTP stands for Secure File Transfer Protocol, and it’s based on SSH but for file transfer. It is secure, as everything is encrypted, but expect lower speed due to encryption overhead. We first need to set up SSH on our target machine, so be sure to enable it. I also recommend using a non-standard port. In our example, we are using port 4000.
IMPORTANT NOTE: To use SFTP, borg backup must be running in the target machine. You can run it baremetal, or use a container, just as in our QNAP, but if you really can’t get borg running in the target machine, then you cannot use SFTP. There is an alternative, though: SSHFS, which is basically NFS but over SSH. With it you can securely mount a folder over internet. Read this documentation (https://www.digitalocean.com/community/tutorials/how-to-use-sshfs-to-mount-remote-file-systems-over-ssh) and go back to Third Step once you got it working. SSHFS is not covered in this tutorial.
First go to your target machine, and create a new user (in our example this will be “targetuser”)
Second, we need to create SSH keys, so both the original machine and the target one can establish an SSH connection without needing a password. It also greatly increases security. In our original container run
ssh-keygen -t rsa 
When you are asked for a passphrase just press enter (no passphrase). Your keys are now stored in ~/.ssh. To copy them to your target machine, use this:
ssh-copy-id -p 4000 targetuser@192.168.1.200 
If that doesn’t work, this is an alternative command you can use:
cat ~/.ssh/id_rsa.pub | ssh -p 4000 targetuser@192.168.1.200 "mkdir -p ~/.ssh && chmod 700 ~/.ssh && cat >> ~/.ssh/authorized_keys" 
You will be asked for the targetuser password when connecting. If you were successful, you can now SSH without a password into the target machine using “ssh -p 4000 targetuser@192.168.1.200”. Try it now. If you get to log in without a password prompt, you got it right. If it still asks you for a password when SSH’ing, try repeating the last step or google a little about how to transfer the SSH keys to the target machine.
Now that you are logged in your target machine using SSH, install Borg backup if you didn’t previously, create the backup folder (/backup in our example) and init the repository as was shown in Third Step.
borg init -e repokey --storage-quota 200G /backup
Once the repository is initiated, exit SSH with the "exit" command and you will be back in your container. You know what comes next.
cd /persist
touch backup.sh
chmod 700 backup.sh
nano backup.sh
Now paste this inside:
#!/bin/sh

# Setting this, so the repo does not need to be given on the command line:
export BORG_REPO=ssh://[email protected]:4000/backup

# Setting this, so you won't be asked for your repository passphrase:
export BORG_PASSPHRASE='YOURsecurePASS'
# or this to ask an external program to supply the passphrase:
# export BORG_PASSCOMMAND='pass show backup'

# some helpers and error handling:
info() { printf "\n%s %s\n\n" "$( date )" "$*" >&2; }
trap 'echo $( date ) Backup interrupted >&2; exit 2' INT TERM

info "Starting backup"

# Backup the most important directories into an archive named after
# the machine this script is currently running on:

borg create \
    --verbose \
    --filter AME \
    --list \
    --stats \
    --show-rc \
    --compression lz4 \
    --exclude-caches \
    --exclude '*@Recycle/*' \
    --exclude '*@Recently-Snapshot/*' \
    --exclude '*[email protected]__thumb/*' \
    \
    ::'QNAP-{now}' \
    /output

backup_exit=$?

info "Pruning repository"

# Use the `prune` subcommand to maintain 7 daily, 4 weekly and 6 monthly
# archives of THIS machine. The 'QNAP-' prefix is very important to
# limit prune's operation to this machine's archives and not apply to
# other machines' archives also:

borg prune \
    --list \
    --prefix 'QNAP-' \
    --show-rc \
    --keep-daily 7 \
    --keep-weekly 4 \
    --keep-monthly 6

prune_exit=$?

# use highest exit code as global exit code
global_exit=$(( backup_exit > prune_exit ? backup_exit : prune_exit ))

if [ ${global_exit} -eq 0 ]; then
    info "Backup and Prune finished successfully"
elif [ ${global_exit} -eq 1 ]; then
    info "Backup and/or Prune finished with warnings"
else
    info "Backup and/or Prune finished with errors"
fi

exit ${global_exit}
CTRL+O to save, and CTRL+X to exit. OK, let’s do it.
./backup.sh 
It should connect and perform your backup. Note that the only thing I changed from the script shown in the Fourth Step is the "BORG_REPO" line, which I switched from the local "/mnt" to the remote SSH URL with our target machine and user data (see the side-by-side sketch below).
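In other words, the only line that differs between the two variants of backup.sh is the repository location. A short sketch, using the same host and port as in this example:

# Fourth Step: local repository mounted over NFS
export BORG_REPO=/mnt

# Fifth Step: remote repository reached over SFTP/SSH
export BORG_REPO=ssh://[email protected]:4000/backup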
Finally all that is left is to automate this.

**SIXTH STEP: AUTOMATING BACKUP**
The only problem is that containers don’t retain mounts when they restart. That is not a problem if you are using SFTP, but with NFS we need to re-mount each time the container is started, and fstab does not work inside a container. The easiest solution is to create a script called "start.sh":
cd /persist
mkdir log
touch start.sh
chmod 700 start.sh
nano start.sh
and inside just paste this:
#!/bin/bash
log="/persist/log/borg.log"
mount -t nfs 192.168.1.200:/backup /mnt
/persist/backup.sh 2>> "$log"
echo ==========FINISH========== >> "$log"
Save and try it: stop the container and start it again. If you run "ls /mnt" you will see that the repository is no longer there, because the mount point was unmounted when you stopped the container. Now run:
/persist/start.sh 
When it’s finished, a log file will appear inside /persist/log. It contains everything Borg previously printed to the screen, and you can check it using:
cat /persist/log/borg.log
Everything is ready. All we need to do is create a crontab job to run this script whenever we want. You can read here how to edit crontab on a QNAP (https://wiki.qnap.com/wiki/Add_items_to_crontab). Add this line to the crontab:
0 1 * * * docker start borgbackup && docker exec borgbackup /bin/bash -c "/persist/start.sh" && docker stop borgbackup
That will launch the container each day at 1:00 am, run the start.sh script, and stop the container when finished. You can run the same chain manually once to verify it, as sketched below.
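Before relying on cron, it’s worth running the same chain by hand from the QNAP shell to confirm every piece works; a minimal sketch:

docker start borgbackup && \
docker exec borgbackup /bin/bash -c "/persist/start.sh" && \
docker stop borgbackup

# Then check the log that start.sh wrote:
tail -n 20 /persist/log/borg.log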

**EXTRA: RECOVERING OUR DATA**
In case you need to recover your data, you can use any device with Borg installed. There are two commands you can use: borg extract and borg mount. borg extract extracts all files inside an archive into the current directory. borg mount mounts the repository so you can navigate it and pick the specific files you want to recover, much like NFS or SMB work.
Some examples:
borg extract /mnt::QNAP-2020-01-26T01-05-36 -> Extract all files from this specific backup time point into the current directory
borg mount /mnt::QNAP-2020-01-26T01-05-36 /recover -> Mount this specific backup time point inside the /recover directory so you can navigate and search files inside it
borg mount /mnt /recover -> Mount all backup time points inside the /recover directory; you can navigate all time points and recover whatever you want
borg umount /recover -> Unmount the repository from /recover
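borg extract also accepts paths, so you can restore a single folder instead of the whole archive. A small sketch; the path inside the archive is hypothetical, and note that borg stores paths without the leading slash, so /output becomes output:

cd /restore   # any empty directory you want the files restored into (hypothetical path)
borg extract /mnt::QNAP-2020-01-26T01-05-36 output/documents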

I know this is a somewhat complicated tutorial, and honestly, I don’t think many people will be interested, as Borg is for advanced users. That said, I had a ton of fun using Borg and writing this tutorial, and I hope it helps some people. I am aware that 99% of this community's users do not need advanced features and would do great using HB3... but to be honest, I'm writing for that 1%.
Next up: I’m trying a Duplicati container that is supposed to have a GUI, so maybe the next tutorial will cover a GUI-based backup tool. Who knows?
submitted by Vortax_Wyvern to qnap [link] [comments]

Issue when compiling postgis from source

Hello, right now I'm using Solus. Because there is no compiled version of PostGIS for Solus, I have no option other than building it from source.
As a note, I have installed all of the requirements: GDAL, PROJ, GEOS, JSON-C, and LIBXML2 (including the devel packages).
But when I run configure, it says that it could not find libxml2.
Here is the full configure output:
checking for a BSD-compatible install... /usbin/install -c checking build system type... x86_64-pc-linux-gnu checking host system type... x86_64-pc-linux-gnu checking how to print strings... printf checking for gcc... gcc checking whether the C compiler works... yes checking for C compiler default output file name... a.out checking for suffix of executables... checking whether we are cross compiling... no checking for suffix of object files... o checking whether we are using the GNU C compiler... yes checking whether gcc accepts -g... yes checking for gcc option to accept ISO C89... none needed checking for a sed that does not truncate output... /bin/sed checking for grep that handles long lines and -e... /bin/grep checking for egrep... /bin/grep -E checking for fgrep... /bin/grep -F checking for ld used by gcc... /usbin/ld checking if the linker (/usbin/ld) is GNU ld... yes checking for BSD- or MS-compatible name lister (nm)... /usbin/nm -B checking the name lister (/usbin/nm -B) interface... BSD nm checking whether ln -s works... yes checking the maximum length of command line arguments... 1572864 checking how to convert x86_64-pc-linux-gnu file names to x86_64-pc-linux-gnu format... func_convert_file_noop checking how to convert x86_64-pc-linux-gnu file names to toolchain format... func_convert_file_noop checking for /usbin/ld option to reload object files... -r checking for objdump... objdump checking how to recognize dependent libraries... pass_all checking for dlltool... no checking how to associate runtime and link libraries... printf %s\n checking for ar... ar checking for archiver @FILE support... @ checking for strip... strip checking for ranlib... ranlib checking for gawk... gawk checking command to parse /usbin/nm -B output from gcc object... ok checking for sysroot... no checking for a working dd... /bin/dd checking how to truncate binary pipes... /bin/dd bs=4096 count=1 checking for mt... mt checking if mt is a manifest tool... no checking how to run the C preprocessor... gcc -E checking for ANSI C header files... yes checking for sys/types.h... yes checking for sys/stat.h... yes checking for stdlib.h... yes checking for string.h... yes checking for memory.h... yes checking for strings.h... yes checking for inttypes.h... yes checking for stdint.h... yes checking for unistd.h... yes checking for dlfcn.h... yes checking for objdir... .libs checking if gcc supports -fno-rtti -fno-exceptions... no checking for gcc option to produce PIC... -fPIC -DPIC checking if gcc PIC flag -fPIC -DPIC works... yes checking if gcc static flag -static works... yes checking if gcc supports -c -o file.o... yes checking if gcc supports -c -o file.o... (cached) yes checking whether the gcc linker (/usbin/ld -m elf_x86_64) supports shared libraries... yes checking whether -lc should be explicitly linked in... no checking dynamic linker characteristics... GNU/Linux ld.so checking how to hardcode library paths into programs... immediate checking whether stripping libraries is possible... yes checking if libtool supports shared libraries... yes checking whether to build shared libraries... yes checking whether to build static libraries... yes checking for gcc... (cached) gcc checking whether we are using the GNU C compiler... (cached) yes checking whether gcc accepts -g... (cached) yes checking for gcc option to accept ISO C89... (cached) none needed checking how to run the C preprocessor... gcc -E checking for grep that handles long lines and -e... (cached) /bin/grep checking for cpp... 
/usbin/cpp checking if gcc supports -Wall... yes checking if gcc supports -Wmissing-prototypes... yes checking if gcc supports -ffloat-store... yes checking if gcc supports --exclude-libs... yes checking for flex... flex checking lex output file root... lex.yy checking lex library... -lfl checking whether yytext is a pointer... yes checking for bison... bison -y checking ieeefp.h usability... no checking ieeefp.h presence... no checking for ieeefp.h... no checking termios.h usability... yes checking termios.h presence... yes checking for termios.h... yes checking for vasprintf... yes checking for asprintf... yes checking for _LARGEFILE_SOURCE value needed for large files... no checking whether isfinite is declared... yes checking whether isfinite is declared... yes checking for perl... /usbin/perl checking for xsltproc... /usbin/xsltproc checking for convert... /usbin/convert checking for dblatex... no configure: WARNING: dblatex is not installed so PDF documentation cannot be built checking for xmllint... /usbin/xmllint configure: WARNING: could not locate Docbook stylesheets required to build the documentation checking CUnit/CUnit.h usability... no checking CUnit/CUnit.h presence... no checking for CUnit/CUnit.h... no configure: WARNING: could not locate CUnit required for unit tests checking iconv.h usability... yes checking iconv.h presence... yes checking for iconv.h... yes checking for libiconv_open in -liconv... no checking for iconv_open in -lc... yes checking for iconvctl... no checking for libiconvctl... no checking for pg_config... /usbin/pg_config checking PostgreSQL version... PostgreSQL 10.12 checking libpq-fe.h usability... yes checking libpq-fe.h presence... yes checking for libpq-fe.h... yes checking for PQserverVersion in -lpq... yes checking for xml2-config... /usbin/xml2-config checking libxml/tree.h usability... yes checking libxml/tree.h presence... yes checking for libxml/tree.h... yes checking libxml/parser.h usability... yes checking libxml/parser.h presence... yes checking for libxml/parser.h... yes checking libxml/xpath.h usability... yes checking libxml/xpath.h presence... yes checking for libxml/xpath.h... yes checking libxml/xpathInternals.h usability... yes checking libxml/xpathInternals.h presence... yes checking for libxml/xpathInternals.h... yes checking for xmlInitParser in -lxml2... no configure: error: could not find libxml2 
Does anyone have a solution for this issue?
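One way to narrow this down (a sketch, not a guaranteed fix): the configure check that fails tries to link xmlInitParser from -lxml2, so compiling a tiny test program with the flags reported by xml2-config shows whether the library itself is linkable on the system; either way, config.log records the exact linker error behind the "could not find libxml2" message.

# Show the compile/link flags libxml2 advertises
xml2-config --cflags --libs

# Minimal link test against libxml2
cat > /tmp/xmltest.c <<'EOF'
#include <libxml/parser.h>
int main(void) { xmlInitParser(); xmlCleanupParser(); return 0; }
EOF
gcc /tmp/xmltest.c $(xml2-config --cflags --libs) -o /tmp/xmltest && /tmp/xmltest && echo "libxml2 links OK"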
submitted by Smooth-Efficiency-51 to postgis [link] [comments]

MAME 0.215

MAME 0.215

A wild MAME 0.215 appears! Yes, another month has gone by, and it’s time to check out what’s new. On the arcade side, Taito’s incredibly rare 4-screen top-down racer Super Dead Heat is now playable! Joining its ranks are other rarities, such as the European release of Capcom’s 19XX: The War Against Destiny, and a bootleg of Jaleco’s P-47 – The Freedom Fighter using a different sound system. We’ve got three newly supported Game & Watch titles: Lion, Manhole, and Spitball Sparky, as well as the crystal screen version of Super Mario Bros. Two new JAKKS Pacific TV games, Capcom 3-in-1 and Disney Princesses, have also been added.
Other improvements include several more protection microcontrollers dumped and emulated, the NCR Decision Mate V working (now including hard disk controllers), graphics fixes for the 68k-based SNK and Alpha Denshi games, and some graphical updates to the Super A'Can driver.
We’ve updated bgfx, adding preliminary Vulkan support. There are some issues we’re aware of, so if you run into issues, check our GitHub issues page to see if it’s already known, and report it if it isn’t. We’ve also improved support for building and running on Linux systems without X11.
You can get the source and Windows binary packages from the download page.

MAMETesters Bugs Fixed

New working machines

New working clones

Machines promoted to working

New machines marked as NOT_WORKING

New clones marked as NOT_WORKING

New working software list additions

Software list items promoted to working

New NOT_WORKING software list additions

Source Changes

submitted by cuavas to emulation [link] [comments]

Building libva and disabling libdrm

I've been struggling with this for a few days now and honestly I don't know where to turn. I've had success getting a clean build of ffmpeg with a few dependencies via Cygwin, but I'm having trouble getting it built with libmfx/QuickSync enabled.
In order to get ffmpeg built with libmfx I need to build the MSDK, the Intel Media SDK. In turn, the MSDK has a dependency on libva, which needs libdrm to access the DRM infrastructure of the Linux kernel. Here's where it gets rough: libdrm just doesn't exist on Windows, as there's no infrastructure for it to make any sense, so it's not available as a package or a separate lib for Cygwin.
In order for me to get this to work, I'd need to find a way to tell libva, maybe through configure options/flags, to not use drm. How can I achieve that?
Here's my libva make command:
cd /ffmpeg_sources && rm -rf libva && git clone https://github.com/intel/libva libva && cd libva && CFLAGS=-I/usr/x86_64-w64-mingw32/sys-root/mingw/include && LDFLAGS=-L/usr/x86_64-w64-mingw32/sys-root/mingw/lib && export LD_LIBRARY_PATH=/ffmpeg_sources/libva/ && export PKG_CONFIG_LIBDIR=/usr/x86_64-w64-mingw32/sys-root/mingw/lib/pkgconfig && export PKG_CONFIG_PATH=/usr/x86_64-w64-mingw32/sys-root/mingw/lib/pkgconfig && ./autogen.sh --prefix=/usr/local --libdir=/usr/x86_64-w64-mingw32/sys-root/mingw/lib --enable-static --disable-shared && make -j$(nproc) && make install
Here's the log for it:
autoreconf-2.69: Entering directory `.' autoreconf-2.69: configure.ac: not using Gettext autoreconf-2.69: running: aclocal -I m4 ${ACLOCAL_FLAGS} autoreconf-2.69: configure.ac: tracing autoreconf-2.69: running: libtoolize --copy autoreconf-2.69: running: /usbin/autoconf-2.69 autoreconf-2.69: running: /usbin/autoheader-2.69 autoreconf-2.69: running: automake --add-missing --copy --no-force va/wayland/Makefile.am:30: warning: source file '../drm/va_drm_utils.c' is in a subdirectory, va/wayland/Makefile.am:30: but option 'subdir-objects' is disabled automake-1.16: warning: possible forward-incompatibility. automake-1.16: At least a source file is in a subdirectory, but the 'subdir-objects' automake-1.16: automake option hasn't been enabled. For now, the corresponding output automake-1.16: object file(s) will be placed in the top-level directory. However, automake-1.16: this behaviour will change in future Automake versions: they will automake-1.16: unconditionally cause object files to be placed in the same subdirectory automake-1.16: of the corresponding sources. automake-1.16: You are advised to start using 'subdir-objects' option throughout your automake-1.16: project, to avoid future incompatibilities. autoreconf-2.69: Leaving directory `.' checking for a BSD-compatible install... /usbin/install -c checking whether build environment is sane... yes checking for a thread-safe mkdir -p... /usbin/mkdir -p checking for gawk... gawk checking whether make sets $(MAKE)... yes checking whether make supports nested variables... yes checking whether make supports nested variables... (cached) yes checking build system type... x86_64-pc-cygwin checking host system type... x86_64-pc-cygwin checking how to print strings... printf checking whether make supports the include directive... yes (GNU style) checking for gcc... gcc checking whether the C compiler works... yes checking for C compiler default output file name... a.exe checking for suffix of executables... .exe checking whether we are cross compiling... no checking for suffix of object files... o checking whether we are using the GNU C compiler... yes checking whether gcc accepts -g... yes checking for gcc option to accept ISO C89... none needed checking whether gcc understands -c and -o together... yes checking dependency style of gcc... gcc3 checking for a sed that does not truncate output... /usbin/sed checking for grep that handles long lines and -e... /usbin/grep checking for egrep... /usbin/grep -E checking for fgrep... /usbin/grep -F checking for ld used by gcc... /usx86_64-pc-cygwin/bin/ld.exe checking if the linker (/usx86_64-pc-cygwin/bin/ld.exe) is GNU ld... yes checking for BSD- or MS-compatible name lister (nm)... /usbin/nm -B checking the name lister (/usbin/nm -B) interface... BSD nm checking whether ln -s works... yes checking the maximum length of command line arguments... 8192 checking how to convert x86_64-pc-cygwin file names to x86_64-pc-cygwin format... func_convert_file_noop checking how to convert x86_64-pc-cygwin file names to toolchain format... func_convert_file_noop checking for /usx86_64-pc-cygwin/bin/ld.exe option to reload object files... -r checking for objdump... objdump checking how to recognize dependent libraries... file_magic ^x86 archive import|^x86 DLL checking for dlltool... dlltool checking how to associate runtime and link libraries... func_cygming_dll_for_implib checking for ar... ar checking for archiver @FILE support... @ checking for strip... strip checking for ranlib... 
ranlib checking command to parse /usbin/nm -B output from gcc object... ok checking for sysroot... no checking for a working dd... /usbin/dd checking how to truncate binary pipes... /usbin/dd bs=4096 count=1 checking for mt... no checking if : is a manifest tool... no checking how to run the C preprocessor... gcc -E checking for ANSI C header files... yes checking for sys/types.h... yes checking for sys/stat.h... yes checking for stdlib.h... yes checking for string.h... yes checking for memory.h... yes checking for strings.h... yes checking for inttypes.h... yes checking for stdint.h... yes checking for unistd.h... yes checking for dlfcn.h... yes checking for objdir... .libs checking if gcc supports -fno-rtti -fno-exceptions... no checking for gcc option to produce PIC... -DDLL_EXPORT -DPIC checking if gcc PIC flag -DDLL_EXPORT -DPIC works... yes checking if gcc static flag -static works... yes checking if gcc supports -c -o file.o... yes checking if gcc supports -c -o file.o... (cached) yes checking whether the gcc linker (/usx86_64-pc-cygwin/bin/ld.exe) supports shared libraries... yes checking dynamic linker characteristics... Win32 ld.exe checking how to hardcode library paths into programs... immediate checking whether stripping libraries is possible... yes checking if libtool supports shared libraries... yes checking whether to build shared libraries... no checking whether to build static libraries... yes checking for gcc... (cached) gcc checking whether we are using the GNU C compiler... (cached) yes checking whether gcc accepts -g... (cached) yes checking for gcc option to accept ISO C89... (cached) none needed checking whether gcc understands -c and -o together... (cached) yes checking dependency style of gcc... (cached) gcc3 checking for g++... g++ checking whether we are using the GNU C++ compiler... yes checking whether g++ accepts -g... yes checking dependency style of g++... gcc3 checking how to run the C++ preprocessor... g++ -E checking for ld used by g++... /usx86_64-pc-cygwin/bin/ld.exe checking if the linker (/usx86_64-pc-cygwin/bin/ld.exe) is GNU ld... yes checking whether the g++ linker (/usx86_64-pc-cygwin/bin/ld.exe) supports shared libraries... yes checking for g++ option to produce PIC... -DDLL_EXPORT -DPIC checking if g++ PIC flag -DDLL_EXPORT -DPIC works... yes checking if g++ static flag -static works... yes checking if g++ supports -c -o file.o... yes checking if g++ supports -c -o file.o... (cached) yes checking whether the g++ linker (/usx86_64-pc-cygwin/bin/ld.exe) supports shared libraries... yes checking dynamic linker characteristics... Win32 ld.exe checking how to hardcode library paths into programs... immediate checking for a sed that does not truncate output... (cached) /usbin/sed checking for pkg-config... /usbin/pkg-config checking pkg-config is at least version 0.9.0... yes checking for ANSI C header files... (cached) yes checking for special C compiler options needed for large files... no checking for _FILE_OFFSET_BITS value needed for large files... no checking whether __attribute__((visibility())) is supported... no checking whether gcc accepts -fstack-protector... yes checking for DRM... no configure: error: Package requirements (libdrm >= 2.4) were not met: Package 'libdrm', required by 'virtual:world', not found Consider adjusting the PKG_CONFIG_PATH environment variable if you installed software in a non-standard prefix. Alternatively, you may set the environment variables DRM_CFLAGS and DRM_LIBS to avoid the need to call pkg-config. 
See the pkg-config man page for more details. 
Edit:
Disabling DRM, if I've done it correctly, got me nowhere.
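For reference, a hedged sketch of how the DRM backend is normally turned off in an autotools build of libva; whether the switch exists, and its exact name, depends on the libva version, so check configure's help output first:

# After autogen.sh has generated ./configure, list the backend switches this
# libva checkout actually supports:
./configure --help | grep -iE 'drm|x11|wayland'

# If an --enable-drm/--disable-drm switch is listed, re-run configure without DRM:
./configure --prefix=/usr/local --enable-static --disable-shared --disable-drm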
submitted by hiihiiii to linuxquestions [link] [comments]




Is there a printf converter to print in binary format? The printf() family can only print in base 8, 10 and 16 using the standard conversion specifiers directly. To print in binary (or any base from 2 to 36), write a small helper function that converts the number to a string, tailored to your code's particular needs.

Note: the following concerns the Bash builtin printf command, but the description should be almost identical for an external printf command that follows POSIX.

Format of the format string: the format string is a character string, beginning and ending in its initial shift state, if any. It is composed of zero or more directives: ordinary characters (not %), which are copied unchanged to the output, and conversion specifications, each of which fetches zero or more subsequent arguments.

The printf command in Linux displays the given string, number or other value formatted according to the format string, and works the same way as printf in programming languages like C:

$ printf "%s\n" "hello printf"
hello printf

The format string is applied to each argument in turn:

$ printf "%s\n" "hello printf" "in" "bash script"
hello printf
in
bash script

Format specifiers: the examples above use %s. The most commonly used printf specifiers are %s, %b, %d, %x and %f.

A minus-sign flag tells printf to left-adjust the conversion of the argument. A number specifies the field width: printf prints the conversion of the argument in a field at least that many characters wide, padding on the left (or on the right, if left-adjustment is requested) to make up the width. The -v option tells printf not to print the output but to assign it to a variable.

Hexadecimal output is often just as good as binary (or even better), as it maps every 4 bits into one hex digit, giving you both a compact and expressive representation of the binary data. The printf format specifiers are also very similar, if not identical, across languages, including C, C++, Java, Perl, PHP, Ruby and Scala.
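As a concrete illustration of the "write a small helper" advice above, a minimal Bash sketch that builds the binary string with shell arithmetic and prints it with printf (the function name is made up for the example):

#!/bin/bash
# to_binary: print the binary representation of a non-negative decimal integer
to_binary() {
    local n=$1 out=""
    while (( n > 0 )); do
        out="$(( n % 2 ))$out"   # take the lowest bit and prepend it
        n=$(( n / 2 ))
    done
    printf '%s\n' "${out:-0}"    # fall back to "0" for input 0
}

to_binary 42      # prints 101010
printf '%x\n' 42  # hexadecimal, by contrast, is supported directly: prints 2a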


