Work Smarter Not Harder with Scripts


Text Expansion Scripts

AutoHotkey Windows

; Hotstring: typing "ddate" + an ending character inserts today's date (ISO 8601).
::ddate::
FormatTime, CurrentDate,, yyyy-MM-dd
SendInput %CurrentDate%
return

; Hotstring: expand "sig" into a multi-line email signature.
::sig::
SendInput John Doe{Enter}Senior Developer{Enter}john@company.com{Enter}+1 (555) 123-4567
return

; Hotstring: expand "lorem" into placeholder text.
::lorem::
SendInput Lorem ipsum dolor sit amet, consectetur adipiscing elit. Sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.
return

; Hotstring: expand "forloop" into a JavaScript for-loop skeleton.
; Literal braces must be escaped for SendInput: {{} sends "{" and {}} sends "}".
; The original ended with a bare "}", which SendInput does not treat as a
; literal closing brace.
::forloop::
SendInput for (let i = 0; i < array.length; i++) {{}{Enter}{Tab}console.log(array[i]);{Enter}{Backspace}{}}
return

; Win+V: paste the clipboard as plain text, then restore the original
; (possibly formatted) clipboard contents.
#v::
ClipSaved := ClipboardAll   ; save the full clipboard (all formats)
Clipboard := Clipboard      ; round-trip through plain text strips formatting
SendInput ^v
Sleep 100                   ; give the target app time to read the clipboard
Clipboard := ClipSaved      ; restore the original clipboard
ClipSaved := ""             ; free the (potentially large) saved copy
return

macOS Text Replacement

# System Preferences → Keyboard → Text
# Or via defaults command:
# Each entry is a text replacement: "on" is the trigger, "with" is the expansion.
# NOTE(review): apps may not pick up the change until they restart — confirm.

defaults write -g NSUserDictionaryReplacementItems -array-add \
'{"on" = "@date"; "with" = "2024-01-15";}' \
'{"on" = "@sig"; "with" = "John Doe\nDeveloper";}' \
'{"on" = "@addr"; "with" = "123 Main St, City, State 12345";}'

Document Generation

Python Report Generator

from docx import Document
from datetime import datetime

def generate_report(data, template_path, output_path):
    """Fill a .docx template's placeholders and save the result.

    Replaces {{DATE}} with today's date, plus {{CLIENT}} and {{TOTAL}}
    from *data*, in every paragraph of the template.

    Args:
        data: Mapping with at least "client" and "total" keys.
        template_path: Path to the template .docx file.
        output_path: Path where the filled-in document is written.
    """
    doc = Document(template_path)

    # Table-driven replacements: one dict entry per tag instead of a
    # separate if-block each, so new placeholders are a one-line change.
    replacements = {
        "{{DATE}}": datetime.now().strftime("%Y-%m-%d"),
        "{{CLIENT}}": data["client"],
        "{{TOTAL}}": str(data["total"]),
    }

    # NOTE(review): assigning paragraph.text discards per-run character
    # formatting within the paragraph — fine for plain templates, but
    # confirm before using on styled documents.
    for paragraph in doc.paragraphs:
        for placeholder, value in replacements.items():
            if placeholder in paragraph.text:
                paragraph.text = paragraph.text.replace(placeholder, value)

    # Save the completed report.
    doc.save(output_path)

# Usage
# Example invoice payload; "items" is not consumed by generate_report itself.
data = {
    "client": "Acme Corp",
    "total": 15000,
    "items": ["Consulting", "Development", "Support"]
}
generate_report(data, "template.docx", f"Invoice_{data['client']}_{datetime.now().strftime('%Y%m%d')}.docx")

Excel Automation

import openpyxl
from openpyxl.styles import Font, PatternFill

def create_summary(data, filename="summary.xlsx"):
    """Write an Excel summary sheet with headers, data rows, and total formulas.

    Args:
        data: Iterable of dicts with "name", "qty", and "price" keys.
        filename: Output workbook path (defaults to "summary.xlsx",
            matching the original hard-coded name).
    """
    wb = openpyxl.Workbook()
    ws = wb.active
    ws.title = "Summary"

    # Bold, grey-filled header row.
    headers = ["Item", "Quantity", "Price", "Total"]
    for col, header in enumerate(headers, 1):
        cell = ws.cell(row=1, column=col, value=header)
        cell.font = Font(bold=True)
        cell.fill = PatternFill(start_color="CCCCCC", end_color="CCCCCC", fill_type="solid")

    # Data rows start at row 2; Total is an Excel formula so it
    # recalculates if the sheet is edited later.
    for row, item in enumerate(data, 2):
        ws.cell(row=row, column=1, value=item["name"])
        ws.cell(row=row, column=2, value=item["qty"])
        ws.cell(row=row, column=3, value=item["price"])
        ws.cell(row=row, column=4, value=f"=B{row}*C{row}")

    # Approximate auto-width: widest cell text plus padding, capped at 50.
    for column in ws.columns:
        column_letter = column[0].column_letter
        # Skip empty cells explicitly instead of the original bare
        # "except: pass", which silently swallowed every error.
        max_length = max(
            (len(str(cell.value)) for cell in column if cell.value is not None),
            default=0,
        )
        ws.column_dimensions[column_letter].width = min(max_length + 2, 50)

    wb.save(filename)

# Usage
# Each dict becomes one spreadsheet row.
items = [
    {"name": "Widget A", "qty": 10, "price": 25.50},
    {"name": "Widget B", "qty": 5, "price": 100.00}
]
create_summary(items)

Data Processing

CSV Transformation

import csv
import json
from collections import defaultdict

def process_sales(input_file, output_file):
    """Aggregate sales amounts by region and write a JSON summary.

    Args:
        input_file: CSV path with at least "region" and "amount" columns.
        output_file: Path where the {region: total} JSON summary is written.

    Returns:
        Chart-ready rows: a ["Region", "Sales"] header followed by one
        [region, total] pair per region, in first-seen order.
    """
    sales_by_region = defaultdict(float)

    # newline="" is required for correct newline handling with the csv
    # module (per the csv docs), notably on Windows.
    with open(input_file, 'r', newline='') as f:
        reader = csv.DictReader(f)
        for row in reader:
            sales_by_region[row['region']] += float(row['amount'])

    # Persist the per-region totals as pretty-printed JSON.
    with open(output_file, 'w') as f:
        json.dump(dict(sales_by_region), f, indent=2)

    # Build chart data: header row plus one row per region.
    chart_data = [["Region", "Sales"]]
    for region, total in sales_by_region.items():
        chart_data.append([region, total])

    return chart_data

# Usage
# Expects ./sales.csv to exist; writes summary.json beside it.
result = process_sales("sales.csv", "summary.json")

Log Analysis

#!/bin/bash
# analyze_logs.sh — summarize an nginx access log and mail the report.

LOGFILE="/var/log/nginx/access.log"
REPORT="/tmp/log_report.txt"

# Variables are quoted throughout: unquoted expansions would word-split
# if either path ever contained whitespace.
echo "Log Analysis Report - $(date)" > "$REPORT"
echo "================================" >> "$REPORT"

# Top 10 client IP addresses (field 1 of the combined log format)
echo -e "\nTop 10 IP Addresses:" >> "$REPORT"
awk '{print $1}' "$LOGFILE" | sort | uniq -c | sort -rn | head -10 >> "$REPORT"

# Top 10 requested URLs (field 7)
echo -e "\nTop 10 URLs:" >> "$REPORT"
awk '{print $7}' "$LOGFILE" | sort | uniq -c | sort -rn | head -10 >> "$REPORT"

# HTTP status code distribution (field 9)
echo -e "\nHTTP Status Codes:" >> "$REPORT"
awk '{print $9}' "$LOGFILE" | sort | uniq -c | sort -rn >> "$REPORT"

# 5xx error rate; guard against division by zero on an empty log.
total=$(wc -l < "$LOGFILE")
errors=$(awk '$9 ~ /^5/ {count++} END {print count+0}' "$LOGFILE")
if [ "$total" -gt 0 ]; then
    error_rate=$(echo "scale=2; ($errors/$total)*100" | bc)
else
    error_rate="0"
fi
echo -e "\nError Rate: ${error_rate}%" >> "$REPORT"

mail -s "Daily Log Report" admin@company.com < "$REPORT"

Communication Automation

Email Template Sender

import smtplib
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
import csv

def send_bulk_emails(template_file, csv_file, subject):
    """Send one personalized email per row of a recipient CSV.

    Every {column} placeholder in the template is replaced with that
    row's value; the "email" column supplies the recipient address.

    Args:
        template_file: Path to a plain-text template with {field} tags.
        csv_file: CSV path whose header names match the template fields.
        subject: Subject line used for every message.
    """
    with open(template_file, 'r') as f:
        template = f.read()

    # Open the SMTP connection once for the whole batch instead of
    # reconnecting (and re-authenticating) for every single recipient,
    # as the original loop did.
    # SECURITY(review): the password is hard-coded; move it to an
    # environment variable or secrets store before real use.
    with smtplib.SMTP('smtp.gmail.com', 587) as server:
        server.starttls()
        server.login('sender@company.com', 'password')

        with open(csv_file, 'r', newline='') as f:
            for row in csv.DictReader(f):
                msg = MIMEMultipart()
                msg['From'] = 'sender@company.com'
                msg['To'] = row['email']
                msg['Subject'] = subject

                # Personalize: replace each {column} tag with the row value.
                body = template
                for key, value in row.items():
                    body = body.replace(f'{{{key}}}', value)

                msg.attach(MIMEText(body, 'plain'))
                server.send_message(msg)

# Template file (template.txt):
# Hello {name},
# 
# Your order #{order_id} for {product} has been shipped.
# Track at: {tracking_url}

# CSV file (recipients.csv):
# name,email,order_id,product,tracking_url
# John,john@email.com,12345,Widget,https://track.com/12345

Slack Status Updater

import requests
import json
from datetime import datetime

def update_slack_status(token, text, emoji, expiration=0):
    """Set a Slack user's status via users.profile.set.

    Args:
        token: Slack user token with the users.profile:write scope.
        text: Status text ("" clears it).
        emoji: Status emoji, e.g. ":calendar:" ("" clears it).
        expiration: Unix timestamp when the status auto-clears (0 = never).

    Returns:
        The parsed JSON response from the Slack API.
    """
    url = "https://slack.com/api/users.profile.set"
    headers = {"Authorization": f"Bearer {token}"}

    profile = {
        "status_text": text,
        "status_emoji": emoji,
        "status_expiration": expiration
    }

    # The API expects the profile as a JSON string inside a form payload.
    payload = {"profile": json.dumps(profile)}
    # timeout keeps the call from hanging forever if Slack is unreachable
    # (requests has no default timeout).
    response = requests.post(url, headers=headers, data=payload, timeout=10)
    return response.json()

# Auto-update based on calendar
def sync_calendar_to_slack(calendar_events, token="xoxp-token"):
    """Mirror the current calendar event into the user's Slack status.

    Sets an "In meeting" status for the first event covering "now",
    expiring when the event ends; clears the status when no event matches.

    Args:
        calendar_events: Iterable of dicts with ISO-8601 "start"/"end"
            strings and a "title".
        token: Slack user token. Parameterized instead of the original
            hard-coded literal; the default preserves old behavior.
    """
    # NOTE(review): datetime.now() is naive — if event timestamps carry a
    # UTC offset, fromisoformat() returns aware datetimes and the
    # comparison below raises TypeError. Confirm the calendar's format.
    now = datetime.now()

    for event in calendar_events:
        start = datetime.fromisoformat(event['start'])
        end = datetime.fromisoformat(event['end'])

        if start <= now <= end:
            update_slack_status(
                token=token,
                text=f"In meeting: {event['title']}",
                emoji=":calendar:",
                expiration=int(end.timestamp())
            )
            return

    # No event covers "now": clear the status.
    update_slack_status(token=token, text="", emoji="", expiration=0)

File Operations

Smart File Organizer

import os
import shutil
from pathlib import Path
from datetime import datetime

class FileOrganizer:
    """Sort the files in a directory into year/month subfolders.

    Folders are named from each file's modification time, e.g.
    <source>/2024/01-January/. Name collisions get a numeric suffix.
    """

    def __init__(self, source_dir):
        self.source = Path(source_dir)
        # Running totals reported by organize_by_date().
        self.stats = {"moved": 0, "duplicates": 0, "errors": 0}

    def organize_by_date(self):
        """Move every top-level file into <year>/<mm-Month>/ and return stats.

        Returns:
            The stats dict: files moved, duplicate names renamed, errors.
        """
        # Snapshot the listing first: we create subfolders and move files
        # inside self.source while working, and mutating a directory
        # during iterdir() is not safe.
        for file_path in list(self.source.iterdir()):
            if file_path.is_file():
                try:
                    # Bucket by modification time (st_mtime) — true
                    # creation time is not portably available.
                    stat = file_path.stat()
                    date = datetime.fromtimestamp(stat.st_mtime)

                    # e.g. <source>/2024/01-January
                    dest_folder = self.source / str(date.year) / date.strftime("%m-%B")
                    dest_folder.mkdir(parents=True, exist_ok=True)

                    # On a name collision, append _1, _2, ... until free.
                    dest_file = dest_folder / file_path.name
                    if dest_file.exists():
                        base = dest_file.stem
                        suffix = dest_file.suffix
                        counter = 1
                        while dest_file.exists():
                            dest_file = dest_folder / f"{base}_{counter}{suffix}"
                            counter += 1
                        self.stats["duplicates"] += 1

                    shutil.move(str(file_path), str(dest_file))
                    self.stats["moved"] += 1

                except Exception as e:
                    # Best-effort: report and keep organizing the rest.
                    print(f"Error processing {file_path}: {e}")
                    self.stats["errors"] += 1

        return self.stats

# Usage
# Point at any folder; its files move into year/month subfolders in place.
organizer = FileOrganizer("C:/Downloads")
result = organizer.organize_by_date()
print(f"Moved: {result['moved']}, Duplicates: {result['duplicates']}, Errors: {result['errors']}")

Duplicate File Finder

import hashlib
import os
from collections import defaultdict

def find_duplicates(folder):
    """Group identical files under *folder* (recursively) by content hash.

    Returns:
        {hash: [paths]} containing only hashes shared by two or more files.
    """
    hashes = defaultdict(list)

    for root, _, files in os.walk(folder):
        for filename in files:
            filepath = os.path.join(root, filename)
            # Hash the ENTIRE file in chunks. The original hashed only the
            # first 4096 bytes, so large files sharing a prefix were
            # falsely reported as duplicates — and it never closed the
            # file handle it opened.
            digest = hashlib.md5()  # md5 is fine for dedup, not security
            with open(filepath, 'rb') as f:
                for chunk in iter(lambda: f.read(65536), b''):
                    digest.update(chunk)
            hashes[digest.hexdigest()].append(filepath)

    # Keep only hashes that map to more than one file.
    return {h: paths for h, paths in hashes.items() if len(paths) > 1}

# Remove duplicates keeping first occurrence
def remove_duplicates(duplicates):
    """Delete all but the first file in every group of duplicates."""
    for paths in duplicates.values():
        _first, *extras = paths  # keep the first occurrence untouched
        for doomed in extras:
            os.remove(doomed)
            print(f"Removed: {doomed}")

System Monitoring

Resource Monitor Alert

import psutil
import time
import requests

# Alert thresholds (percent utilization) per resource.
THRESHOLDS = {
    'cpu': 80,
    'memory': 85,
    'disk': 90
}

# Slack incoming-webhook endpoint that receives alert messages.
WEBHOOK_URL = "https://hooks.slack.com/services/YOUR/WEBHOOK/URL"

def check_resources():
    """Sample CPU, memory, and disk usage and post a Slack alert when any
    exceeds its THRESHOLDS entry."""
    alerts = []

    # interval=1 samples CPU over one second for a meaningful reading.
    cpu = psutil.cpu_percent(interval=1)
    if cpu > THRESHOLDS['cpu']:
        alerts.append(f"High CPU: {cpu}%")

    memory = psutil.virtual_memory().percent
    if memory > THRESHOLDS['memory']:
        alerts.append(f"High Memory: {memory}%")

    disk = psutil.disk_usage('/').percent
    if disk > THRESHOLDS['disk']:
        alerts.append(f"High Disk: {disk}%")

    if alerts:
        message = "\n".join(alerts)
        # timeout keeps a webhook outage from hanging the monitor loop
        # (requests has no default timeout).
        requests.post(WEBHOOK_URL, json={"text": f"Resource Alert:\n{message}"}, timeout=10)

# Run continuously
# NOTE(review): any unhandled exception (e.g. a webhook network error)
# kills this loop; wrap check_resources() in try/except for daemon use.
while True:
    check_resources()
    time.sleep(300)  # Check every 5 minutes

Process Auto-Restarter

#!/bin/bash
# monitor_service.sh — restart $SERVICE when its process is gone, log the
# outcome, and email an alert if the restart fails.

SERVICE="critical_app"
LOG="/var/log/service_monitor.log"

# pgrep -x matches the exact process name; success means it's running.
# All expansions are quoted to survive names/paths with whitespace.
if ! pgrep -x "$SERVICE" > /dev/null; then
    echo "$(date): $SERVICE not running. Restarting..." >> "$LOG"
    systemctl restart "$SERVICE"

    # Give the service a moment to come up before verifying.
    sleep 2
    if pgrep -x "$SERVICE" > /dev/null; then
        echo "$(date): $SERVICE restarted successfully" >> "$LOG"
    else
        echo "$(date): FAILED to restart $SERVICE" >> "$LOG"
        # Escalate: the restart did not take.
        echo "Critical: $SERVICE failed to restart" | mail -s "Service Alert" admin@company.com
    fi
fi

Web Scraping

Price Monitor

import requests
from bs4 import BeautifulSoup
import json
from datetime import datetime

def check_price(url, selector, target_price):
    """Scrape a product price, append it to the history log, and alert on drops.

    Args:
        url: Product page URL.
        selector: CSS selector for the element containing the price.
        target_price: Alert threshold; send_alert() fires at or below it.

    Returns:
        The current price as a float, or None if the selector matched
        nothing (the original always returned None, so this is
        backward-compatible).
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
    }

    # timeout prevents an unresponsive site from hanging the monitor
    # (requests has no default timeout).
    response = requests.get(url, headers=headers, timeout=30)
    soup = BeautifulSoup(response.content, 'html.parser')

    price_element = soup.select_one(selector)
    if price_element is None:
        return None

    # Normalize "$1,234.56" -> 1234.56
    price_text = price_element.text.strip().replace('$', '').replace(',', '')
    current_price = float(price_text)

    # Append one JSON object per line (JSONL) to the price history.
    log_entry = {
        'date': datetime.now().isoformat(),
        'url': url,
        'price': current_price
    }
    with open('price_history.jsonl', 'a') as f:
        f.write(json.dumps(log_entry) + '\n')

    # Alert when the price reaches the target.
    if current_price <= target_price:
        send_alert(f"Price drop! Now ${current_price} (Target: ${target_price})\n{url}")

    return current_price

# Usage
# Alert fires when the scraped price is at or below target_price.
check_price(
    url="https://example.com/product",
    selector=".price-current",
    target_price=100.00
)

Database Automation

Auto-Backup and Rotate

import subprocess
import os
from datetime import datetime, timedelta
import glob

def backup_database(db_name, backup_dir, retention_days=7):
    """Dump a MySQL database to a gzipped file and prune old backups.

    Args:
        db_name: Database to dump.
        backup_dir: Directory where backups are stored.
        retention_days: Backups older than this many days are deleted.

    Raises:
        subprocess.CalledProcessError: If mysqldump exits non-zero.
        Exception: If the dump produced no output file or an empty one.
    """
    import gzip  # local import keeps this snippet self-contained

    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    backup_file = f"{db_name}_{timestamp}.sql.gz"
    backup_path = os.path.join(backup_dir, backup_file)

    # Run mysqldump without a shell (no injection via db_name) and gzip
    # the output in Python. The original "mysqldump | gzip" shell
    # pipeline reported only gzip's exit status, so a failed dump passed
    # check=True unnoticed.
    # SECURITY(review): the password is hard-coded; prefer ~/.my.cnf or
    # an environment variable.
    result = subprocess.run(
        ["mysqldump", "-u", "backup", "-ppassword", db_name],
        stdout=subprocess.PIPE,
        check=True,
    )
    with gzip.open(backup_path, "wb") as f:
        f.write(result.stdout)

    # Verify the backup landed and is non-empty.
    if os.path.exists(backup_path) and os.path.getsize(backup_path) > 0:
        print(f"Backup created: {backup_path}")

        # Prune backups past the retention window, dating each file by
        # the timestamp embedded in its name (ctime is unreliable after
        # copies or restores).
        cutoff = datetime.now() - timedelta(days=retention_days)
        for old_backup in glob.glob(os.path.join(backup_dir, f"{db_name}_*.sql.gz")):
            stem = os.path.basename(old_backup)[len(db_name) + 1:-len(".sql.gz")]
            try:
                file_time = datetime.strptime(stem, "%Y%m%d_%H%M%S")
            except ValueError:
                continue  # not one of our backups; leave it alone
            if file_time < cutoff:
                os.remove(old_backup)
                print(f"Removed old backup: {old_backup}")
    else:
        raise Exception("Backup failed")

Git Productivity

Batch Git Operations

#!/bin/bash
# git_batch.sh — update every git repo under $REPOS_DIR: stash local
# changes, pull $BRANCH, refresh dependencies, and run tests.

REPOS_DIR="$HOME/projects"
BRANCH="main"

for repo in "$REPOS_DIR"/*/; do
    if [ -d "$repo/.git" ]; then
        echo "Processing $repo..."
        # Quote the path and skip this repo if cd fails; the original
        # unquoted, unchecked cd could leave us running git commands in
        # the wrong directory.
        cd "$repo" || continue

        # Park any uncommitted work so checkout/pull can't conflict.
        git stash

        # Sync to the latest remote state of the target branch.
        git checkout "$BRANCH"
        git pull origin "$BRANCH"

        # Refresh dependencies when a manifest is present.
        if [ -f "package.json" ]; then
            npm install
        fi
        if [ -f "requirements.txt" ]; then
            pip install -r requirements.txt
        fi

        # Run the project's test suite if we can identify one.
        if [ -f "package.json" ]; then
            npm test
        elif [ -f "pytest.ini" ] || [ -f "setup.py" ]; then
            pytest
        fi

        echo "$repo updated"
        echo "-------------------"
    fi
done

Git Stats Reporter

import subprocess
import json
from collections import defaultdict

def get_git_stats(repo_path, since="30.days"):
    """Summarize per-author commit activity in a git repository.

    Args:
        repo_path: Path to the repository.
        since: Value passed to git's --since (e.g. "30.days").

    Returns:
        A JSON string mapping author name to commit, files_changed,
        insertion, and deletion counts.
    """
    # List args + shell=False: no quoting problems or injection via
    # repo_path, unlike the original f-string shell command.
    log_cmd = [
        "git", "-C", repo_path, "log", f"--since={since}",
        "--pretty=format:%h|%an|%ad|%s", "--date=short",
    ]
    result = subprocess.run(log_cmd, capture_output=True, text=True)

    stats = defaultdict(lambda: {"commits": 0, "files_changed": 0, "insertions": 0, "deletions": 0})

    for line in result.stdout.strip().split('\n'):
        if '|' not in line:
            continue
        # maxsplit=3 keeps commit subjects containing '|' intact; the
        # original also shadowed the builtin name "hash".
        commit_hash, author, date, message = line.split('|', 3)
        stats[author]["commits"] += 1

        # Per-commit diffstat; the summary line looks like
        # " 3 files changed, 10 insertions(+), 2 deletions(-)"
        stat_result = subprocess.run(
            ["git", "-C", repo_path, "show", "--stat", commit_hash, "--format="],
            capture_output=True, text=True,
        )

        for stat_line in stat_result.stdout.split('\n'):
            if 'changed' not in stat_line:
                continue
            for part in stat_line.split(','):
                if 'changed' in part:
                    # The original declared files_changed but never
                    # updated it; count it here.
                    stats[author]["files_changed"] += int(part.split()[0])
                elif 'insertion' in part:
                    stats[author]["insertions"] += int(part.split()[0])
                elif 'deletion' in part:
                    stats[author]["deletions"] += int(part.split()[0])

    return json.dumps(dict(stats), indent=2)

Leave a Reply

Your email address will not be published. Required fields are marked *