实用Python自动化脚本合集:八个拿来即用的工具!
原创
一、自动发送邮件脚本
在日常生活中,我们经常需要发送邮件来通知他人或者传递文件。以下是一个使用Python的smtplib库自动发送邮件的脚本。
import smtplib
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
def send_email(sender_email, receiver_email, subject, body, password):
    """Send a plain-text email through an SMTP server using STARTTLS.

    Args:
        sender_email: Address used both to authenticate and as From.
        receiver_email: Destination address (set as To).
        subject: Subject line of the message.
        body: Plain-text message body.
        password: SMTP account password for *sender_email*.

    Raises:
        smtplib.SMTPException: On connection, login, or delivery failure.

    NOTE(review): 'smtp.example.com' is a placeholder host — replace it
    with the real SMTP server before use.
    """
    msg = MIMEMultipart()
    msg['From'] = sender_email
    msg['To'] = receiver_email
    msg['Subject'] = subject
    msg.attach(MIMEText(body, 'plain'))

    # Context manager guarantees the connection is closed even when
    # login()/sendmail() raises — the original leaked the socket on error.
    with smtplib.SMTP('smtp.example.com', 587) as server:
        server.starttls()  # upgrade to TLS before sending credentials
        server.login(sender_email, password)
        server.sendmail(sender_email, receiver_email, msg.as_string())
# Usage example (placeholder credentials — runs at import time)
send_email('your_email@example.com', 'receiver_email@example.com', 'Test Subject', 'Hello, this is a test email!', 'your_password')
二、自动下载网络图片脚本
有时我们需要从网络上下载大量图片,以下是一个使用Python的requests库和BeautifulSoup库自动下载网络图片的脚本。
import os
import requests
from bs4 import BeautifulSoup
def download_images(url, download_path):
    """Download every image referenced by <img> tags on a page.

    Args:
        url: Page URL whose <img src=...> targets are downloaded.
        download_path: Directory to save images into (created if missing).

    Raises:
        requests.HTTPError: If fetching the page itself fails.
    """
    # Local import keeps this snippet self-contained.
    from urllib.parse import urljoin

    response = requests.get(url)
    response.raise_for_status()
    soup = BeautifulSoup(response.text, 'html.parser')

    os.makedirs(download_path, exist_ok=True)

    for img in soup.find_all('img'):
        src = img.get('src')
        if not src:
            continue
        # Resolve relative src values ("/img/a.png") against the page URL;
        # the original passed them to requests.get verbatim, which fails.
        img_url = urljoin(url, src)
        img_name = img_url.split('/')[-1].split('?')[0]
        if not img_name:
            continue  # URL ended with '/', no usable filename
        r = requests.get(img_url)
        if r.status_code == 200:
            with open(os.path.join(download_path, img_name), 'wb') as f:
                f.write(r.content)
# Usage example (placeholder URL — runs at import time)
download_images('https://www.example.com', 'downloaded_images')
三、自动备份文件夹脚本
为了防止数据丢失,定期备份文件夹是很重要的。以下是一个使用Python的shutil库自动备份文件夹的脚本。
import os
import shutil
def backup_folder(source_folder, backup_folder):
    """Recursively copy *source_folder* into *backup_folder*.

    The source's directory structure is preserved, so same-named files in
    different subfolders no longer overwrite each other — the original's
    flat copy silently lost all but the last duplicate (data-loss bug).

    Args:
        source_folder: Directory tree to back up.
        backup_folder: Destination root (created if missing).
    """
    for foldername, subfolders, filenames in os.walk(source_folder):
        # Mirror the current folder's relative path under the backup root.
        rel = os.path.relpath(foldername, source_folder)
        target_dir = backup_folder if rel == '.' else os.path.join(backup_folder, rel)
        os.makedirs(target_dir, exist_ok=True)
        for filename in filenames:
            # copy2 also preserves timestamps/metadata, unlike copy.
            shutil.copy2(os.path.join(foldername, filename),
                         os.path.join(target_dir, filename))
# Usage example (placeholder paths — runs at import time)
backup_folder('source_folder_path', 'backup_folder_path')
四、自动整理文件脚本
随着时间的推移,我们的电脑中会积累很多文件,以下是一个使用Python的os库和shutil库自动整理文件的脚本。
import os
import shutil
# Maps file extension -> destination subfolder name.
_CATEGORY_BY_EXTENSION = {
    '.txt': 'Documents', '.doc': 'Documents', '.docx': 'Documents',
    '.jpg': 'Images', '.jpeg': 'Images', '.png': 'Images',
    '.mp3': 'Media', '.wav': 'Media', '.mp4': 'Media',
}


def organize_files(directory):
    """Sort top-level files in *directory* into category subfolders.

    Files are moved into Documents/Images/Media/Others based on their
    extension; subdirectories themselves are left untouched.

    Args:
        directory: Folder whose files are organized in place.
    """
    for filename in os.listdir(directory):
        filepath = os.path.join(directory, filename)
        if not os.path.isfile(filepath):
            continue
        file_extension = os.path.splitext(filename)[1].lower()
        category = _CATEGORY_BY_EXTENSION.get(file_extension, 'Others')
        target_dir = os.path.join(directory, category)
        # The original crashed with FileNotFoundError because the category
        # folders were never created; create each one on demand.
        os.makedirs(target_dir, exist_ok=True)
        shutil.move(filepath, os.path.join(target_dir, filename))
# Usage example (placeholder path — runs at import time)
organize_files('your_directory_path')
五、自动生成PDF报告脚本
生成PDF报告是数据分析中常见的需求,以下是一个使用Python的reportlab库自动生成PDF报告的脚本。
from reportlab.lib.pagesizes import letter
from reportlab.pdfgen import canvas
def create_pdf_report(report_title, data, output_path):
    """Write a single-page PDF with a title line and a list of data items.

    Args:
        report_title: Heading drawn at the top of the page.
        data: Iterable of items; each is rendered on its own line via str().
        output_path: Path of the PDF file to create.
    """
    pdf = canvas.Canvas(output_path, pagesize=letter)
    _, page_height = letter
    pdf.drawString(100, page_height - 40, report_title)
    pdf.drawString(100, page_height - 60, 'Data:')
    y = page_height - 80
    for item in data:
        pdf.drawString(100, y, str(item))
        y -= 20  # step down one text line per item
    pdf.save()
# Usage example — writes sales_report.pdf at import time
create_pdf_report('Sales Report', [100, 200, 150, 180], 'sales_report.pdf')
六、自动抓取网页数据脚本
网页数据抓取是数据采集的重要手段,以下是一个使用Python的requests库和BeautifulSoup库自动抓取网页数据的脚本。
import requests
from bs4 import BeautifulSoup
def scrape_data(url):
    """Scrape title/description pairs from a page's <div class="item"> blocks.

    Args:
        url: Page to fetch and parse.

    Returns:
        list[dict]: One {'title': ..., 'description': ...} dict per item
        that contains both an <h2> and a <p>. Items missing either tag are
        skipped — the original raised AttributeError on them
        (``item.find('h2')`` returns None when the tag is absent).
    """
    response = requests.get(url)
    soup = BeautifulSoup(response.text, 'html.parser')
    data = []
    for item in soup.find_all('div', class_='item'):
        title_tag = item.find('h2')
        description_tag = item.find('p')
        if title_tag is None or description_tag is None:
            continue  # malformed item: skip instead of crashing
        data.append({'title': title_tag.text, 'description': description_tag.text})
    return data
# Usage example (placeholder URL — runs at import time)
scraped_data = scrape_data('https://www.example.com')
print(scraped_data)
七、自动爬取网站图片脚本
有时我们需要从网站上爬取大量的图片,以下是一个使用Python的requests库和BeautifulSoup库自动爬取网站图片的脚本。
import os
import requests
from bs4 import BeautifulSoup
from urllib.parse import urljoin
def download_images_from_url(url, download_folder):
    """Download every image referenced on a page, streaming each to disk.

    Args:
        url: Page whose <img> tags are scanned.
        download_folder: Destination directory (created if missing).
    """
    # Local import: basename must come from the URL *path* only.
    from urllib.parse import urlsplit

    response = requests.get(url)
    soup = BeautifulSoup(response.text, 'html.parser')
    os.makedirs(download_folder, exist_ok=True)
    for img in soup.find_all('img'):
        img_url = img.get('src')
        if not img_url or img_url.startswith('data:'):
            # Skip missing src and inline data: URIs — not downloadable files.
            continue
        img_full_url = urljoin(url, img_url)
        # The original kept any ?query=... suffix in the filename, which is
        # invalid on some filesystems; strip it via the parsed URL path.
        img_name = os.path.basename(urlsplit(img_full_url).path)
        if not img_name:
            continue  # URL path ends with '/', no usable filename
        img_path = os.path.join(download_folder, img_name)
        r = requests.get(img_full_url, stream=True)
        if r.status_code == 200:
            with open(img_path, 'wb') as f:
                for chunk in r.iter_content(8192):
                    f.write(chunk)
# Usage example (placeholder URL — runs at import time)
download_images_from_url('https://www.example.com', 'downloaded_images')
八、自动处理Excel数据脚本
Excel数据处理是数据分析中的常见任务,以下是一个使用Python的pandas库自动处理Excel数据的脚本。
import pandas as pd
def process_excel_data(file_path, output_path='processed_data.xlsx'):
    """Compute per-row totals and save rows sorted by total, descending.

    Args:
        file_path: Input Excel file with 'Price' and 'Quantity' columns.
        output_path: Where to write the result. The default keeps the
            original hard-coded filename, so existing callers are unchanged.

    Raises:
        KeyError: If the input lacks a 'Price' or 'Quantity' column.
    """
    df = pd.read_excel(file_path)
    # Row total = unit price times quantity.
    df['Total'] = df['Price'] * df['Quantity']
    df_sorted = df.sort_values(by='Total', ascending=False)
    df_sorted.to_excel(output_path, index=False)
# Usage example (placeholder file — runs at import time)
process_excel_data('data.xlsx')
以上就是八个实用的Python自动化脚本,它们涵盖了发送邮件、下载图片、备份文件夹、整理文件、生成PDF报告、抓取网页数据、爬取网站图片和处理Excel数据等多个方面。希望这些脚本能够帮助您节省时间、提高工作效率,简化日常任务。