How to prevent SQL injection on a PHP website

A standard defence is to use PDO prepared statements, so that user input is never spliced into the SQL string:

// Create the database connection
$dsn = "mysql:host=your_host;dbname=your_db";
$username = 'your_username';
$password = 'your_password';

try {
    $pdo = new PDO($dsn, $username, $password);
    // Make PDO throw exceptions on errors
    $pdo->setAttribute(PDO::ATTR_ERRMODE, PDO::ERRMODE_EXCEPTION);

    // Prepare the SQL statement with a named placeholder
    $stmt = $pdo->prepare("SELECT * FROM users WHERE username = :username");

    $userInput = "admin' -- "; // simulated malicious user input
    // Bind the value; it is sent separately from the SQL text,
    // so the quote and comment characters are treated as plain data
    $stmt->bindParam(':username', $userInput);
    $stmt->execute();

    // Fetch the results
    $results = $stmt->fetchAll(PDO::FETCH_ASSOC);
    print_r($results);

} catch (PDOException $e) {
    echo "Connection failed: " . $e->getMessage();
}
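The key point is that the parameter value travels separately from the SQL text, so the quote and comment characters in "admin' -- " are matched as literal data rather than executed. The same technique exists in most languages. For comparison with the Python example later in this article, here is a minimal, self-contained sketch using Python's built-in sqlite3 module; the in-memory database, table, and rows are made up purely for illustration:

import sqlite3

# Illustrative in-memory database with one fake user row
conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE users (username TEXT, email TEXT)")
conn.execute("INSERT INTO users VALUES (?, ?)", ("admin", "admin@example.com"))

user_input = "admin' -- "  # the same injection attempt as above
# The ? placeholder keeps user_input out of the SQL text, so it is
# compared as a literal string and the query safely returns no rows.
rows = conn.execute(
    "SELECT * FROM users WHERE username = ?", (user_input,)
).fetchall()
print(rows)  # prints [] because no user is literally named "admin' -- "
conn.close()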

A Python web-crawler (data scraping) code example

import requests
from bs4 import BeautifulSoup

def fetch_page(url):
    """Fetch the raw HTML of a page."""
    try:
        response = requests.get(url)
        response.raise_for_status()  # raise an error for non-2xx responses
        return response.text
    except requests.RequestException as e:
        print(f"Error fetching {url}: {e}")
        return None

def parse_page(html_content):
    """Parse the HTML and extract all link targets."""
    soup = BeautifulSoup(html_content, 'html.parser')
    links = []
    for link in soup.find_all('a', href=True):
        links.append(link['href'])
    return links

def main():
    url = "http://example.com"  # replace with the site you want to crawl
    html_content = fetch_page(url)
    if html_content:
        links = parse_page(html_content)
        for link in links:
            print(link)
    else:
        print("Failed to fetch the page.")

if __name__ == "__main__":
    main()

Make sure you have requests and BeautifulSoup installed. If not, you can install them with the following command:

pip install requests beautifulsoup4
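
Note that the hrefs collected by parse_page are often relative paths (for example /about). As a small sketch building on the functions above, urllib.parse.urljoin from the standard library can turn them into absolute URLs before you print or follow them; the example URLs below are illustrative only:

from urllib.parse import urljoin

def absolutize(base_url, links):
    """Convert possibly-relative hrefs into absolute URLs."""
    return [urljoin(base_url, link) for link in links]

# Example usage:
# absolutize("http://example.com", ["/about", "http://example.com/contact"])
# -> ["http://example.com/about", "http://example.com/contact"]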