9.1 性能优化基础

9.1.1 性能优化概述

class MySQLPerformanceOptimization:
    def __init__(self):
        self.optimization_levels = {
            "硬件层面": {
                "描述": "通过硬件配置提升数据库性能",
                "优化方向": [
                    "CPU性能和核心数",
                    "内存容量和速度",
                    "存储I/O性能",
                    "网络带宽和延迟"
                ],
                "关键指标": {
                    "CPU使用率": "< 80%",
                    "内存使用率": "< 85%",
                    "磁盘I/O等待": "< 10%",
                    "网络延迟": "< 1ms"
                }
            },
            "系统层面": {
                "描述": "操作系统级别的性能调优",
                "优化方向": [
                    "文件系统选择和配置",
                    "内核参数调优",
                    "进程调度优化",
                    "网络栈优化"
                ],
                "关键配置": {
                    "文件系统": "ext4/xfs with noatime",
                    "调度器": "deadline/noop for SSD",
                    "内存管理": "vm.swappiness=1",
                    "网络优化": "tcp_window_scaling=1"
                }
            },
            "MySQL配置层面": {
                "描述": "MySQL服务器参数优化",
                "优化方向": [
                    "内存分配优化",
                    "I/O配置调优",
                    "连接管理优化",
                    "查询缓存配置"
                ],
                "核心参数": {
                    "innodb_buffer_pool_size": "70-80% of RAM",
                    "innodb_log_file_size": "256MB-2GB",
                    "max_connections": "根据业务需求",
                    "query_cache_size": "0 (MySQL 8.0已移除)"
                }
            },
            "应用层面": {
                "描述": "应用程序和SQL优化",
                "优化方向": [
                    "SQL语句优化",
                    "索引设计优化",
                    "连接池管理",
                    "缓存策略优化"
                ],
                "最佳实践": {
                    "SQL优化": "避免全表扫描,使用合适的索引",
                    "索引设计": "复合索引,覆盖索引",
                    "连接管理": "连接池,长连接复用",
                    "缓存策略": "Redis/Memcached"
                }
            }
        }
        
        self.performance_metrics = {
            "响应时间指标": {
                "查询响应时间": {
                    "定义": "从发起查询到返回结果的时间",
                    "目标值": "< 100ms (OLTP), < 1s (OLAP)",
                    "监控方法": "slow_query_log, performance_schema",
                    "优化策略": "索引优化,查询重写,硬件升级"
                },
                "连接建立时间": {
                    "定义": "客户端连接到MySQL服务器的时间",
                    "目标值": "< 10ms",
                    "监控方法": "connection_control插件",
                    "优化策略": "连接池,网络优化"
                }
            },
            "吞吐量指标": {
                "QPS (Queries Per Second)": {
                    "定义": "每秒执行的查询数量",
                    "计算公式": "Questions / Uptime",
                    "监控方法": "SHOW GLOBAL STATUS",
                    "影响因素": "硬件性能,查询复杂度,并发数"
                },
                "TPS (Transactions Per Second)": {
                    "定义": "每秒执行的事务数量",
                    "计算公式": "(Com_commit + Com_rollback) / Uptime",
                    "监控方法": "SHOW GLOBAL STATUS",
                    "影响因素": "事务大小,锁竞争,I/O性能"
                }
            },
            "资源利用率指标": {
                "CPU使用率": {
                    "定义": "MySQL进程占用的CPU百分比",
                    "目标值": "< 80%",
                    "监控方法": "top, htop, pidstat",
                    "优化策略": "查询优化,索引优化,硬件升级"
                },
                "内存使用率": {
                    "定义": "MySQL占用的内存百分比",
                    "目标值": "< 85%",
                    "监控方法": "performance_schema.memory_summary_global_by_event_name",
                    "优化策略": "参数调优,查询优化"
                },
                "I/O使用率": {
                    "定义": "磁盘I/O的使用情况",
                    "目标值": "< 80%",
                    "监控方法": "iostat, iotop",
                    "优化策略": "SSD升级,I/O调度优化"
                }
            }
        }
        
        self.optimization_methodology = {
            "性能分析流程": {
                "1. 问题识别": [
                    "收集性能指标",
                    "识别性能瓶颈",
                    "分析用户反馈",
                    "监控告警分析"
                ],
                "2. 基线建立": [
                    "记录当前性能指标",
                    "建立性能基线",
                    "设定优化目标",
                    "制定测试计划"
                ],
                "3. 瓶颈分析": [
                    "CPU瓶颈分析",
                    "内存瓶颈分析",
                    "I/O瓶颈分析",
                    "网络瓶颈分析"
                ],
                "4. 优化实施": [
                    "制定优化方案",
                    "分阶段实施",
                    "效果验证",
                    "回滚准备"
                ],
                "5. 效果评估": [
                    "性能指标对比",
                    "业务影响评估",
                    "用户体验反馈",
                    "持续监控"
                ]
            },
            "优化原则": {
                "80/20原则": "80%的性能问题由20%的代码引起",
                "测量优先": "先测量,再优化,避免过早优化",
                "渐进式优化": "小步快跑,逐步优化",
                "全局考虑": "避免局部优化影响整体性能",
                "成本效益": "考虑优化成本和收益比"
            }
        }
    
    def show_optimization_levels(self):
        """显示优化层次"""
        print("MySQL性能优化层次:")
        print("=" * 60)
        
        for level, details in self.optimization_levels.items():
            print(f"\n{level}:")
            print(f"  描述: {details['描述']}")
            
            print(f"  优化方向:")
            for direction in details['优化方向']:
                print(f"    • {direction}")
            
            if '关键指标' in details:
                print(f"  关键指标:")
                for metric, target in details['关键指标'].items():
                    print(f"    {metric}: {target}")
            
            if '关键配置' in details:
                print(f"  关键配置:")
                for config, value in details['关键配置'].items():
                    print(f"    {config}: {value}")
            
            if '核心参数' in details:
                print(f"  核心参数:")
                for param, value in details['核心参数'].items():
                    print(f"    {param}: {value}")
            
            if '最佳实践' in details:
                print(f"  最佳实践:")
                for practice, desc in details['最佳实践'].items():
                    print(f"    {practice}: {desc}")
    
    def show_performance_metrics(self):
        """显示性能指标"""
        print("\n性能监控指标:")
        print("=" * 50)
        
        for category, metrics in self.performance_metrics.items():
            print(f"\n{category}:")
            for metric, details in metrics.items():
                print(f"\n  {metric}:")
                for key, value in details.items():
                    print(f"    {key}: {value}")
    
    def show_optimization_methodology(self):
        """显示优化方法论"""
        print("\n性能优化方法论:")
        print("=" * 50)
        
        for category, details in self.optimization_methodology.items():
            print(f"\n{category}:")
            
            if isinstance(details, dict):
                for key, value in details.items():
                    if isinstance(value, list):
                        print(f"  {key}:")
                        for item in value:
                            print(f"    • {item}")
                    else:
                        print(f"  {key}: {value}")

# 性能优化基础演示
perf_optimization_demo = MySQLPerformanceOptimization()
perf_optimization_demo.show_optimization_levels()
perf_optimization_demo.show_performance_metrics()
perf_optimization_demo.show_optimization_methodology()
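
上面的演示给出了QPS/TPS的计算公式。下面是一个基于计数器差值采样计算QPS/TPS的最小示意脚本,假设环境中已安装PyMySQL,连接参数(host、user、password)均为演示用的假设值,请按实际环境替换。

import time

import pymysql


def sample_status(conn, names):
    """读取指定的全局状态变量,返回 {变量名: 整数值}。"""
    placeholders = ", ".join(["%s"] * len(names))
    with conn.cursor() as cur:
        cur.execute(f"SHOW GLOBAL STATUS WHERE Variable_name IN ({placeholders})", names)
        return {name: int(value) for name, value in cur.fetchall()}


def measure_qps_tps(conn, interval=5):
    """在interval秒内对计数器取两次快照,按差值计算QPS与TPS。"""
    names = ["Questions", "Com_commit", "Com_rollback"]
    first = sample_status(conn, names)
    time.sleep(interval)
    second = sample_status(conn, names)
    qps = (second["Questions"] - first["Questions"]) / interval
    tps = ((second["Com_commit"] - first["Com_commit"]) +
           (second["Com_rollback"] - first["Com_rollback"])) / interval
    return qps, tps


if __name__ == "__main__":
    # 连接参数为演示假设,请替换为实际账号
    conn = pymysql.connect(host="localhost", user="monitor", password="password")
    try:
        qps, tps = measure_qps_tps(conn, interval=5)
        print(f"QPS: {qps:.1f}, TPS: {tps:.1f}")
    finally:
        conn.close()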

9.1.2 性能分析工具

class MySQLPerformanceTools:
    def __init__(self):
        self.analysis_tools = {
            "内置工具": {
                "SHOW STATUS": {
                    "功能": "显示服务器状态变量",
                    "用法": "SHOW GLOBAL STATUS LIKE 'pattern'",
                    "关键指标": [
                        "Questions - 总查询数",
                        "Queries - 总语句数",
                        "Connections - 连接数",
                        "Threads_connected - 当前连接数",
                        "Slow_queries - 慢查询数"
                    ],
                    "示例": """
-- 查看关键性能指标
SHOW GLOBAL STATUS WHERE Variable_name IN (
    'Questions', 'Queries', 'Connections', 
    'Threads_connected', 'Slow_queries',
    'Innodb_buffer_pool_read_requests',
    'Innodb_buffer_pool_reads'
);
"""
                },
                "SHOW VARIABLES": {
                    "功能": "显示服务器配置变量",
                    "用法": "SHOW GLOBAL VARIABLES LIKE 'pattern'",
                    "关键参数": [
                        "innodb_buffer_pool_size",
                        "max_connections",
                        "innodb_log_file_size",
                        "query_cache_size",
                        "tmp_table_size"
                    ],
                    "示例": """
-- 查看关键配置参数
SHOW GLOBAL VARIABLES WHERE Variable_name IN (
    'innodb_buffer_pool_size',
    'max_connections',
    'innodb_log_file_size',
    'long_query_time'
);
"""
                },
                "SHOW PROCESSLIST": {
                    "功能": "显示当前运行的线程",
                    "用法": "SHOW FULL PROCESSLIST",
                    "信息内容": [
                        "Id - 线程ID",
                        "User - 用户名",
                        "Host - 客户端地址",
                        "db - 当前数据库",
                        "Command - 命令类型",
                        "Time - 执行时间",
                        "State - 线程状态",
                        "Info - SQL语句"
                    ],
                    "示例": """
-- 查看长时间运行的查询
SELECT 
    ID, USER, HOST, DB, COMMAND, TIME, STATE, 
    LEFT(INFO, 100) as QUERY
FROM INFORMATION_SCHEMA.PROCESSLIST 
WHERE TIME > 10 AND COMMAND != 'Sleep'
ORDER BY TIME DESC;
"""
                },
                "EXPLAIN": {
                    "功能": "分析SQL执行计划",
                    "用法": "EXPLAIN [FORMAT=JSON] SELECT ...",
                    "关键字段": [
                        "select_type - 查询类型",
                        "table - 表名",
                        "type - 连接类型",
                        "possible_keys - 可能使用的索引",
                        "key - 实际使用的索引",
                        "rows - 扫描行数",
                        "Extra - 额外信息"
                    ],
                    "示例": """
-- 分析查询执行计划
EXPLAIN FORMAT=JSON
SELECT u.name, p.title 
FROM users u 
JOIN posts p ON u.id = p.user_id 
WHERE u.status = 'active' 
AND p.created_at > '2024-01-01';
"""
                }
            },
            "Performance Schema": {
                "描述": "MySQL内置的性能监控框架",
                "主要表": {
                    "events_statements_summary_by_digest": {
                        "功能": "按SQL摘要统计执行信息",
                        "关键字段": [
                            "DIGEST_TEXT - SQL摘要",
                            "COUNT_STAR - 执行次数",
                            "AVG_TIMER_WAIT - 平均执行时间",
                            "SUM_ROWS_EXAMINED - 总扫描行数",
                            "SUM_ROWS_SENT - 总返回行数"
                        ],
                        "示例": """
-- 查找最耗时的SQL语句
SELECT 
    DIGEST_TEXT,
    COUNT_STAR as exec_count,
    AVG_TIMER_WAIT/1000000000000 as avg_time_sec,
    SUM_ROWS_EXAMINED as total_rows_examined,
    SUM_ROWS_SENT as total_rows_sent
FROM performance_schema.events_statements_summary_by_digest 
WHERE DIGEST_TEXT IS NOT NULL
ORDER BY AVG_TIMER_WAIT DESC 
LIMIT 10;
"""
                    },
                    "file_summary_by_instance": {
                        "功能": "文件I/O统计信息",
                        "关键字段": [
                            "FILE_NAME - 文件名",
                            "COUNT_READ - 读取次数",
                            "COUNT_WRITE - 写入次数",
                            "SUM_TIMER_READ - 总读取时间",
                            "SUM_TIMER_WRITE - 总写入时间"
                        ],
                        "示例": """
-- 查看I/O最频繁的文件
SELECT 
    FILE_NAME,
    COUNT_READ,
    COUNT_WRITE,
    SUM_TIMER_READ/1000000000000 as read_time_sec,
    SUM_TIMER_WRITE/1000000000000 as write_time_sec
FROM performance_schema.file_summary_by_instance 
WHERE FILE_NAME LIKE '%.ibd'
ORDER BY (COUNT_READ + COUNT_WRITE) DESC 
LIMIT 10;
"""
                    },
                    "table_io_waits_summary_by_table": {
                        "功能": "表级I/O等待统计",
                        "关键字段": [
                            "OBJECT_SCHEMA - 数据库名",
                            "OBJECT_NAME - 表名",
                            "COUNT_READ - 读取次数",
                            "COUNT_WRITE - 写入次数",
                            "SUM_TIMER_READ - 总读取等待时间"
                        ],
                        "示例": """
-- 查看表级I/O统计
SELECT 
    OBJECT_SCHEMA,
    OBJECT_NAME,
    COUNT_READ,
    COUNT_WRITE,
    SUM_TIMER_READ/1000000000000 as read_wait_sec
FROM performance_schema.table_io_waits_summary_by_table 
WHERE OBJECT_SCHEMA NOT IN ('mysql', 'performance_schema', 'information_schema')
ORDER BY SUM_TIMER_READ DESC 
LIMIT 10;
"""
                    }
                },
                "配置启用": """
-- 启用Performance Schema
[mysqld]
performance_schema = ON
performance-schema-instrument = 'statement/%=ON'
performance-schema-consumer-events-statements-current = ON
performance-schema-consumer-events-statements-history = ON
performance-schema-consumer-events-statements-history-long = ON
"""
            },
            "第三方工具": {
                "pt-query-digest": {
                    "功能": "分析MySQL慢查询日志",
                    "安装": "yum install percona-toolkit",
                    "用法": "pt-query-digest /var/log/mysql/slow.log",
                    "特点": [
                        "详细的查询分析报告",
                        "按执行时间、频率排序",
                        "提供优化建议",
                        "支持多种日志格式"
                    ],
                    "示例": """
# 分析慢查询日志
pt-query-digest /var/log/mysql/slow.log > slow_query_report.txt

# 分析指定时间段的日志
pt-query-digest --since '2024-01-01 00:00:00' \
                --until '2024-01-01 23:59:59' \
                /var/log/mysql/slow.log

# 只分析特定数据库的查询
pt-query-digest --filter '$event->{db} eq "mydb"' \
                /var/log/mysql/slow.log
"""
                },
                "mysqladmin": {
                    "功能": "MySQL管理工具",
                    "常用命令": [
                        "mysqladmin status - 显示服务器状态",
                        "mysqladmin extended-status - 详细状态",
                        "mysqladmin processlist - 进程列表",
                        "mysqladmin variables - 变量列表"
                    ],
                    "示例": """
# 显示服务器状态
mysqladmin -u root -p status

# 持续监控状态(每5秒刷新)
mysqladmin -u root -p -i 5 status

# 显示扩展状态信息
mysqladmin -u root -p extended-status

# 显示当前进程
mysqladmin -u root -p processlist
"""
                },
                "mytop": {
                    "功能": "实时MySQL性能监控",
                    "安装": "yum install mytop",
                    "特点": [
                        "类似top的界面",
                        "实时显示查询",
                        "按各种指标排序",
                        "交互式操作"
                    ],
                    "示例": """
# 启动mytop
mytop -u root -p -h localhost

# 指定刷新间隔
mytop -u root -p -s 2

# 只显示特定数据库
mytop -u root -p -d mydb
"""
                }
            }
        }
        
        self.monitoring_queries = {
            "连接监控": """
-- 当前连接数和最大连接数
SELECT 
    VARIABLE_VALUE as current_connections
FROM performance_schema.global_status 
WHERE VARIABLE_NAME = 'Threads_connected'
UNION ALL
SELECT 
    VARIABLE_VALUE as max_connections
FROM performance_schema.global_variables 
WHERE VARIABLE_NAME = 'max_connections';

-- 连接来源统计
SELECT 
    SUBSTRING_INDEX(HOST, ':', 1) as client_ip,
    COUNT(*) as connection_count,
    GROUP_CONCAT(DISTINCT USER) as users
FROM information_schema.PROCESSLIST 
GROUP BY SUBSTRING_INDEX(HOST, ':', 1)
ORDER BY connection_count DESC;
""",
            "查询性能监控": """
-- 慢查询统计(需设置 log_output 包含 TABLE,慢查询才会写入 mysql.slow_log)
SELECT 
    COUNT(*) as slow_query_count,
    AVG(TIME_TO_SEC(query_time)) as avg_query_time_sec,
    MAX(TIME_TO_SEC(query_time)) as max_query_time_sec
FROM mysql.slow_log 
WHERE start_time > DATE_SUB(NOW(), INTERVAL 1 HOUR);

-- 最耗时的查询类型
SELECT 
    sql_text,
    exec_count,
    avg_time,
    total_time
FROM (
    SELECT 
        LEFT(sql_text, 100) as sql_text,
        COUNT(*) as exec_count,
        AVG(TIME_TO_SEC(query_time)) as avg_time,
        SUM(TIME_TO_SEC(query_time)) as total_time
    FROM mysql.slow_log 
    WHERE start_time > DATE_SUB(NOW(), INTERVAL 1 DAY)
    GROUP BY LEFT(sql_text, 100)
) t
ORDER BY total_time DESC 
LIMIT 10;
""",
            "资源使用监控": """
-- InnoDB缓冲池命中率
SELECT 
    ROUND(
        (1 - (reads.VARIABLE_VALUE / requests.VARIABLE_VALUE)) * 100, 2
    ) as buffer_pool_hit_rate
FROM 
    (SELECT VARIABLE_VALUE FROM performance_schema.global_status 
     WHERE VARIABLE_NAME = 'Innodb_buffer_pool_reads') reads,
    (SELECT VARIABLE_VALUE FROM performance_schema.global_status 
     WHERE VARIABLE_NAME = 'Innodb_buffer_pool_read_requests') requests;

-- 临时表使用情况
SELECT 
    created_tmp_tables.VARIABLE_VALUE as tmp_tables_created,
    created_tmp_disk_tables.VARIABLE_VALUE as tmp_disk_tables_created,
    ROUND(
        (created_tmp_disk_tables.VARIABLE_VALUE / created_tmp_tables.VARIABLE_VALUE) * 100, 2
    ) as tmp_disk_table_ratio
FROM 
    (SELECT VARIABLE_VALUE FROM performance_schema.global_status 
     WHERE VARIABLE_NAME = 'Created_tmp_tables') created_tmp_tables,
    (SELECT VARIABLE_VALUE FROM performance_schema.global_status 
     WHERE VARIABLE_NAME = 'Created_tmp_disk_tables') created_tmp_disk_tables;
""",
            "锁等待监控": """
-- 当前锁等待情况(MySQL 5.7 的 information_schema 锁表;MySQL 8.0 请改用 sys.innodb_lock_waits 或 performance_schema.data_lock_waits)
SELECT 
    r.trx_id waiting_trx_id,
    r.trx_mysql_thread_id waiting_thread,
    r.trx_query waiting_query,
    b.trx_id blocking_trx_id,
    b.trx_mysql_thread_id blocking_thread,
    b.trx_query blocking_query
FROM information_schema.innodb_lock_waits w
INNER JOIN information_schema.innodb_trx b ON b.trx_id = w.blocking_trx_id
INNER JOIN information_schema.innodb_trx r ON r.trx_id = w.requesting_trx_id;

-- 锁等待统计
SELECT 
    COUNT(*) as lock_waits,
    AVG(TIMER_WAIT)/1000000000000 as avg_wait_sec,
    MAX(TIMER_WAIT)/1000000000000 as max_wait_sec
FROM performance_schema.events_waits_history_long 
WHERE event_name LIKE '%lock%' 
AND timer_end IS NOT NULL;
"""
        }
    
    def show_analysis_tools(self):
        """显示分析工具"""
        print("MySQL性能分析工具:")
        print("=" * 60)
        
        for category, tools in self.analysis_tools.items():
            print(f"\n{category}:")
            
            if category == "Performance Schema":
                print(f"  描述: {tools['描述']}")
                print(f"\n  主要表:")
                for table, details in tools['主要表'].items():
                    print(f"\n    {table}:")
                    print(f"      功能: {details['功能']}")
                    print(f"      关键字段:")
                    for field in details['关键字段']:
                        print(f"        • {field}")
                    print(f"      示例:{details['示例']}")
                print(f"\n  配置启用:{tools['配置启用']}")
            else:
                for tool, details in tools.items():
                    print(f"\n  {tool}:")
                    print(f"    功能: {details['功能']}")
                    
                    if '用法' in details:
                        print(f"    用法: {details['用法']}")
                    if '安装' in details:
                        print(f"    安装: {details['安装']}")
                    
                    if '关键指标' in details:
                        print(f"    关键指标:")
                        for indicator in details['关键指标']:
                            print(f"      • {indicator}")
                    
                    if '关键参数' in details:
                        print(f"    关键参数:")
                        for param in details['关键参数']:
                            print(f"      • {param}")
                    
                    if '信息内容' in details:
                        print(f"    信息内容:")
                        for info in details['信息内容']:
                            print(f"      • {info}")
                    
                    if '关键字段' in details:
                        print(f"    关键字段:")
                        for field in details['关键字段']:
                            print(f"      • {field}")
                    
                    if '特点' in details:
                        print(f"    特点:")
                        for feature in details['特点']:
                            print(f"      • {feature}")
                    
                    if '常用命令' in details:
                        print(f"    常用命令:")
                        for command in details['常用命令']:
                            print(f"      • {command}")
                    
                    if '示例' in details:
                        print(f"    示例:{details['示例']}")
    
    def show_monitoring_queries(self):
        """显示监控查询"""
        print("\n常用监控查询:")
        print("=" * 50)
        
        for category, query in self.monitoring_queries.items():
            print(f"\n{category}:")
            print(query)

# 性能分析工具演示
perf_tools_demo = MySQLPerformanceTools()
perf_tools_demo.show_analysis_tools()
perf_tools_demo.show_monitoring_queries()
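
作为上述工具清单的补充,下面是一个用Python直接查询performance_schema、按累计耗时取Top SQL摘要的最小示意脚本。脚本假设已安装PyMySQL且账号有权限读取performance_schema,连接参数为演示假设;注意 *_TIMER_WAIT 列的单位是皮秒,需除以10^12换算为秒。

import pymysql

TOP_DIGEST_SQL = """
SELECT DIGEST_TEXT,
       COUNT_STAR AS exec_count,
       AVG_TIMER_WAIT / 1000000000000 AS avg_time_sec
FROM performance_schema.events_statements_summary_by_digest
WHERE DIGEST_TEXT IS NOT NULL
ORDER BY SUM_TIMER_WAIT DESC
LIMIT %s
"""


def top_digests(conn, limit=5):
    """返回累计耗时最高的SQL摘要(执行次数、平均耗时、摘要文本)。"""
    with conn.cursor() as cur:
        cur.execute(TOP_DIGEST_SQL, (limit,))
        return cur.fetchall()


if __name__ == "__main__":
    # 连接参数为演示假设,请替换为实际账号
    conn = pymysql.connect(host="localhost", user="monitor", password="password")
    try:
        for digest, count, avg_sec in top_digests(conn):
            print(f"{count:>8} 次  平均 {avg_sec:.3f}s  {digest[:80]}")
    finally:
        conn.close()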

9.2 SQL优化

9.2.1 查询优化基础

class SQLOptimization:
    def __init__(self):
        self.optimization_principles = {
            "索引优化原则": {
                "选择性原则": {
                    "定义": "索引列的唯一值数量与总行数的比值",
                    "计算公式": "选择性 = DISTINCT(column) / COUNT(*)",
                    "优化建议": "选择性越高的列越适合建索引",
                    "示例": """
-- 计算列的选择性
SELECT 
    COUNT(DISTINCT user_id) / COUNT(*) as user_id_selectivity,
    COUNT(DISTINCT status) / COUNT(*) as status_selectivity,
    COUNT(DISTINCT created_date) / COUNT(*) as date_selectivity
FROM orders;
"""
                },
                "最左前缀原则": {
                    "定义": "复合索引只能从最左边的列开始使用",
                    "适用场景": "复合索引 (a, b, c) 可以支持 (a), (a,b), (a,b,c) 的查询",
                    "优化建议": "将选择性高的列放在前面",
                    "示例": """
-- 创建复合索引
CREATE INDEX idx_user_status_date ON orders(user_id, status, created_date);

-- 可以使用索引的查询
SELECT * FROM orders WHERE user_id = 123;
SELECT * FROM orders WHERE user_id = 123 AND status = 'completed';
SELECT * FROM orders WHERE user_id = 123 AND status = 'completed' AND created_date > '2024-01-01';

-- 无法使用索引的查询
SELECT * FROM orders WHERE status = 'completed';  -- 跳过了user_id
SELECT * FROM orders WHERE created_date > '2024-01-01';  -- 跳过了user_id和status
"""
                },
                "覆盖索引原则": {
                    "定义": "索引包含查询所需的所有列,无需回表查询",
                    "优势": "减少I/O操作,提高查询性能",
                    "适用场景": "频繁查询的列组合",
                    "示例": """
-- 创建覆盖索引
CREATE INDEX idx_user_status_amount ON orders(user_id, status, amount);

-- 使用覆盖索引的查询(无需回表)
SELECT user_id, status, amount 
FROM orders 
WHERE user_id = 123 AND status = 'completed';

-- 需要回表的查询
SELECT user_id, status, amount, description 
FROM orders 
WHERE user_id = 123 AND status = 'completed';
"""
                }
            },
            "查询重写技巧": {
                "子查询优化": {
                    "问题": "子查询可能导致性能问题",
                    "解决方案": "使用JOIN替代子查询",
                    "示例": """
-- 低效的子查询
SELECT * FROM users 
WHERE id IN (
    SELECT user_id FROM orders 
    WHERE status = 'completed'
);

-- 优化后的JOIN查询
SELECT DISTINCT u.* 
FROM users u 
INNER JOIN orders o ON u.id = o.user_id 
WHERE o.status = 'completed';

-- 使用EXISTS替代IN
SELECT * FROM users u
WHERE EXISTS (
    SELECT 1 FROM orders o 
    WHERE o.user_id = u.id AND o.status = 'completed'
);
"""
                },
                "LIMIT优化": {
                    "问题": "大偏移量的LIMIT查询性能差",
                    "解决方案": "使用游标分页或延迟关联",
                    "示例": """
-- 低效的分页查询
SELECT * FROM orders 
ORDER BY created_at 
LIMIT 100000, 20;

-- 优化方案1:使用游标分页
SELECT * FROM orders 
WHERE created_at > '2024-01-01 10:30:00' 
ORDER BY created_at 
LIMIT 20;

-- 优化方案2:延迟关联
SELECT o.* FROM orders o
INNER JOIN (
    SELECT id FROM orders 
    ORDER BY created_at 
    LIMIT 100000, 20
) t ON o.id = t.id;

-- 优化方案3:使用主键范围
SELECT * FROM orders 
WHERE id > 1000000 
ORDER BY id 
LIMIT 20;
"""
                },
                "OR条件优化": {
                    "问题": "OR条件可能无法有效使用索引",
                    "解决方案": "使用UNION替代OR",
                    "示例": """
-- 低效的OR查询
SELECT * FROM orders 
WHERE user_id = 123 OR status = 'pending';

-- 优化后的UNION查询
SELECT * FROM orders WHERE user_id = 123
UNION
SELECT * FROM orders WHERE status = 'pending' AND user_id != 123;

-- 或者使用UNION ALL(如果确定没有重复)
SELECT * FROM orders WHERE user_id = 123
UNION ALL
SELECT * FROM orders WHERE status = 'pending' AND user_id != 123;
"""
                },
                "函数优化": {
                    "问题": "在WHERE子句中使用函数会导致索引失效",
                    "解决方案": "避免在索引列上使用函数",
                    "示例": """
-- 低效查询(索引失效)
SELECT * FROM orders 
WHERE YEAR(created_at) = 2024;

SELECT * FROM orders 
WHERE UPPER(status) = 'COMPLETED';

-- 优化后的查询
SELECT * FROM orders 
WHERE created_at >= '2024-01-01' 
AND created_at < '2025-01-01';

SELECT * FROM orders 
WHERE status = 'completed';  -- 确保数据一致性

-- 如果必须使用函数,可以创建函数索引(MySQL 8.0+)
CREATE INDEX idx_year_created ON orders((YEAR(created_at)));
"""
                }
            },
            "JOIN优化策略": {
                "JOIN类型选择": {
                    "INNER JOIN": "只返回两表都有匹配的记录,性能最好",
                    "LEFT JOIN": "返回左表所有记录,右表匹配记录",
                    "RIGHT JOIN": "返回右表所有记录,左表匹配记录",
                    "FULL OUTER JOIN": "返回两表所有记录,MySQL不直接支持",
                    "优化建议": "优先使用INNER JOIN,避免不必要的OUTER JOIN"
                },
                "JOIN顺序优化": {
                    "原则": "小表驱动大表,减少循环次数",
                    "MySQL优化器": "会自动选择最优的JOIN顺序",
                    "手动优化": "使用STRAIGHT_JOIN强制指定顺序",
                    "示例": """
-- 让MySQL优化器自动选择(推荐)
SELECT u.name, o.amount 
FROM users u 
INNER JOIN orders o ON u.id = o.user_id 
WHERE u.status = 'active';

-- 强制指定JOIN顺序(谨慎使用)
SELECT STRAIGHT_JOIN u.name, o.amount 
FROM users u 
INNER JOIN orders o ON u.id = o.user_id 
WHERE u.status = 'active';
"""
                },
                "JOIN条件优化": {
                    "索引要求": "JOIN列必须有索引",
                    "数据类型": "JOIN列的数据类型必须一致",
                    "字符集": "字符串列的字符集和排序规则要一致",
                    "示例": """
-- 确保JOIN列有索引
CREATE INDEX idx_user_id ON orders(user_id);
CREATE INDEX idx_id ON users(id);

-- 数据类型一致
-- users.id: INT
-- orders.user_id: INT (不是VARCHAR)

-- 字符集一致
ALTER TABLE users MODIFY name VARCHAR(100) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci;
ALTER TABLE orders MODIFY user_name VARCHAR(100) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci;
"""
                }
            }
        }
        
        self.common_anti_patterns = {
            "SELECT * 问题": {
                "问题描述": "查询不需要的列,浪费网络带宽和内存",
                "影响": ["增加网络传输量", "占用更多内存", "可能导致索引失效"],
                "解决方案": "只查询需要的列",
                "示例": """
-- 不好的做法
SELECT * FROM users WHERE id = 123;

-- 好的做法
SELECT id, name, email FROM users WHERE id = 123;
"""
            },
            "N+1查询问题": {
                "问题描述": "在循环中执行查询,导致大量数据库请求",
                "影响": ["大量数据库连接", "网络往返次数多", "性能急剧下降"],
                "解决方案": "使用JOIN或IN查询批量获取数据",
                "示例": """
-- N+1查询问题(应用层代码)
-- 1. 查询所有用户
SELECT id, name FROM users;
-- 2. 为每个用户查询订单(N次查询)
SELECT * FROM orders WHERE user_id = ?;

-- 解决方案:使用JOIN
SELECT u.id, u.name, o.id as order_id, o.amount 
FROM users u 
LEFT JOIN orders o ON u.id = o.user_id;

-- 或者使用IN查询
SELECT * FROM orders WHERE user_id IN (1, 2, 3, 4, 5);
"""
            },
            "隐式类型转换": {
                "问题描述": "数据类型不匹配导致索引失效",
                "影响": ["索引无法使用", "全表扫描", "性能下降"],
                "解决方案": "确保查询条件的数据类型与列定义一致",
                "示例": """
-- 问题:字符串列与数字比较
-- user_id是VARCHAR类型
SELECT * FROM users WHERE user_id = 123;  -- 隐式转换

-- 解决方案
SELECT * FROM users WHERE user_id = '123';

-- 问题:数字列与字符串比较
-- id是INT类型
SELECT * FROM users WHERE id = '123';  -- 可能的隐式转换

-- 解决方案
SELECT * FROM users WHERE id = 123;
"""
            },
            "不合理的GROUP BY": {
                "问题描述": "GROUP BY使用不当导致性能问题",
                "影响": ["临时表创建", "排序开销", "内存使用增加"],
                "解决方案": "优化GROUP BY列的顺序和索引",
                "示例": """
-- 低效的GROUP BY
SELECT status, COUNT(*) 
FROM orders 
GROUP BY status 
ORDER BY COUNT(*) DESC;

-- 优化:为GROUP BY列创建索引
CREATE INDEX idx_status ON orders(status);

-- 进一步优化:使用覆盖索引
CREATE INDEX idx_status_id ON orders(status, id);

-- 避免GROUP BY后再排序
SELECT status, COUNT(*) as cnt
FROM orders 
GROUP BY status 
ORDER BY status;  -- 利用索引顺序
"""
            }
        }
    
    def show_optimization_principles(self):
        """显示优化原则"""
        print("SQL优化原则:")
        print("=" * 60)
        
        for category, principles in self.optimization_principles.items():
            print(f"\n{category}:")
            
            for principle, details in principles.items():
                print(f"\n  {principle}:")
                
                if isinstance(details, dict):
                    for key, value in details.items():
                        if key == '示例':
                            print(f"    {key}:{value}")
                        else:
                            print(f"    {key}: {value}")
                else:
                    print(f"    {details}")
    
    def show_anti_patterns(self):
        """显示反模式"""
        print("\n常见SQL反模式:")
        print("=" * 50)
        
        for pattern, details in self.common_anti_patterns.items():
            print(f"\n{pattern}:")
            print(f"  问题描述: {details['问题描述']}")
            
            if '影响' in details:
                print(f"  影响:")
                for impact in details['影响']:
                    print(f"    • {impact}")
            
            print(f"  解决方案: {details['解决方案']}")
            print(f"  示例:{details['示例']}")

# SQL优化演示
sql_optimization_demo = SQLOptimization()
sql_optimization_demo.show_optimization_principles()
sql_optimization_demo.show_anti_patterns()
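
针对上文提到的N+1查询反模式,下面给出一个应用层的最小示意:用一条IN查询批量取回多位用户的订单,代替循环中的N次单行查询。示例假设使用PyMySQL,表结构与正文示例(users、orders)一致,连接参数为演示假设。

import pymysql


def fetch_orders_by_users(conn, user_ids):
    """用一条IN查询代替循环中的N次单行查询,返回 {user_id: [(order_id, amount), ...]}。"""
    if not user_ids:
        return {}
    placeholders = ", ".join(["%s"] * len(user_ids))
    sql = f"SELECT user_id, id, amount FROM orders WHERE user_id IN ({placeholders})"
    orders_by_user = {uid: [] for uid in user_ids}
    with conn.cursor() as cur:
        cur.execute(sql, user_ids)
        for user_id, order_id, amount in cur.fetchall():
            orders_by_user[user_id].append((order_id, amount))
    return orders_by_user


if __name__ == "__main__":
    # 连接参数与库名为演示假设
    conn = pymysql.connect(host="localhost", user="app", password="password", database="mydb")
    try:
        result = fetch_orders_by_users(conn, [1, 2, 3, 4, 5])
        for uid, orders in result.items():
            print(uid, orders)
    finally:
        conn.close()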

9.2.2 索引优化实践

class IndexOptimization:
    def __init__(self):
        self.index_types = {
            "B-Tree索引": {
                "特点": "MySQL默认索引类型,适用于大多数场景",
                "适用场景": [
                    "等值查询 (=)",
                    "范围查询 (>, <, BETWEEN)",
                    "前缀匹配 (LIKE 'prefix%')",
                    "ORDER BY排序"
                ],
                "不适用场景": [
                    "后缀匹配 (LIKE '%suffix')",
                    "函数计算结果查询",
                    "非连续范围查询"
                ],
                "示例": """
-- 创建B-Tree索引
CREATE INDEX idx_user_email ON users(email);
CREATE INDEX idx_order_date ON orders(created_at);
CREATE INDEX idx_user_status ON users(user_id, status);

-- 适用的查询
SELECT * FROM users WHERE email = 'user@example.com';
SELECT * FROM orders WHERE created_at > '2024-01-01';
SELECT * FROM orders WHERE created_at BETWEEN '2024-01-01' AND '2024-12-31';
SELECT * FROM users WHERE name LIKE 'John%';
"""
            },
            "Hash索引": {
                "特点": "基于哈希表,只支持等值查询",
                "适用场景": [
                    "等值查询 (=)",
                    "IN查询",
                    "高并发等值查询"
                ],
                "不适用场景": [
                    "范围查询",
                    "排序查询",
                    "前缀匹配"
                ],
                "限制": "InnoDB不直接支持,只有Memory引擎支持",
                "示例": """
-- Memory引擎表使用Hash索引
CREATE TABLE cache_table (
    id INT PRIMARY KEY,
    key_name VARCHAR(100),
    value TEXT,
    INDEX USING HASH (key_name)
) ENGINE=MEMORY;

-- 适用的查询
SELECT * FROM cache_table WHERE key_name = 'user_session_123';
SELECT * FROM cache_table WHERE key_name IN ('key1', 'key2', 'key3');
"""
            },
            "全文索引": {
                "特点": "用于全文搜索,支持自然语言和布尔模式",
                "适用场景": [
                    "文本内容搜索",
                    "关键词匹配",
                    "相关性排序"
                ],
                "支持类型": "CHAR, VARCHAR, TEXT",
                "示例": """
-- 创建全文索引
CREATE FULLTEXT INDEX idx_content ON articles(title, content);

-- 自然语言模式搜索
SELECT *, MATCH(title, content) AGAINST('MySQL performance') as relevance
FROM articles 
WHERE MATCH(title, content) AGAINST('MySQL performance')
ORDER BY relevance DESC;

-- 布尔模式搜索
SELECT * FROM articles 
WHERE MATCH(title, content) AGAINST('+MySQL +performance -slow' IN BOOLEAN MODE);

-- 查询扩展模式
SELECT * FROM articles 
WHERE MATCH(title, content) AGAINST('database' WITH QUERY EXPANSION);
"""
            },
            "空间索引": {
                "特点": "用于地理空间数据查询",
                "适用场景": [
                    "地理位置查询",
                    "空间范围查询",
                    "距离计算"
                ],
                "支持类型": "GEOMETRY, POINT, LINESTRING, POLYGON",
                "示例": """
-- 创建空间索引
CREATE TABLE locations (
    id INT PRIMARY KEY,
    name VARCHAR(100),
    coordinates POINT NOT NULL,
    SPATIAL INDEX idx_coordinates (coordinates)
);

-- 插入空间数据(ST_Distance_Sphere 把点的 X 坐标视为经度、Y 坐标视为纬度)
INSERT INTO locations (id, name, coordinates) VALUES
(1, 'Location A', POINT(-74.0060, 40.7128)),
(2, 'Location B', POINT(-118.2437, 34.0522));

-- 空间查询
SELECT name, 
       ST_X(coordinates) as longitude,
       ST_Y(coordinates) as latitude,
       ST_Distance_Sphere(coordinates, POINT(-73.9851, 40.7589)) as distance_meters
FROM locations 
WHERE ST_Distance_Sphere(coordinates, POINT(-73.9851, 40.7589)) < 10000;
"""
            }
        }
        
        self.index_design_strategies = {
            "单列索引设计": {
                "选择原则": [
                    "高选择性列优先",
                    "频繁查询列优先",
                    "WHERE条件列优先",
                    "JOIN条件列必须有索引"
                ],
                "评估方法": """
-- 计算列的选择性
SELECT 
    'user_id' as column_name,
    COUNT(DISTINCT user_id) as distinct_values,
    COUNT(*) as total_rows,
    COUNT(DISTINCT user_id) / COUNT(*) as selectivity
FROM orders
UNION ALL
SELECT 
    'status' as column_name,
    COUNT(DISTINCT status) as distinct_values,
    COUNT(*) as total_rows,
    COUNT(DISTINCT status) / COUNT(*) as selectivity
FROM orders;

-- 分析查询频率
SELECT 
    DIGEST_TEXT,
    COUNT_STAR as execution_count,
    AVG_TIMER_WAIT/1000000000000 as avg_time_seconds
FROM performance_schema.events_statements_summary_by_digest 
WHERE DIGEST_TEXT LIKE '%WHERE%user_id%'
ORDER BY COUNT_STAR DESC;
"""
            },
            "复合索引设计": {
                "设计原则": [
                    "最左前缀原则",
                    "选择性高的列在前",
                    "等值条件列在前",
                    "范围条件列在后"
                ],
                "列顺序策略": {
                    "等值 + 等值 + 范围": "(a=? AND b=? AND c>?)",
                    "等值 + 范围": "(a=? AND b>?)",
                    "多等值条件": "(a=? AND b=? AND c=?)"
                },
                "示例": """
-- 分析查询模式
-- 查询1: WHERE user_id = ? AND status = ? AND created_at > ?
-- 查询2: WHERE user_id = ? AND status = ?
-- 查询3: WHERE user_id = ?

-- 设计复合索引
CREATE INDEX idx_user_status_date ON orders(user_id, status, created_at);

-- 验证索引使用情况
EXPLAIN SELECT * FROM orders 
WHERE user_id = 123 AND status = 'completed' AND created_at > '2024-01-01';

EXPLAIN SELECT * FROM orders 
WHERE user_id = 123 AND status = 'completed';

EXPLAIN SELECT * FROM orders 
WHERE user_id = 123;

-- 无法使用索引的查询
EXPLAIN SELECT * FROM orders 
WHERE status = 'completed' AND created_at > '2024-01-01';  -- 跳过了user_id
"""
            },
            "覆盖索引设计": {
                "定义": "索引包含查询所需的所有列,避免回表操作",
                "优势": [
                    "减少I/O操作",
                    "提高查询性能",
                    "降低锁竞争"
                ],
                "设计策略": [
                    "分析SELECT列表",
                    "包含WHERE条件列",
                    "包含ORDER BY列",
                    "考虑索引大小"
                ],
                "示例": """
-- 分析查询需求
SELECT user_id, status, amount, created_at 
FROM orders 
WHERE user_id = 123 AND status = 'completed'
ORDER BY created_at DESC;

-- 创建覆盖索引
CREATE INDEX idx_covering ON orders(user_id, status, created_at, amount);

-- 验证覆盖索引效果
EXPLAIN SELECT user_id, status, amount, created_at 
FROM orders 
WHERE user_id = 123 AND status = 'completed'
ORDER BY created_at DESC;
-- Extra列应该显示"Using index"

-- 对比:需要回表的查询
EXPLAIN SELECT user_id, status, amount, created_at, description 
FROM orders 
WHERE user_id = 123 AND status = 'completed'
ORDER BY created_at DESC;
-- 因为需要description列,无法使用覆盖索引
"""
            }
        }
        
        self.index_maintenance = {
            "索引监控": {
                "使用率监控": """
-- 查看索引使用统计
SELECT 
    OBJECT_SCHEMA,
    OBJECT_NAME,
    INDEX_NAME,
    COUNT_FETCH,
    COUNT_INSERT,
    COUNT_UPDATE,
    COUNT_DELETE
FROM performance_schema.table_io_waits_summary_by_index_usage 
WHERE OBJECT_SCHEMA = 'your_database'
ORDER BY COUNT_FETCH DESC;

-- 查找未使用的索引
SELECT 
    OBJECT_SCHEMA,
    OBJECT_NAME,
    INDEX_NAME
FROM performance_schema.table_io_waits_summary_by_index_usage 
WHERE OBJECT_SCHEMA = 'your_database'
AND INDEX_NAME IS NOT NULL 
AND INDEX_NAME != 'PRIMARY'
AND COUNT_FETCH = 0
AND COUNT_INSERT = 0
AND COUNT_UPDATE = 0
AND COUNT_DELETE = 0;
""",
                "重复索引检测": """
-- 检测重复索引(先按索引聚合出列清单,再按列清单分组)
SELECT 
    TABLE_SCHEMA,
    TABLE_NAME,
    GROUP_CONCAT(INDEX_NAME) as duplicate_indexes,
    index_columns
FROM (
    SELECT 
        TABLE_SCHEMA,
        TABLE_NAME,
        INDEX_NAME,
        GROUP_CONCAT(COLUMN_NAME ORDER BY SEQ_IN_INDEX) as index_columns
    FROM information_schema.STATISTICS 
    WHERE TABLE_SCHEMA = 'your_database'
    GROUP BY TABLE_SCHEMA, TABLE_NAME, INDEX_NAME
) t
GROUP BY TABLE_SCHEMA, TABLE_NAME, index_columns
HAVING COUNT(*) > 1;

-- 检测冗余索引(某索引的列是另一索引的最左前缀)
SELECT 
    a.TABLE_SCHEMA,
    a.TABLE_NAME,
    a.INDEX_NAME as redundant_index,
    b.INDEX_NAME as covering_index,
    a.index_columns as redundant_columns,
    b.index_columns as covering_columns
FROM (
    SELECT TABLE_SCHEMA, TABLE_NAME, INDEX_NAME,
           GROUP_CONCAT(COLUMN_NAME ORDER BY SEQ_IN_INDEX) as index_columns
    FROM information_schema.STATISTICS 
    WHERE TABLE_SCHEMA = 'your_database'
    GROUP BY TABLE_SCHEMA, TABLE_NAME, INDEX_NAME
) a
JOIN (
    SELECT TABLE_SCHEMA, TABLE_NAME, INDEX_NAME,
           GROUP_CONCAT(COLUMN_NAME ORDER BY SEQ_IN_INDEX) as index_columns
    FROM information_schema.STATISTICS 
    WHERE TABLE_SCHEMA = 'your_database'
    GROUP BY TABLE_SCHEMA, TABLE_NAME, INDEX_NAME
) b ON a.TABLE_SCHEMA = b.TABLE_SCHEMA
   AND a.TABLE_NAME = b.TABLE_NAME
   AND a.INDEX_NAME != b.INDEX_NAME
   AND b.index_columns LIKE CONCAT(a.index_columns, ',%');

-- MySQL 5.7+ 也可直接使用 sys.schema_redundant_indexes 视图
"""
            },
            "索引维护操作": {
                "重建索引": """
-- 在线重建索引(MySQL 5.6+)
ALTER TABLE orders DROP INDEX idx_user_status, ALGORITHM=INPLACE, LOCK=NONE;
ALTER TABLE orders ADD INDEX idx_user_status (user_id, status), ALGORITHM=INPLACE, LOCK=NONE;

-- 或者使用一条语句
ALTER TABLE orders 
DROP INDEX idx_user_status,
ADD INDEX idx_user_status_new (user_id, status, created_at),
ALGORITHM=INPLACE, LOCK=NONE;
""",
                "索引统计更新": """
-- 更新表统计信息
ANALYZE TABLE orders;

-- 重建表并重新收集统计信息(ALTER TABLE ... FORCE 会重建整个表及其全部索引)
ALTER TABLE orders FORCE;

-- 检查索引基数
SHOW INDEX FROM orders;
""",
                "索引碎片整理": """
-- 检查表碎片
SELECT 
    TABLE_SCHEMA,
    TABLE_NAME,
    DATA_LENGTH,
    INDEX_LENGTH,
    DATA_FREE,
    (DATA_FREE / (DATA_LENGTH + INDEX_LENGTH)) * 100 as fragmentation_ratio
FROM information_schema.TABLES 
WHERE TABLE_SCHEMA = 'your_database'
AND DATA_FREE > 0
ORDER BY fragmentation_ratio DESC;

-- 优化表(重建索引和整理碎片)
OPTIMIZE TABLE orders;

-- 或者使用ALTER TABLE重建
ALTER TABLE orders ENGINE=InnoDB;
"""
            }
        }
    
    def show_index_types(self):
        """显示索引类型"""
        print("MySQL索引类型:")
        print("=" * 60)
        
        for index_type, details in self.index_types.items():
            print(f"\n{index_type}:")
            print(f"  特点: {details['特点']}")
            
            if '适用场景' in details:
                print(f"  适用场景:")
                for scenario in details['适用场景']:
                    print(f"    • {scenario}")
            
            if '不适用场景' in details:
                print(f"  不适用场景:")
                for scenario in details['不适用场景']:
                    print(f"    • {scenario}")
            
            if '支持类型' in details:
                print(f"  支持类型: {details['支持类型']}")
            
            if '限制' in details:
                print(f"  限制: {details['限制']}")
            
            print(f"  示例:{details['示例']}")
    
    def show_design_strategies(self):
        """显示设计策略"""
        print("\n索引设计策略:")
        print("=" * 50)
        
        for strategy, details in self.index_design_strategies.items():
            print(f"\n{strategy}:")
            
            if '选择原则' in details:
                print(f"  选择原则:")
                for principle in details['选择原则']:
                    print(f"    • {principle}")
            
            if '设计原则' in details:
                print(f"  设计原则:")
                for principle in details['设计原则']:
                    print(f"    • {principle}")
            
            if '列顺序策略' in details:
                print(f"  列顺序策略:")
                for strategy_name, pattern in details['列顺序策略'].items():
                    print(f"    {strategy_name}: {pattern}")
            
            if '优势' in details:
                print(f"  优势:")
                for advantage in details['优势']:
                    print(f"    • {advantage}")
            
            if '设计策略' in details:
                print(f"  设计策略:")
                for strategy_item in details['设计策略']:
                    print(f"    • {strategy_item}")
            
            if '评估方法' in details:
                print(f"  评估方法:{details['评估方法']}")
            
            if '示例' in details:
                print(f"  示例:{details['示例']}")
    
    def show_maintenance(self):
        """显示维护方法"""
        print("\n索引维护:")
        print("=" * 40)
        
        for category, operations in self.index_maintenance.items():
            print(f"\n{category}:")
            
            for operation, content in operations.items():
                print(f"\n  {operation}:{content}")

# 索引优化演示
index_optimization_demo = IndexOptimization()
index_optimization_demo.show_index_types()
index_optimization_demo.show_design_strategies()
index_optimization_demo.show_maintenance()
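
结合上文的选择性原则,下面是一个批量计算候选索引列选择性的最小示意脚本,便于比较哪些列更适合放在复合索引的前面。脚本假设使用PyMySQL,其中的库名、表名、列名均为演示假设。

import pymysql


def column_selectivity(conn, table, columns):
    """返回 {列名: 选择性},选择性 = COUNT(DISTINCT col) / COUNT(*)。"""
    exprs = ", ".join(
        f"COUNT(DISTINCT `{col}`) / COUNT(*) AS `{col}`" for col in columns
    )
    with conn.cursor() as cur:
        cur.execute(f"SELECT {exprs} FROM `{table}`")
        row = cur.fetchone()
    return dict(zip(columns, row))


if __name__ == "__main__":
    # 连接参数与库名为演示假设
    conn = pymysql.connect(host="localhost", user="app", password="password", database="mydb")
    try:
        result = column_selectivity(conn, "orders", ["user_id", "status", "created_date"])
        for col, sel in sorted(result.items(), key=lambda x: x[1], reverse=True):
            print(f"{col}: {sel:.4f}")
    finally:
        conn.close()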

9.3 配置优化

9.3.1 内存配置优化

class MySQLMemoryOptimization:
    def __init__(self):
        self.memory_parameters = {
            "InnoDB缓冲池配置": {
                "innodb_buffer_pool_size": {
                    "描述": "InnoDB缓冲池大小,最重要的内存参数",
                    "推荐值": "物理内存的70-80%(专用数据库服务器)",
                    "计算方法": "总内存 - 操作系统 - 其他应用 - MySQL其他缓存",
                    "注意事项": [
                        "不要设置超过物理内存",
                        "考虑操作系统和其他进程的内存需求",
                        "MySQL 5.7+支持在线调整"
                    ],
                    "配置示例": """
# 16GB内存服务器配置
[mysqld]
innodb_buffer_pool_size = 12G

# 32GB内存服务器配置
innodb_buffer_pool_size = 24G

# 在线调整(MySQL 5.7+)
SET GLOBAL innodb_buffer_pool_size = 12*1024*1024*1024;

# 查看当前配置
SHOW VARIABLES LIKE 'innodb_buffer_pool_size';

# 查看缓冲池使用情况
SHOW ENGINE INNODB STATUS\G
"""
                },
                "innodb_buffer_pool_instances": {
                    "描述": "缓冲池实例数,减少并发竞争",
                    "推荐值": "缓冲池大小GB数,最大64",
                    "计算公式": "min(缓冲池大小(GB), CPU核心数, 64)",
                    "配置示例": """
# 12GB缓冲池配置
innodb_buffer_pool_size = 12G
innodb_buffer_pool_instances = 12

# 24GB缓冲池配置
innodb_buffer_pool_size = 24G
innodb_buffer_pool_instances = 24

# 查看实例状态
SELECT 
    POOL_ID,
    POOL_SIZE,
    FREE_BUFFERS,
    DATABASE_PAGES,
    OLD_DATABASE_PAGES
FROM information_schema.INNODB_BUFFER_POOL_STATS;
"""
                },
                "innodb_log_buffer_size": {
                    "描述": "InnoDB日志缓冲区大小",
                    "推荐值": "16MB-64MB",
                    "影响因素": "事务大小、写入频率",
                    "配置示例": """
# 一般配置
innodb_log_buffer_size = 32M

# 高并发写入场景
innodb_log_buffer_size = 64M

# 监控日志缓冲区使用
SHOW ENGINE INNODB STATUS\G
# 查看 "Log sequence number" 和 "Log flushed up to"
"""
                }
            },
            "查询缓存配置": {
                "query_cache_size": {
                    "描述": "查询缓存大小(MySQL 8.0已移除)",
                    "推荐值": "0(禁用)或128MB-512MB",
                    "注意事项": [
                        "MySQL 8.0已移除查询缓存",
                        "高并发写入场景建议禁用",
                        "只适合读多写少的场景"
                    ],
                    "配置示例": """
# MySQL 5.7及以下版本
# 禁用查询缓存(推荐)
query_cache_type = 0
query_cache_size = 0

# 启用查询缓存(谨慎使用)
query_cache_type = 1
query_cache_size = 256M
query_cache_limit = 2M

# 查看查询缓存状态
SHOW VARIABLES LIKE 'query_cache%';
SHOW STATUS LIKE 'Qcache%';
"""
                }
            },
            "连接缓存配置": {
                "thread_cache_size": {
                    "描述": "线程缓存大小,复用连接线程",
                    "推荐值": "8 + (max_connections / 100)",
                    "计算示例": "max_connections=1000时,thread_cache_size=18",
                    "配置示例": """
# 基础配置
max_connections = 1000
thread_cache_size = 18

# 高并发配置
max_connections = 2000
thread_cache_size = 28

# 监控线程缓存效率
SHOW STATUS LIKE 'Threads_%';
SHOW STATUS LIKE 'Connections';

# 计算线程缓存命中率
# Thread_cache_hit_rate = (Connections - Threads_created) / Connections * 100
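
# 按上述公式计算命中率的示例查询(假设已启用performance_schema,仅作演示)
SELECT ROUND(
    (1 - threads_created.VARIABLE_VALUE / connections.VARIABLE_VALUE) * 100, 2
) as thread_cache_hit_rate
FROM 
    (SELECT VARIABLE_VALUE FROM performance_schema.global_status 
     WHERE VARIABLE_NAME = 'Threads_created') threads_created,
    (SELECT VARIABLE_VALUE FROM performance_schema.global_status 
     WHERE VARIABLE_NAME = 'Connections') connections;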
"""
                },
                "table_open_cache": {
                    "描述": "表文件描述符缓存",
                    "推荐值": "max_connections * 表数量 / 并发连接数",
                    "最小值": "400",
                    "配置示例": """
# 基础配置
table_open_cache = 2000

# 大量表的场景
table_open_cache = 4000

# 监控表缓存使用
SHOW STATUS LIKE 'Open%';
SHOW STATUS LIKE 'Opened_tables';

# 如果Opened_tables增长很快,需要增加table_open_cache
"""
                }
            },
            "排序和临时表配置": {
                "sort_buffer_size": {
                    "描述": "每个连接的排序缓冲区大小",
                    "推荐值": "256KB-2MB",
                    "注意事项": "过大会消耗大量内存",
                    "配置示例": """
# 一般配置
sort_buffer_size = 512K

# 大量排序操作
sort_buffer_size = 1M

# 监控排序操作
SHOW STATUS LIKE 'Sort_%';
# Sort_merge_passes > 0 表示需要多次排序
"""
                },
                "tmp_table_size": {
                    "描述": "内存临时表最大大小",
                    "推荐值": "32MB-128MB",
                    "相关参数": "max_heap_table_size",
                    "配置示例": """
# 基础配置
tmp_table_size = 64M
max_heap_table_size = 64M

# 大量GROUP BY、ORDER BY操作
tmp_table_size = 128M
max_heap_table_size = 128M

# 监控临时表使用
SHOW STATUS LIKE 'Created_tmp%';
# Created_tmp_disk_tables / Created_tmp_tables 比例应该较低
"""
                },
                "join_buffer_size": {
                    "描述": "JOIN操作缓冲区大小",
                    "推荐值": "256KB-1MB",
                    "使用场景": "没有索引的JOIN操作",
                    "配置示例": """
# 一般配置
join_buffer_size = 512K

# 大量JOIN操作
join_buffer_size = 1M

# 监控JOIN缓冲区使用
# 通过EXPLAIN查看是否使用了join buffer
EXPLAIN SELECT * FROM table1 t1 JOIN table2 t2 ON t1.col = t2.col;
"""
                }
            }
        }
        
        self.memory_monitoring = {
            "内存使用监控": """
-- 查看全局内存使用情况
SELECT 
    EVENT_NAME,
    CURRENT_COUNT_USED,
    CURRENT_NUMBER_OF_BYTES_USED/1024/1024 as current_mb,
    HIGH_COUNT_USED,
    HIGH_NUMBER_OF_BYTES_USED/1024/1024 as high_mb
FROM performance_schema.memory_summary_global_by_event_name 
WHERE CURRENT_NUMBER_OF_BYTES_USED > 0
ORDER BY CURRENT_NUMBER_OF_BYTES_USED DESC
LIMIT 20;

-- 查看InnoDB缓冲池状态
SELECT 
    POOL_ID,
    POOL_SIZE * @@innodb_page_size / 1024 / 1024 as pool_size_mb,
    FREE_BUFFERS * @@innodb_page_size / 1024 / 1024 as free_mb,
    DATABASE_PAGES * @@innodb_page_size / 1024 / 1024 as data_mb,
    (DATABASE_PAGES / POOL_SIZE) * 100 as usage_percent
FROM information_schema.INNODB_BUFFER_POOL_STATS;

-- 查看缓冲池命中率
SELECT 
    ROUND(
        (1 - (reads.VARIABLE_VALUE / requests.VARIABLE_VALUE)) * 100, 2
    ) as buffer_pool_hit_rate_percent
FROM 
    (SELECT VARIABLE_VALUE FROM performance_schema.global_status 
     WHERE VARIABLE_NAME = 'Innodb_buffer_pool_reads') reads,
    (SELECT VARIABLE_VALUE FROM performance_schema.global_status 
     WHERE VARIABLE_NAME = 'Innodb_buffer_pool_read_requests') requests;
""",
            "内存配置建议": """
-- 生成内存配置建议的查询
SELECT 
    'innodb_buffer_pool_size' as parameter,
    CONCAT(
        'Current: ', 
        ROUND(@@innodb_buffer_pool_size/1024/1024/1024, 1), 'GB, ',
        'Recommended: 70-80% of RAM'
    ) as recommendation
UNION ALL
SELECT 
    'thread_cache_size' as parameter,
    CONCAT(
        'Current: ', @@thread_cache_size, ', ',
        'Recommended: ', 8 + ROUND(@@max_connections/100)
    ) as recommendation
UNION ALL
SELECT 
    'tmp_table_size' as parameter,
    CONCAT(
        'Current: ', ROUND(@@tmp_table_size/1024/1024), 'MB, ',
        'Disk tmp tables: ', 
        (SELECT VARIABLE_VALUE FROM performance_schema.global_status 
         WHERE VARIABLE_NAME = 'Created_tmp_disk_tables'),
        '/', 
        (SELECT VARIABLE_VALUE FROM performance_schema.global_status 
         WHERE VARIABLE_NAME = 'Created_tmp_tables')
    ) as recommendation;
"""
        }
        
        self.optimization_scripts = {
            "内存配置脚本": """
#!/bin/bash
# MySQL内存配置优化脚本

# 获取系统内存信息
TOTAL_RAM=$(free -g | awk 'NR==2{print $2}')
echo "Total RAM: ${TOTAL_RAM}GB"

# 计算推荐的InnoDB缓冲池大小(70%)
BUFFER_POOL_SIZE=$((TOTAL_RAM * 70 / 100))
echo "Recommended innodb_buffer_pool_size: ${BUFFER_POOL_SIZE}GB"

# 计算缓冲池实例数
BUFFER_POOL_INSTANCES=$((BUFFER_POOL_SIZE < 64 ? BUFFER_POOL_SIZE : 64))
echo "Recommended innodb_buffer_pool_instances: ${BUFFER_POOL_INSTANCES}"

# 生成配置文件
cat > /tmp/mysql_memory_config.cnf << EOF
[mysqld]
# InnoDB缓冲池配置
innodb_buffer_pool_size = ${BUFFER_POOL_SIZE}G
innodb_buffer_pool_instances = ${BUFFER_POOL_INSTANCES}

# 日志缓冲区
innodb_log_buffer_size = 32M

# 连接和线程缓存
thread_cache_size = 18
table_open_cache = 2000

# 排序和临时表
sort_buffer_size = 512K
tmp_table_size = 64M
max_heap_table_size = 64M
join_buffer_size = 512K

# 查询缓存(MySQL 5.7及以下)
query_cache_type = 0
query_cache_size = 0
EOF

echo "Configuration saved to /tmp/mysql_memory_config.cnf"
echo "Please review and merge with your MySQL configuration file."
""",
            "内存监控脚本": """
#!/bin/bash
# MySQL内存使用监控脚本

MYSQL_USER="monitor"
MYSQL_PASS="password"
MYSQL_HOST="localhost"

echo "=== MySQL Memory Usage Report ==="
echo "Generated at: $(date)"
echo

# 系统内存信息
echo "=== System Memory ==="
free -h
echo

# MySQL进程内存使用
echo "=== MySQL Process Memory ==="
ps aux | grep mysqld | grep -v grep | awk '{print "PID: " $2 ", Memory: " $4 "%, RSS: " $6/1024 "MB"}'
echo

# InnoDB缓冲池状态
echo "=== InnoDB Buffer Pool Status ==="
mysql -u$MYSQL_USER -p$MYSQL_PASS -h$MYSQL_HOST -e "
SELECT 
    CONCAT(ROUND(@@innodb_buffer_pool_size/1024/1024/1024, 1), 'GB') as configured_size,
    CONCAT(ROUND(SUM(POOL_SIZE * @@innodb_page_size)/1024/1024/1024, 1), 'GB') as actual_size,
    CONCAT(ROUND(SUM(FREE_BUFFERS * @@innodb_page_size)/1024/1024/1024, 1), 'GB') as free_size,
    CONCAT(ROUND(SUM(DATABASE_PAGES * @@innodb_page_size)/1024/1024/1024, 1), 'GB') as data_size,
    CONCAT(ROUND(AVG(DATABASE_PAGES / POOL_SIZE) * 100, 1), '%') as usage_percent
FROM information_schema.INNODB_BUFFER_POOL_STATS;"
echo

# 缓冲池命中率
echo "=== Buffer Pool Hit Rate ==="
mysql -u$MYSQL_USER -p$MYSQL_PASS -h$MYSQL_HOST -e "
SELECT 
    CONCAT(ROUND((1 - (reads.VARIABLE_VALUE / requests.VARIABLE_VALUE)) * 100, 2), '%') as hit_rate
FROM 
    (SELECT VARIABLE_VALUE FROM performance_schema.global_status 
     WHERE VARIABLE_NAME = 'Innodb_buffer_pool_reads') reads,
    (SELECT VARIABLE_VALUE FROM performance_schema.global_status 
     WHERE VARIABLE_NAME = 'Innodb_buffer_pool_read_requests') requests;"
echo

# 临时表使用情况
echo "=== Temporary Table Usage ==="
mysql -u$MYSQL_USER -p$MYSQL_PASS -h$MYSQL_HOST -e "
SELECT 
    created_tmp_tables.VARIABLE_VALUE as tmp_tables_created,
    created_tmp_disk_tables.VARIABLE_VALUE as tmp_disk_tables_created,
    CONCAT(ROUND((created_tmp_disk_tables.VARIABLE_VALUE / created_tmp_tables.VARIABLE_VALUE) * 100, 2), '%') as disk_tmp_ratio
FROM 
    (SELECT VARIABLE_VALUE FROM performance_schema.global_status 
     WHERE VARIABLE_NAME = 'Created_tmp_tables') created_tmp_tables,
    (SELECT VARIABLE_VALUE FROM performance_schema.global_status 
     WHERE VARIABLE_NAME = 'Created_tmp_disk_tables') created_tmp_disk_tables;"
"""
        }
    
    def show_memory_parameters(self):
        """显示内存参数"""
        print("MySQL内存配置参数:")
        print("=" * 60)
        
        for category, parameters in self.memory_parameters.items():
            print(f"\n{category}:")
            
            for param, details in parameters.items():
                print(f"\n  {param}:")
                print(f"    描述: {details['描述']}")
                
                if '推荐值' in details:
                    print(f"    推荐值: {details['推荐值']}")
                if '计算方法' in details:
                    print(f"    计算方法: {details['计算方法']}")
                if '计算公式' in details:
                    print(f"    计算公式: {details['计算公式']}")
                if '计算示例' in details:
                    print(f"    计算示例: {details['计算示例']}")
                if '影响因素' in details:
                    print(f"    影响因素: {details['影响因素']}")
                if '最小值' in details:
                    print(f"    最小值: {details['最小值']}")
                if '相关参数' in details:
                    print(f"    相关参数: {details['相关参数']}")
                if '使用场景' in details:
                    print(f"    使用场景: {details['使用场景']}")
                
                if '注意事项' in details:
                    print(f"    注意事项:")
                    for note in details['注意事项']:
                        print(f"      • {note}")
                
                print(f"    配置示例:{details['配置示例']}")
    
    def show_monitoring(self):
        """显示监控方法"""
        print("\n内存监控:")
        print("=" * 40)
        
        for category, content in self.memory_monitoring.items():
            print(f"\n{category}:{content}")
    
    def show_scripts(self):
        """显示优化脚本"""
        print("\n优化脚本:")
        print("=" * 40)
        
        for script_name, content in self.optimization_scripts.items():
            print(f"\n{script_name}:{content}")

# 内存优化演示
memory_optimization_demo = MySQLMemoryOptimization()
memory_optimization_demo.show_memory_parameters()
memory_optimization_demo.show_monitoring()
memory_optimization_demo.show_scripts()
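
在上述参数说明和脚本之外,下面再给出一个用Python汇总内存相关健康指标(缓冲池命中率、磁盘临时表比例、线程缓存命中率)的最小示意,便于定期巡检。脚本假设使用PyMySQL并已启用performance_schema,连接参数为演示假设;指标阈值请结合业务自行判断。

import pymysql


def get_status(conn, names):
    """读取一组全局状态变量,返回 {变量名: 浮点值}。"""
    placeholders = ", ".join(["%s"] * len(names))
    with conn.cursor() as cur:
        cur.execute(
            f"SHOW GLOBAL STATUS WHERE Variable_name IN ({placeholders})", names
        )
        return {k: float(v) for k, v in cur.fetchall()}


def memory_health(conn):
    """计算三个常用内存健康指标(均为百分比)。"""
    s = get_status(conn, [
        "Innodb_buffer_pool_reads", "Innodb_buffer_pool_read_requests",
        "Created_tmp_tables", "Created_tmp_disk_tables",
        "Connections", "Threads_created",
    ])
    return {
        "buffer_pool_hit_rate": (1 - s["Innodb_buffer_pool_reads"] / s["Innodb_buffer_pool_read_requests"]) * 100,
        "tmp_disk_table_ratio": s["Created_tmp_disk_tables"] / s["Created_tmp_tables"] * 100,
        "thread_cache_hit_rate": (1 - s["Threads_created"] / s["Connections"]) * 100,
    }


if __name__ == "__main__":
    # 连接参数为演示假设
    conn = pymysql.connect(host="localhost", user="monitor", password="password")
    try:
        for name, value in memory_health(conn).items():
            print(f"{name}: {value:.2f}%")
    finally:
        conn.close()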

9.3.2 I/O配置优化

class MySQLIOOptimization:
    def __init__(self):
        self.io_parameters = {
            "InnoDB日志配置": {
                "innodb_log_file_size": {
                    "描述": "InnoDB重做日志文件大小",
                    "推荐值": "缓冲池大小的25%,最大2GB(MySQL 5.6前)",
                    "计算方法": "innodb_buffer_pool_size * 0.25",
                    "影响因素": [
                        "写入负载大小",
                        "恢复时间要求",
                        "检查点频率"
                    ],
                    "配置示例": """
# MySQL 5.6及以前版本
[mysqld]
innodb_log_file_size = 512M
innodb_log_files_in_group = 2

# MySQL 5.7+(支持更大的日志文件)
innodb_log_file_size = 2G
innodb_log_files_in_group = 2

# MySQL 8.0.30+(innodb_redo_log_capacity,支持在线调整)
SET GLOBAL innodb_redo_log_capacity = 4294967296;  -- 4GB

# 监控日志文件使用
SHOW ENGINE INNODB STATUS\G
# 查看 "Log sequence number" 和 "Last checkpoint at"
"""
                },
                "innodb_flush_log_at_trx_commit": {
                    "描述": "事务提交时日志刷新策略",
                    "选项说明": {
                        "0": "每秒刷新一次,性能最好但可能丢失1秒数据",
                        "1": "每次事务提交都刷新,最安全但性能较差",
                        "2": "每次提交写入OS缓存,每秒刷新到磁盘"
                    },
                    "推荐值": "1(ACID合规)或2(性能优先)",
                    "配置示例": """
# 最高安全性(默认)
innodb_flush_log_at_trx_commit = 1

# 平衡性能和安全性
innodb_flush_log_at_trx_commit = 2

# 最高性能(可能丢失数据)
innodb_flush_log_at_trx_commit = 0

# 监控日志刷新
SHOW GLOBAL STATUS LIKE 'Innodb_os_log_fsyncs';
SHOW GLOBAL STATUS LIKE 'Innodb_log_writes';
"""
                },
                "sync_binlog": {
                    "描述": "二进制日志同步到磁盘的频率",
                    "推荐值": "1(主库)或0(从库,性能优先)",
                    "注意事项": "与innodb_flush_log_at_trx_commit配合使用",
                    "配置示例": """
# 主库配置(最高安全性)
sync_binlog = 1
innodb_flush_log_at_trx_commit = 1

# 从库配置(性能优先)
sync_binlog = 0
innodb_flush_log_at_trx_commit = 2

# 监控二进制日志同步
SHOW GLOBAL STATUS LIKE 'Binlog_cache_use';
SHOW GLOBAL STATUS LIKE 'Binlog_cache_disk_use';
"""
                }
            },
            "InnoDB刷新配置": {
                "innodb_flush_method": {
                    "描述": "InnoDB数据和日志文件的刷新方法",
                    "选项说明": {
                        "fsync": "默认方法,使用fsync()刷新",
                        "O_DSYNC": "使用O_DSYNC打开日志文件",
                        "O_DIRECT": "使用O_DIRECT打开数据文件,避免OS缓存",
                        "O_DIRECT_NO_FSYNC": "O_DIRECT + 不对数据文件fsync"
                    },
                    "推荐值": "O_DIRECT(Linux)",
                    "配置示例": """
# Linux系统推荐配置
innodb_flush_method = O_DIRECT

# Windows系统
innodb_flush_method = unbuffered

# 监控I/O操作
SHOW ENGINE INNODB STATUS\G
# 查看 "Pending normal aio reads" 和 "Pending flushes"
"""
                },
                "innodb_io_capacity": {
                    "描述": "InnoDB后台I/O操作的容量限制",
                    "推荐值": "SSD: 2000-20000, HDD: 200-2000",
                    "计算方法": "根据存储设备IOPS能力设置",
                    "配置示例": """
# SSD存储配置
innodb_io_capacity = 4000
innodb_io_capacity_max = 8000

# HDD存储配置
innodb_io_capacity = 400
innodb_io_capacity_max = 800

# 测试存储IOPS
# fio --name=random-write --ioengine=posixaio --rw=randwrite --bs=4k --size=4g --numjobs=1 --iodepth=1 --runtime=60 --time_based --end_fsync=1

# 监控I/O容量使用
SHOW ENGINE INNODB STATUS\G
# 查看 "Pending normal aio reads/writes"
"""
                },
                "innodb_read_io_threads": {
                    "描述": "InnoDB读I/O线程数",
                    "推荐值": "4-16(根据CPU核心数)",
                    "相关参数": "innodb_write_io_threads",
                    "配置示例": """
# 基础配置
innodb_read_io_threads = 8
innodb_write_io_threads = 8

# 高I/O负载配置
innodb_read_io_threads = 16
innodb_write_io_threads = 16

# 查看I/O线程状态
SHOW ENGINE INNODB STATUS\G
# 查看 "FILE I/O" 部分
"""
                }
            },
            "文件系统配置": {
                "innodb_file_per_table": {
                    "描述": "每个表使用独立的表空间文件",
                    "推荐值": "ON(MySQL 5.6+默认)",
                    "优势": [
                        "便于表空间管理",
                        "支持表压缩",
                        "便于备份恢复",
                        "减少系统表空间大小"
                    ],
                    "配置示例": """
# 启用独立表空间(推荐)
innodb_file_per_table = ON

# 查看表空间信息
SELECT 
    TABLE_SCHEMA,
    TABLE_NAME,
    ENGINE,
    TABLE_ROWS,
    DATA_LENGTH/1024/1024 as data_mb,
    INDEX_LENGTH/1024/1024 as index_mb
FROM information_schema.TABLES 
WHERE ENGINE = 'InnoDB'
ORDER BY (DATA_LENGTH + INDEX_LENGTH) DESC;
"""
                },
                "innodb_autoextend_increment": {
                    "描述": "表空间自动扩展的增量大小",
                    "推荐值": "64MB-256MB",
                    "单位": "MB",
                    "配置示例": """
# 基础配置
innodb_autoextend_increment = 64

# 大表场景
innodb_autoextend_increment = 256

# 监控表空间扩展
SELECT 
    FILE_NAME,
    TABLESPACE_NAME,
    INITIAL_SIZE/1024/1024 as initial_mb,
    TOTAL_EXTENTS,
    EXTENT_SIZE,
    AUTOEXTEND_SIZE/1024/1024 as autoextend_mb
FROM information_schema.FILES 
WHERE FILE_TYPE = 'TABLESPACE';
"""
                }
            }
        }
        
        self.io_monitoring = {
            "I/O性能监控": """
-- 查看InnoDB I/O统计
SELECT 
    'Data Reads' as metric,
    VARIABLE_VALUE as value
FROM performance_schema.global_status 
WHERE VARIABLE_NAME = 'Innodb_data_reads'
UNION ALL
SELECT 
    'Data Writes' as metric,
    VARIABLE_VALUE as value
FROM performance_schema.global_status 
WHERE VARIABLE_NAME = 'Innodb_data_writes'
UNION ALL
SELECT 
    'Data Read (MB)' as metric,
    ROUND(VARIABLE_VALUE/1024/1024, 2) as value
FROM performance_schema.global_status 
WHERE VARIABLE_NAME = 'Innodb_data_read'
UNION ALL
SELECT 
    'Data Written (MB)' as metric,
    ROUND(VARIABLE_VALUE/1024/1024, 2) as value
FROM performance_schema.global_status 
WHERE VARIABLE_NAME = 'Innodb_data_written';

-- 查看I/O等待事件
SELECT 
    EVENT_NAME,
    COUNT_STAR,
    SUM_TIMER_WAIT/1000000000000 as total_wait_seconds,
    AVG_TIMER_WAIT/1000000000000 as avg_wait_seconds
FROM performance_schema.events_waits_summary_global_by_event_name 
WHERE EVENT_NAME LIKE 'wait/io/%'
AND COUNT_STAR > 0
ORDER BY SUM_TIMER_WAIT DESC
LIMIT 10;

-- 查看文件I/O统计
SELECT 
    FILE_NAME,
    EVENT_NAME,
    COUNT_READ,
    COUNT_WRITE,
    SUM_NUMBER_OF_BYTES_READ/1024/1024 as read_mb,
    SUM_NUMBER_OF_BYTES_WRITE/1024/1024 as write_mb
FROM performance_schema.file_summary_by_instance 
WHERE COUNT_READ > 0 OR COUNT_WRITE > 0
ORDER BY (SUM_NUMBER_OF_BYTES_READ + SUM_NUMBER_OF_BYTES_WRITE) DESC
LIMIT 20;
""",
            "日志性能监控": """
-- 查看重做日志性能
SELECT 
    'Log Writes' as metric,
    VARIABLE_VALUE as value
FROM performance_schema.global_status 
WHERE VARIABLE_NAME = 'Innodb_log_writes'
UNION ALL
SELECT 
    'Log Write Requests' as metric,
    VARIABLE_VALUE as value
FROM performance_schema.global_status 
WHERE VARIABLE_NAME = 'Innodb_log_write_requests'
UNION ALL
SELECT 
    'OS Log Fsyncs' as metric,
    VARIABLE_VALUE as value
FROM performance_schema.global_status 
WHERE VARIABLE_NAME = 'Innodb_os_log_fsyncs'
UNION ALL
SELECT 
    'Log Written (MB)' as metric,
    ROUND(VARIABLE_VALUE/1024/1024, 2) as value
FROM performance_schema.global_status 
WHERE VARIABLE_NAME = 'Innodb_os_log_written';

-- 计算日志写入效率
SELECT 
    ROUND(
        (log_writes.VARIABLE_VALUE / log_write_requests.VARIABLE_VALUE) * 100, 2
    ) as log_write_efficiency_percent
FROM 
    (SELECT VARIABLE_VALUE FROM performance_schema.global_status 
     WHERE VARIABLE_NAME = 'Innodb_log_writes') log_writes,
    (SELECT VARIABLE_VALUE FROM performance_schema.global_status 
     WHERE VARIABLE_NAME = 'Innodb_log_write_requests') log_write_requests;
"""
        }
        
        self.io_optimization_scripts = {
            "I/O性能测试脚本": """
#!/bin/bash
# MySQL I/O性能测试脚本

echo "=== MySQL I/O Performance Test ==="
echo "Started at: $(date)"
echo

# 检查存储设备类型
echo "=== Storage Device Information ==="
lsblk -d -o name,rota,type,size
echo "ROTA=1: HDD, ROTA=0: SSD"
echo

# 测试随机读IOPS
echo "=== Random Read IOPS Test ==="
fio --name=random-read --ioengine=libaio --rw=randread --bs=4k --size=1g --numjobs=4 --iodepth=32 --runtime=60 --time_based --group_reporting
echo

# 测试随机写IOPS
echo "=== Random Write IOPS Test ==="
fio --name=random-write --ioengine=libaio --rw=randwrite --bs=4k --size=1g --numjobs=4 --iodepth=32 --runtime=60 --time_based --group_reporting
echo

# 测试顺序读带宽
echo "=== Sequential Read Bandwidth Test ==="
fio --name=seq-read --ioengine=libaio --rw=read --bs=1m --size=4g --numjobs=1 --iodepth=1 --runtime=60 --time_based
echo

# 测试顺序写带宽
echo "=== Sequential Write Bandwidth Test ==="
fio --name=seq-write --ioengine=libaio --rw=write --bs=1m --size=4g --numjobs=1 --iodepth=1 --runtime=60 --time_based
echo

echo "Test completed at: $(date)"
""",
            "I/O配置优化脚本": """
#!/bin/bash
# MySQL I/O配置优化脚本

MYSQL_CNF="/etc/mysql/mysql.conf.d/mysqld.cnf"
BACKUP_CNF="/etc/mysql/mysql.conf.d/mysqld.cnf.backup.$(date +%Y%m%d_%H%M%S)"

echo "=== MySQL I/O Configuration Optimizer ==="
echo

# 备份原配置文件
cp "$MYSQL_CNF" "$BACKUP_CNF"
echo "Original configuration backed up to: $BACKUP_CNF"

# 检测存储类型
STORAGE_TYPE="unknown"
if lsblk -d -o name,rota | grep -q "0"; then
    STORAGE_TYPE="ssd"
    echo "Detected SSD storage"
elif lsblk -d -o name,rota | grep -q "1"; then
    STORAGE_TYPE="hdd"
    echo "Detected HDD storage"
fi

# 获取系统内存
TOTAL_RAM=$(free -g | awk 'NR==2{print $2}')
BUFFER_POOL_SIZE=$((TOTAL_RAM * 70 / 100))

echo "Total RAM: ${TOTAL_RAM}GB"
echo "Buffer Pool Size: ${BUFFER_POOL_SIZE}GB"
echo

# 生成I/O优化配置
cat >> "$MYSQL_CNF" << EOF

# I/O Optimization Configuration
# Generated on $(date)

# InnoDB Log Configuration
innodb_log_file_size = $(( BUFFER_POOL_SIZE / 4 > 0 ? BUFFER_POOL_SIZE / 4 : 1 ))G
innodb_log_files_in_group = 2
innodb_log_buffer_size = 64M

# Flush Configuration
innodb_flush_log_at_trx_commit = 1
sync_binlog = 1
innodb_flush_method = O_DIRECT

# I/O Capacity Configuration
EOF

if [ "$STORAGE_TYPE" = "ssd" ]; then
    cat >> "$MYSQL_CNF" << EOF
innodb_io_capacity = 4000
innodb_io_capacity_max = 8000
EOF
else
    cat >> "$MYSQL_CNF" << EOF
innodb_io_capacity = 400
innodb_io_capacity_max = 800
EOF
fi

cat >> "$MYSQL_CNF" << EOF

# I/O Threads
innodb_read_io_threads = 8
innodb_write_io_threads = 8

# File Configuration
innodb_file_per_table = ON
innodb_autoextend_increment = 64
EOF

echo "I/O optimization configuration added to $MYSQL_CNF"
echo "Please restart MySQL to apply the changes."
echo
echo "To restart MySQL:"
echo "sudo systemctl restart mysql"
""",
            "I/O监控脚本": """
#!/bin/bash
# MySQL I/O监控脚本

MYSQL_USER="monitor"
MYSQL_PASS="password"
MYSQL_HOST="localhost"

echo "=== MySQL I/O Performance Report ==="
echo "Generated at: $(date)"
echo

# 系统I/O统计
echo "=== System I/O Statistics ==="
iostat -x 1 3
echo

# MySQL I/O统计
echo "=== MySQL I/O Statistics ==="
mysql -u$MYSQL_USER -p$MYSQL_PASS -h$MYSQL_HOST -e "
SELECT 
    'Data Reads' as metric,
    VARIABLE_VALUE as value,
    'operations' as unit
FROM performance_schema.global_status 
WHERE VARIABLE_NAME = 'Innodb_data_reads'
UNION ALL
SELECT 
    'Data Writes' as metric,
    VARIABLE_VALUE as value,
    'operations' as unit
FROM performance_schema.global_status 
WHERE VARIABLE_NAME = 'Innodb_data_writes'
UNION ALL
SELECT 
    'Data Read' as metric,
    ROUND(VARIABLE_VALUE/1024/1024, 2) as value,
    'MB' as unit
FROM performance_schema.global_status 
WHERE VARIABLE_NAME = 'Innodb_data_read'
UNION ALL
SELECT 
    'Data Written' as metric,
    ROUND(VARIABLE_VALUE/1024/1024, 2) as value,
    'MB' as unit
FROM performance_schema.global_status 
WHERE VARIABLE_NAME = 'Innodb_data_written';"
echo

# 日志I/O统计
echo "=== Log I/O Statistics ==="
mysql -u$MYSQL_USER -p$MYSQL_PASS -h$MYSQL_HOST -e "
SELECT 
    'Log Writes' as metric,
    VARIABLE_VALUE as value
FROM performance_schema.global_status 
WHERE VARIABLE_NAME = 'Innodb_log_writes'
UNION ALL
SELECT 
    'Log Write Requests' as metric,
    VARIABLE_VALUE as value
FROM performance_schema.global_status 
WHERE VARIABLE_NAME = 'Innodb_log_write_requests'
UNION ALL
SELECT 
    'OS Log Fsyncs' as metric,
    VARIABLE_VALUE as value
FROM performance_schema.global_status 
WHERE VARIABLE_NAME = 'Innodb_os_log_fsyncs';"
echo

# I/O等待事件
echo "=== Top I/O Wait Events ==="
mysql -u$MYSQL_USER -p$MYSQL_PASS -h$MYSQL_HOST -e "
SELECT 
    EVENT_NAME,
    COUNT_STAR,
    ROUND(SUM_TIMER_WAIT/1000000000000, 2) as total_wait_seconds,
    ROUND(AVG_TIMER_WAIT/1000000000000, 4) as avg_wait_seconds
FROM performance_schema.events_waits_summary_global_by_event_name 
WHERE EVENT_NAME LIKE 'wait/io/%'
AND COUNT_STAR > 0
ORDER BY SUM_TIMER_WAIT DESC
LIMIT 10;"
"""
        }
    
    def show_io_parameters(self):
        """显示I/O参数"""
        print("MySQL I/O配置参数:")
        print("=" * 60)
        
        for category, parameters in self.io_parameters.items():
            print(f"\n{category}:")
            
            for param, details in parameters.items():
                print(f"\n  {param}:")
                print(f"    描述: {details['描述']}")
                
                if '推荐值' in details:
                    print(f"    推荐值: {details['推荐值']}")
                if '计算方法' in details:
                    print(f"    计算方法: {details['计算方法']}")
                if '单位' in details:
                    print(f"    单位: {details['单位']}")
                if '相关参数' in details:
                    print(f"    相关参数: {details['相关参数']}")
                if '注意事项' in details:
                    print(f"    注意事项: {details['注意事项']}")
                
                if '选项说明' in details:
                    print(f"    选项说明:")
                    for option, desc in details['选项说明'].items():
                        print(f"      {option}: {desc}")
                
                if '影响因素' in details:
                    print(f"    影响因素:")
                    for factor in details['影响因素']:
                        print(f"      • {factor}")
                
                if '优势' in details:
                    print(f"    优势:")
                    for advantage in details['优势']:
                        print(f"      • {advantage}")
                
                print(f"    配置示例:{details['配置示例']}")
    
    def show_monitoring(self):
        """显示监控方法"""
        print("\nI/O监控:")
        print("=" * 40)
        
        for category, content in self.io_monitoring.items():
            print(f"\n{category}:{content}")
    
    def show_scripts(self):
        """显示优化脚本"""
        print("\nI/O优化脚本:")
        print("=" * 40)
        
        for script_name, content in self.io_optimization_scripts.items():
            print(f"\n{script_name}:{content}")

# I/O优化演示
io_optimization_demo = MySQLIOOptimization()
io_optimization_demo.show_io_parameters()
io_optimization_demo.show_monitoring()
io_optimization_demo.show_scripts()
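
补充示例:重做日志容量(innodb_log_file_size / innodb_redo_log_capacity)也可以按实际写入量估算——常见经验是让重做日志能容纳约1小时的写入量,可通过两次采样 Log sequence number 的差值推算。下面是一个示意性的 Python 片段(假设已安装 pymysql,账号需要 PROCESS 权限,连接参数与采样时长均为示例值):

# 按 LSN 增长估算重做日志容量(示意代码)
import time
import re
import pymysql

def redo_capacity_estimate(interval_seconds=60, host="localhost",
                           user="monitor", password="password"):
    """采样两次 Log sequence number,推算每小时的重做日志写入量(GB)"""
    def current_lsn(conn):
        with conn.cursor() as cur:
            cur.execute("SHOW ENGINE INNODB STATUS")
            status_text = cur.fetchone()[2]          # 第三列为状态文本
        return int(re.search(r"Log sequence number\s+(\d+)", status_text).group(1))

    conn = pymysql.connect(host=host, user=user, password=password)
    try:
        lsn_start = current_lsn(conn)
        time.sleep(interval_seconds)
        lsn_end = current_lsn(conn)
    finally:
        conn.close()

    bytes_per_hour = (lsn_end - lsn_start) / interval_seconds * 3600
    return round(bytes_per_hour / 1024 / 1024 / 1024, 2)

if __name__ == "__main__":
    gb_per_hour = redo_capacity_estimate()
    print(f"按当前负载推算,每小时约产生 {gb_per_hour} GB 重做日志")
    print("若按“容纳约1小时写入”的经验估算,可将重做日志总容量设在该值附近")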

9.3.3 连接配置优化

class MySQLConnectionOptimization:
    def __init__(self):
        self.connection_parameters = {
            "连接数配置": {
                "max_connections": {
                    "描述": "最大并发连接数",
                    "推荐值": "根据应用需求和系统资源计算",
                    "计算方法": "(可用内存 - 全局缓存) / 每连接内存使用",
                    "影响因素": [
                        "应用并发用户数",
                        "连接池大小",
                        "系统内存容量",
                        "每连接内存使用量"
                    ],
                    "配置示例": """
# 基础Web应用
max_connections = 500

# 高并发应用
max_connections = 2000

# 大型企业应用
max_connections = 5000

# 监控连接使用情况
SHOW STATUS LIKE 'Connections';
SHOW STATUS LIKE 'Max_used_connections';
SHOW STATUS LIKE 'Threads_connected';

# 计算连接使用率
SELECT 
    @@max_connections as max_connections,
    (SELECT VARIABLE_VALUE FROM performance_schema.global_status 
     WHERE VARIABLE_NAME = 'Threads_connected') as current_connections,
    ROUND(
        (SELECT VARIABLE_VALUE FROM performance_schema.global_status 
         WHERE VARIABLE_NAME = 'Threads_connected') / @@max_connections * 100, 2
    ) as connection_usage_percent;
"""
                },
                "max_user_connections": {
                    "描述": "每个用户的最大连接数",
                    "推荐值": "max_connections的10-20%",
                    "用途": "防止单个用户占用过多连接",
                    "配置示例": """
# 基础配置
max_user_connections = 100

# 为特定用户设置连接限制
CREATE USER 'app_user'@'%' IDENTIFIED BY 'password';
ALTER USER 'app_user'@'%' WITH MAX_USER_CONNECTIONS 50;

# 查看用户连接限制
SELECT 
    User,
    Host,
    max_user_connections
FROM mysql.user 
WHERE max_user_connections > 0;

# 监控用户连接使用
SELECT 
    USER,
    HOST,
    COUNT(*) as connection_count
FROM information_schema.PROCESSLIST 
GROUP BY USER, HOST
ORDER BY connection_count DESC;
"""
                },
                "max_connect_errors": {
                    "描述": "主机连接错误的最大次数",
                    "推荐值": "100000-1000000",
                    "作用": "防止暴力破解攻击",
                    "配置示例": """
# 基础配置
max_connect_errors = 100000

# 更严格的阻断阈值
max_connect_errors = 10000

# 查看被阻止的主机
SHOW STATUS LIKE 'Aborted_connects';
SELECT * FROM performance_schema.host_cache;

# 清除主机缓存(MySQL 8.0.23起FLUSH HOSTS已弃用,推荐使用TRUNCATE)
FLUSH HOSTS;
TRUNCATE TABLE performance_schema.host_cache;
"""
                }
            },
            "连接超时配置": {
                "connect_timeout": {
                    "描述": "连接建立超时时间",
                    "推荐值": "10-30秒",
                    "单位": "秒",
                    "配置示例": """
# 基础配置
connect_timeout = 10

# 网络较慢的环境
connect_timeout = 30

# 监控连接超时
SHOW STATUS LIKE 'Aborted_connects';
"""
                },
                "wait_timeout": {
                    "描述": "非交互式连接的超时时间",
                    "推荐值": "300-3600秒(5分钟-1小时)",
                    "影响": "过长会占用连接资源,过短会频繁重连",
                    "配置示例": """
# Web应用配置
wait_timeout = 600  # 10分钟

# 长时间运行的应用
wait_timeout = 3600  # 1小时

# 短连接应用
wait_timeout = 300   # 5分钟

# 监控连接超时
SHOW STATUS LIKE 'Aborted_clients';
"""
                },
                "interactive_timeout": {
                    "描述": "交互式连接的超时时间",
                    "推荐值": "与wait_timeout相同或更长",
                    "适用场景": "mysql命令行客户端",
                    "配置示例": """
# 基础配置
interactive_timeout = 3600  # 1小时

# 开发环境(更长的超时)
interactive_timeout = 7200  # 2小时

# 查看当前会话超时设置
SHOW VARIABLES LIKE '%timeout%';
"""
                },
                "net_read_timeout": {
                    "描述": "从连接读取数据的超时时间",
                    "推荐值": "30-120秒",
                    "相关参数": "net_write_timeout",
                    "配置示例": """
# 基础配置
net_read_timeout = 30
net_write_timeout = 60

# 大数据传输场景
net_read_timeout = 120
net_write_timeout = 120

# 监控网络超时
SHOW STATUS LIKE 'Aborted_clients';
"""
                }
            },
            "连接池配置": {
                "thread_cache_size": {
                    "描述": "线程缓存大小,复用连接线程",
                    "推荐值": "8 + (max_connections / 100)",
                    "优化目标": "减少线程创建开销",
                    "配置示例": """
# 计算推荐值
# max_connections = 1000 时
thread_cache_size = 18

# max_connections = 2000 时
thread_cache_size = 28

# 监控线程缓存效率
SHOW STATUS LIKE 'Threads_%';
SHOW STATUS LIKE 'Connections';

# 计算线程缓存命中率
SELECT 
    connections.VARIABLE_VALUE as total_connections,
    threads_created.VARIABLE_VALUE as threads_created,
    ROUND(
        (connections.VARIABLE_VALUE - threads_created.VARIABLE_VALUE) / 
        connections.VARIABLE_VALUE * 100, 2
    ) as thread_cache_hit_rate_percent
FROM 
    (SELECT VARIABLE_VALUE FROM performance_schema.global_status 
     WHERE VARIABLE_NAME = 'Connections') connections,
    (SELECT VARIABLE_VALUE FROM performance_schema.global_status 
     WHERE VARIABLE_NAME = 'Threads_created') threads_created;
"""
                },
                "back_log": {
                    "描述": "连接请求队列大小",
                    "推荐值": "50 + (max_connections / 5)",
                    "最大值": "65535",
                    "配置示例": """
# 基础配置(max_connections = 1000)
back_log = 250

# 高并发配置(max_connections = 2000)
back_log = 450

# 查看连接队列状态
SHOW STATUS LIKE 'Aborted_connects';
"""
                }
            }
        }
        
        self.connection_monitoring = {
            "连接状态监控": """
-- 查看连接统计信息
SELECT 
    'Max Connections' as metric,
    @@max_connections as value
UNION ALL
SELECT 
    'Current Connections' as metric,
    (SELECT VARIABLE_VALUE FROM performance_schema.global_status 
     WHERE VARIABLE_NAME = 'Threads_connected') as value
UNION ALL
SELECT 
    'Max Used Connections' as metric,
    (SELECT VARIABLE_VALUE FROM performance_schema.global_status 
     WHERE VARIABLE_NAME = 'Max_used_connections') as value
UNION ALL
SELECT 
    'Total Connections' as metric,
    (SELECT VARIABLE_VALUE FROM performance_schema.global_status 
     WHERE VARIABLE_NAME = 'Connections') as value
UNION ALL
SELECT 
    'Aborted Connections' as metric,
    (SELECT VARIABLE_VALUE FROM performance_schema.global_status 
     WHERE VARIABLE_NAME = 'Aborted_connects') as value
UNION ALL
SELECT 
    'Aborted Clients' as metric,
    (SELECT VARIABLE_VALUE FROM performance_schema.global_status 
     WHERE VARIABLE_NAME = 'Aborted_clients') as value;

-- 查看当前连接详情
SELECT 
    ID,
    USER,
    HOST,
    DB,
    COMMAND,
    TIME,
    STATE,
    LEFT(INFO, 100) as QUERY_PREVIEW
FROM information_schema.PROCESSLIST 
WHERE COMMAND != 'Sleep'
ORDER BY TIME DESC;

-- 按用户统计连接
SELECT 
    USER,
    COUNT(*) as connection_count,
    AVG(TIME) as avg_time,
    MAX(TIME) as max_time
FROM information_schema.PROCESSLIST 
GROUP BY USER
ORDER BY connection_count DESC;

-- 按主机统计连接
SELECT 
    SUBSTRING_INDEX(HOST, ':', 1) as client_host,
    COUNT(*) as connection_count
FROM information_schema.PROCESSLIST 
GROUP BY SUBSTRING_INDEX(HOST, ':', 1)
ORDER BY connection_count DESC;
""",
            "连接性能分析": """
-- 分析连接建立性能
SELECT 
    EVENT_NAME,
    COUNT_STAR as connection_count,
    ROUND(SUM_TIMER_WAIT/1000000000000, 2) as total_time_seconds,
    ROUND(AVG_TIMER_WAIT/1000000000000, 4) as avg_time_seconds,
    ROUND(MAX_TIMER_WAIT/1000000000000, 4) as max_time_seconds
FROM performance_schema.events_waits_summary_global_by_event_name 
WHERE EVENT_NAME LIKE 'wait/io/socket/%'
AND COUNT_STAR > 0
ORDER BY SUM_TIMER_WAIT DESC;

-- 分析连接错误
SELECT 
    IP,
    HOST,
    HOST_VALIDATED,
    SUM_CONNECT_ERRORS,
    COUNT_HOST_BLOCKED_ERRORS,
    COUNT_NAMEINFO_TRANSIENT_ERRORS,
    COUNT_NAMEINFO_PERMANENT_ERRORS,
    COUNT_FORMAT_ERRORS,
    COUNT_ADDRINFO_TRANSIENT_ERRORS,
    COUNT_ADDRINFO_PERMANENT_ERRORS,
    COUNT_FCRDNS_ERRORS,
    COUNT_HOST_ACL_ERRORS,
    COUNT_NO_AUTH_PLUGIN_ERRORS,
    COUNT_AUTH_PLUGIN_ERRORS,
    COUNT_HANDSHAKE_ERRORS,
    COUNT_PROXY_USER_ERRORS,
    COUNT_PROXY_USER_ACL_ERRORS,
    COUNT_AUTHENTICATION_ERRORS,
    COUNT_SSL_ERRORS,
    COUNT_MAX_USER_CONNECTIONS_ERRORS,
    COUNT_MAX_USER_CONNECTIONS_PER_HOUR_ERRORS,
    COUNT_DEFAULT_DATABASE_ERRORS,
    COUNT_INIT_CONNECT_ERRORS,
    COUNT_LOCAL_ERRORS,
    COUNT_UNKNOWN_ERRORS
FROM performance_schema.host_cache
WHERE SUM_CONNECT_ERRORS > 0
ORDER BY SUM_CONNECT_ERRORS DESC;
"""
        }
        
        self.connection_optimization_scripts = {
            "连接配置优化脚本": """
#!/bin/bash
# MySQL连接配置优化脚本

MYSQL_CNF="/etc/mysql/mysql.conf.d/mysqld.cnf"
BACKUP_CNF="/etc/mysql/mysql.conf.d/mysqld.cnf.backup.$(date +%Y%m%d_%H%M%S)"

echo "=== MySQL Connection Configuration Optimizer ==="
echo

# 备份原配置文件
cp "$MYSQL_CNF" "$BACKUP_CNF"
echo "Original configuration backed up to: $BACKUP_CNF"

# 获取系统信息
TOTAL_RAM=$(free -g | awk 'NR==2{print $2}')
CPU_CORES=$(nproc)

echo "System Information:"
echo "  Total RAM: ${TOTAL_RAM}GB"
echo "  CPU Cores: ${CPU_CORES}"
echo

# 计算连接配置
if [ $TOTAL_RAM -le 4 ]; then
    MAX_CONNECTIONS=500
    THREAD_CACHE_SIZE=13
elif [ $TOTAL_RAM -le 8 ]; then
    MAX_CONNECTIONS=1000
    THREAD_CACHE_SIZE=18
elif [ $TOTAL_RAM -le 16 ]; then
    MAX_CONNECTIONS=2000
    THREAD_CACHE_SIZE=28
else
    MAX_CONNECTIONS=3000
    THREAD_CACHE_SIZE=38
fi

BACK_LOG=$((50 + MAX_CONNECTIONS / 5))
MAX_USER_CONNECTIONS=$((MAX_CONNECTIONS / 5))

echo "Calculated Configuration:"
echo "  max_connections: $MAX_CONNECTIONS"
echo "  thread_cache_size: $THREAD_CACHE_SIZE"
echo "  back_log: $BACK_LOG"
echo "  max_user_connections: $MAX_USER_CONNECTIONS"
echo

# 生成连接优化配置
cat >> "$MYSQL_CNF" << EOF

# Connection Optimization Configuration
# Generated on $(date)

# Connection Limits
max_connections = $MAX_CONNECTIONS
max_user_connections = $MAX_USER_CONNECTIONS
max_connect_errors = 100000

# Connection Timeouts
connect_timeout = 10
wait_timeout = 600
interactive_timeout = 3600
net_read_timeout = 30
net_write_timeout = 60

# Connection Pool
thread_cache_size = $THREAD_CACHE_SIZE
back_log = $BACK_LOG
EOF

echo "Connection optimization configuration added to $MYSQL_CNF"
echo "Please restart MySQL to apply the changes."
echo
echo "To restart MySQL:"
echo "sudo systemctl restart mysql"
""",
            "连接监控脚本": """
#!/bin/bash
# MySQL连接监控脚本

MYSQL_USER="monitor"
MYSQL_PASS="password"
MYSQL_HOST="localhost"

echo "=== MySQL Connection Monitoring Report ==="
echo "Generated at: $(date)"
echo

# 连接统计
echo "=== Connection Statistics ==="
mysql -u$MYSQL_USER -p$MYSQL_PASS -h$MYSQL_HOST -e "
SELECT 
    'Max Connections' as metric,
    @@max_connections as value
UNION ALL
SELECT 
    'Current Connections' as metric,
    (SELECT VARIABLE_VALUE FROM performance_schema.global_status 
     WHERE VARIABLE_NAME = 'Threads_connected') as value
UNION ALL
SELECT 
    'Max Used Connections' as metric,
    (SELECT VARIABLE_VALUE FROM performance_schema.global_status 
     WHERE VARIABLE_NAME = 'Max_used_connections') as value
UNION ALL
SELECT 
    'Connection Usage %' as metric,
    ROUND(
        (SELECT VARIABLE_VALUE FROM performance_schema.global_status 
         WHERE VARIABLE_NAME = 'Threads_connected') / @@max_connections * 100, 2
    ) as value;"
echo

# 线程缓存效率
echo "=== Thread Cache Efficiency ==="
mysql -u$MYSQL_USER -p$MYSQL_PASS -h$MYSQL_HOST -e "
SELECT 
    connections.VARIABLE_VALUE as total_connections,
    threads_created.VARIABLE_VALUE as threads_created,
    ROUND(
        (connections.VARIABLE_VALUE - threads_created.VARIABLE_VALUE) / 
        connections.VARIABLE_VALUE * 100, 2
    ) as thread_cache_hit_rate_percent
FROM 
    (SELECT VARIABLE_VALUE FROM performance_schema.global_status 
     WHERE VARIABLE_NAME = 'Connections') connections,
    (SELECT VARIABLE_VALUE FROM performance_schema.global_status 
     WHERE VARIABLE_NAME = 'Threads_created') threads_created;"
echo

# 连接错误统计
echo "=== Connection Errors ==="
mysql -u$MYSQL_USER -p$MYSQL_PASS -h$MYSQL_HOST -e "
SELECT 
    'Aborted Connections' as error_type,
    VARIABLE_VALUE as count
FROM performance_schema.global_status 
WHERE VARIABLE_NAME = 'Aborted_connects'
UNION ALL
SELECT 
    'Aborted Clients' as error_type,
    VARIABLE_VALUE as count
FROM performance_schema.global_status 
WHERE VARIABLE_NAME = 'Aborted_clients';"
echo

# 当前活跃连接
echo "=== Active Connections ==="
mysql -u$MYSQL_USER -p$MYSQL_PASS -h$MYSQL_HOST -e "
SELECT 
    USER,
    COUNT(*) as connection_count,
    AVG(TIME) as avg_time_seconds,
    MAX(TIME) as max_time_seconds
FROM information_schema.PROCESSLIST 
WHERE COMMAND != 'Sleep'
GROUP BY USER
ORDER BY connection_count DESC;"
echo

# 长时间运行的查询
echo "=== Long Running Queries ==="
mysql -u$MYSQL_USER -p$MYSQL_PASS -h$MYSQL_HOST -e "
SELECT 
    ID,
    USER,
    HOST,
    DB,
    COMMAND,
    TIME as duration_seconds,
    STATE,
    LEFT(INFO, 100) as query_preview
FROM information_schema.PROCESSLIST 
WHERE TIME > 60
AND COMMAND != 'Sleep'
ORDER BY TIME DESC;"
""",
            "连接池配置示例": """
# HikariCP (Java) 连接池配置示例
# application.properties
spring.datasource.hikari.maximum-pool-size=20
spring.datasource.hikari.minimum-idle=5
spring.datasource.hikari.connection-timeout=30000
spring.datasource.hikari.idle-timeout=600000
spring.datasource.hikari.max-lifetime=1800000
spring.datasource.hikari.leak-detection-threshold=60000

# C3P0 (Java) 连接池配置示例
c3p0.maxPoolSize=20
c3p0.minPoolSize=5
c3p0.initialPoolSize=5
c3p0.maxIdleTime=600
c3p0.acquireIncrement=2
c3p0.checkoutTimeout=30000
c3p0.idleConnectionTestPeriod=300

# Python SQLAlchemy 连接池配置示例
from sqlalchemy import create_engine
from sqlalchemy.pool import QueuePool

engine = create_engine(
    'mysql+pymysql://user:password@localhost/database',
    poolclass=QueuePool,
    pool_size=10,
    max_overflow=20,
    pool_pre_ping=True,
    pool_recycle=3600
)

# Node.js mysql2 连接池配置示例
const mysql = require('mysql2');

const pool = mysql.createPool({
    host: 'localhost',
    user: 'user',
    password: 'password',
    database: 'database',
    waitForConnections: true,
    connectionLimit: 10,
    queueLimit: 0,
    enableKeepAlive: true   // 旧 mysql 驱动的 acquireTimeout / reconnect 选项 mysql2 并不支持
});

# PHP PDO 持久连接配置示例(PHP原生没有连接池,持久连接是常用的近似方案)
$dsn = 'mysql:host=localhost;dbname=database;charset=utf8mb4';
$options = [
    PDO::ATTR_ERRMODE => PDO::ERRMODE_EXCEPTION,
    PDO::ATTR_DEFAULT_FETCH_MODE => PDO::FETCH_ASSOC,
    PDO::ATTR_EMULATE_PREPARES => false,
    PDO::ATTR_PERSISTENT => true,  // 启用持久连接
    PDO::MYSQL_ATTR_INIT_COMMAND => "SET NAMES utf8mb4"
];

$pdo = new PDO($dsn, $username, $password, $options);
"""
        }
    
    def show_connection_parameters(self):
        """显示连接参数"""
        print("MySQL连接配置参数:")
        print("=" * 60)
        
        for category, parameters in self.connection_parameters.items():
            print(f"\n{category}:")
            
            for param, details in parameters.items():
                print(f"\n  {param}:")
                print(f"    描述: {details['描述']}")
                
                if '推荐值' in details:
                    print(f"    推荐值: {details['推荐值']}")
                if '计算方法' in details:
                    print(f"    计算方法: {details['计算方法']}")
                if '单位' in details:
                    print(f"    单位: {details['单位']}")
                if '用途' in details:
                    print(f"    用途: {details['用途']}")
                if '作用' in details:
                    print(f"    作用: {details['作用']}")
                if '影响' in details:
                    print(f"    影响: {details['影响']}")
                if '适用场景' in details:
                    print(f"    适用场景: {details['适用场景']}")
                if '相关参数' in details:
                    print(f"    相关参数: {details['相关参数']}")
                if '优化目标' in details:
                    print(f"    优化目标: {details['优化目标']}")
                if '最大值' in details:
                    print(f"    最大值: {details['最大值']}")
                
                if '影响因素' in details:
                    print(f"    影响因素:")
                    for factor in details['影响因素']:
                        print(f"      • {factor}")
                
                print(f"    配置示例:{details['配置示例']}")
    
    def show_monitoring(self):
        """显示监控方法"""
        print("\n连接监控:")
        print("=" * 40)
        
        for category, content in self.connection_monitoring.items():
            print(f"\n{category}:{content}")
    
    def show_scripts(self):
        """显示优化脚本"""
        print("\n连接优化脚本:")
        print("=" * 40)
        
        for script_name, content in self.connection_optimization_scripts.items():
            print(f"\n{script_name}:{content}")

# 连接优化演示
connection_optimization_demo = MySQLConnectionOptimization()
connection_optimization_demo.show_connection_parameters()
connection_optimization_demo.show_monitoring()
connection_optimization_demo.show_scripts()
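
补充示例:按照上面“(可用内存 - 全局缓存) / 每连接内存使用”的思路,可以用一个简单的估算函数确定 max_connections 的上限,再据此推出 thread_cache_size。以下为示意代码,其中每连接内存取经验值(实际应结合 sort_buffer_size、join_buffer_size、线程栈等会话级参数核算):

# 按内存估算 max_connections 上限(示意代码,参数均为假设的经验值)
def estimate_max_connections(total_ram_gb,
                             buffer_pool_gb,
                             other_global_buffers_mb=512,
                             per_connection_mb=12,
                             os_reserved_gb=2):
    """按“(可用内存 - 全局缓存) / 每连接内存”粗略估算连接上限"""
    available_mb = (total_ram_gb - buffer_pool_gb - os_reserved_gb) * 1024
    available_mb -= other_global_buffers_mb
    if available_mb <= 0:
        return 0
    return int(available_mb // per_connection_mb)

if __name__ == "__main__":
    # 例:32GB 内存,24GB 缓冲池,每连接约 12MB
    limit = estimate_max_connections(total_ram_gb=32, buffer_pool_gb=24)
    print(f"建议 max_connections 不超过约 {limit}")
    print(f"对应 thread_cache_size 约 {8 + limit // 100}")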

9.3.4 查询缓存配置优化

class MySQLQueryCacheOptimization:
    def __init__(self):
        self.query_cache_parameters = {
            "查询缓存基础配置": {
                "query_cache_type": {
                    "描述": "查询缓存类型控制",
                    "选项说明": {
                        "0 (OFF)": "完全禁用查询缓存",
                        "1 (ON)": "启用查询缓存,除非查询包含SQL_NO_CACHE",
                        "2 (DEMAND)": "仅缓存包含SQL_CACHE的查询"
                    },
                    "推荐值": "0(MySQL 5.7.20+已弃用)",
                    "注意事项": "MySQL 8.0已完全移除查询缓存",
                    "配置示例": """
# MySQL 5.6/5.7 配置
# 禁用查询缓存(推荐)
query_cache_type = 0
query_cache_size = 0

# 启用查询缓存(不推荐)
query_cache_type = 1
query_cache_size = 128M
query_cache_limit = 2M

# 按需缓存
query_cache_type = 2
query_cache_size = 64M

# 查询缓存状态
SHOW VARIABLES LIKE 'query_cache%';
SHOW STATUS LIKE 'Qcache%';

# 使用SQL_CACHE提示
SELECT SQL_CACHE * FROM users WHERE status = 'active';

# 使用SQL_NO_CACHE提示
SELECT SQL_NO_CACHE * FROM users WHERE id = 1;
"""
                },
                "query_cache_size": {
                    "描述": "查询缓存内存大小",
                    "推荐值": "0(禁用)或64MB-256MB",
                    "计算方法": "根据查询模式和可用内存确定",
                    "影响因素": [
                        "查询重复率",
                        "表更新频率",
                        "可用内存大小",
                        "并发查询数量"
                    ],
                    "配置示例": """
# 禁用查询缓存(推荐)
query_cache_size = 0

# 小型应用
query_cache_size = 64M

# 中型应用
query_cache_size = 128M

# 大型应用(谨慎使用)
query_cache_size = 256M

# 监控缓存使用
SELECT 
    @@query_cache_size / 1024 / 1024 as cache_size_mb,
    (SELECT VARIABLE_VALUE FROM performance_schema.global_status 
     WHERE VARIABLE_NAME = 'Qcache_free_memory') / 1024 / 1024 as free_memory_mb,
    ROUND(
        (@@query_cache_size - 
         (SELECT VARIABLE_VALUE FROM performance_schema.global_status 
          WHERE VARIABLE_NAME = 'Qcache_free_memory')) / @@query_cache_size * 100, 2
    ) as usage_percent;
"""
                },
                "query_cache_limit": {
                    "描述": "单个查询结果的最大缓存大小",
                    "推荐值": "1MB-4MB",
                    "用途": "防止大结果集占用过多缓存空间",
                    "配置示例": """
# 基础配置
query_cache_limit = 2M

# 大结果集场景
query_cache_limit = 4M

# 小结果集场景
query_cache_limit = 1M

# 监控被拒绝的查询
SHOW STATUS LIKE 'Qcache_not_cached';
SHOW STATUS LIKE 'Qcache_queries_in_cache';
"""
                },
                "query_cache_min_res_unit": {
                    "描述": "查询缓存分配的最小内存单元",
                    "推荐值": "2KB-8KB",
                    "优化目标": "减少内存碎片",
                    "配置示例": """
# 小结果集优化
query_cache_min_res_unit = 2K

# 大结果集优化
query_cache_min_res_unit = 8K

# 平衡配置
query_cache_min_res_unit = 4K

# 监控内存碎片
SHOW STATUS LIKE 'Qcache_free_blocks';
SHOW STATUS LIKE 'Qcache_total_blocks';

# 计算碎片率
SELECT 
    free_blocks.VARIABLE_VALUE as free_blocks,
    total_blocks.VARIABLE_VALUE as total_blocks,
    ROUND(
        free_blocks.VARIABLE_VALUE / total_blocks.VARIABLE_VALUE * 100, 2
    ) as fragmentation_percent
FROM 
    (SELECT VARIABLE_VALUE FROM performance_schema.global_status 
     WHERE VARIABLE_NAME = 'Qcache_free_blocks') free_blocks,
    (SELECT VARIABLE_VALUE FROM performance_schema.global_status 
     WHERE VARIABLE_NAME = 'Qcache_total_blocks') total_blocks;
"""
                }
            }
        }
        
        self.query_cache_monitoring = {
            "查询缓存性能监控": """
-- 查询缓存统计信息
SELECT 
    'Cache Size (MB)' as metric,
    @@query_cache_size / 1024 / 1024 as value
UNION ALL
SELECT 
    'Cache Type' as metric,
    @@query_cache_type as value  -- 该变量本身即返回 OFF/ON/DEMAND
UNION ALL
SELECT 
    'Cache Limit (MB)' as metric,
    @@query_cache_limit / 1024 / 1024 as value
UNION ALL
SELECT 
    'Queries in Cache' as metric,
    (SELECT VARIABLE_VALUE FROM performance_schema.global_status 
     WHERE VARIABLE_NAME = 'Qcache_queries_in_cache') as value
UNION ALL
SELECT 
    'Cache Hits' as metric,
    (SELECT VARIABLE_VALUE FROM performance_schema.global_status 
     WHERE VARIABLE_NAME = 'Qcache_hits') as value
UNION ALL
SELECT 
    'Cache Inserts' as metric,
    (SELECT VARIABLE_VALUE FROM performance_schema.global_status 
     WHERE VARIABLE_NAME = 'Qcache_inserts') as value;

-- 计算缓存命中率
SELECT 
    hits.VARIABLE_VALUE as cache_hits,
    inserts.VARIABLE_VALUE as cache_inserts,
    not_cached.VARIABLE_VALUE as not_cached,
    ROUND(
        hits.VARIABLE_VALUE / 
        (hits.VARIABLE_VALUE + inserts.VARIABLE_VALUE + not_cached.VARIABLE_VALUE) * 100, 2
    ) as hit_rate_percent
FROM 
    (SELECT VARIABLE_VALUE FROM performance_schema.global_status 
     WHERE VARIABLE_NAME = 'Qcache_hits') hits,
    (SELECT VARIABLE_VALUE FROM performance_schema.global_status 
     WHERE VARIABLE_NAME = 'Qcache_inserts') inserts,
    (SELECT VARIABLE_VALUE FROM performance_schema.global_status 
     WHERE VARIABLE_NAME = 'Qcache_not_cached') not_cached;

-- 查询缓存内存使用
SELECT 
    'Total Size (MB)' as metric,
    @@query_cache_size / 1024 / 1024 as value
UNION ALL
SELECT 
    'Free Memory (MB)' as metric,
    (SELECT VARIABLE_VALUE FROM performance_schema.global_status 
     WHERE VARIABLE_NAME = 'Qcache_free_memory') / 1024 / 1024 as value
UNION ALL
SELECT 
    'Used Memory (MB)' as metric,
    (@@query_cache_size - 
     (SELECT VARIABLE_VALUE FROM performance_schema.global_status 
      WHERE VARIABLE_NAME = 'Qcache_free_memory')) / 1024 / 1024 as value
UNION ALL
SELECT 
    'Memory Usage %' as metric,
    ROUND(
        (@@query_cache_size - 
         (SELECT VARIABLE_VALUE FROM performance_schema.global_status 
          WHERE VARIABLE_NAME = 'Qcache_free_memory')) / @@query_cache_size * 100, 2
    ) as value;
""",
            "查询缓存效率分析": """
-- 分析缓存失效原因
SELECT 
    'Lowmem Prunes' as reason,
    (SELECT VARIABLE_VALUE FROM performance_schema.global_status 
     WHERE VARIABLE_NAME = 'Qcache_lowmem_prunes') as count,
    '内存不足导致的缓存清理' as description
UNION ALL
SELECT 
    'Not Cached' as reason,
    (SELECT VARIABLE_VALUE FROM performance_schema.global_status 
     WHERE VARIABLE_NAME = 'Qcache_not_cached') as count,
    '无法缓存的查询数量' as description;

-- 缓存碎片分析
SELECT 
    'Total Blocks' as metric,
    (SELECT VARIABLE_VALUE FROM performance_schema.global_status 
     WHERE VARIABLE_NAME = 'Qcache_total_blocks') as value
UNION ALL
SELECT 
    'Free Blocks' as metric,
    (SELECT VARIABLE_VALUE FROM performance_schema.global_status 
     WHERE VARIABLE_NAME = 'Qcache_free_blocks') as value
UNION ALL
SELECT 
    'Fragmentation %' as metric,
    ROUND(
        (SELECT VARIABLE_VALUE FROM performance_schema.global_status 
         WHERE VARIABLE_NAME = 'Qcache_free_blocks') / 
        (SELECT VARIABLE_VALUE FROM performance_schema.global_status 
         WHERE VARIABLE_NAME = 'Qcache_total_blocks') * 100, 2
    ) as value;
"""
        }
        
        self.query_cache_alternatives = {
            "现代缓存替代方案": """
# Redis 查询缓存实现(推荐)
import redis
import json
import hashlib

class RedisQueryCache:
    def __init__(self, redis_host='localhost', redis_port=6379, ttl=3600):
        self.redis_client = redis.Redis(host=redis_host, port=redis_port, decode_responses=True)
        self.ttl = ttl
    
    def get_cache_key(self, query, params=None):
        """生成缓存键"""
        cache_data = {'query': query, 'params': params or {}}
        cache_string = json.dumps(cache_data, sort_keys=True)
        return f"query_cache:{hashlib.md5(cache_string.encode()).hexdigest()}"
    
    def get(self, query, params=None):
        """获取缓存结果"""
        cache_key = self.get_cache_key(query, params)
        cached_result = self.redis_client.get(cache_key)
        if cached_result:
            return json.loads(cached_result)
        return None
    
    def set(self, query, result, params=None, ttl=None):
        """设置缓存结果"""
        cache_key = self.get_cache_key(query, params)
        cache_ttl = ttl or self.ttl
        self.redis_client.setex(cache_key, cache_ttl, json.dumps(result))
    
    def invalidate_pattern(self, pattern):
        """按模式清除缓存"""
        keys = self.redis_client.keys(f"query_cache:*{pattern}*")
        if keys:
            self.redis_client.delete(*keys)

# 使用示例
cache = RedisQueryCache()

# 查询前检查缓存
query = "SELECT * FROM users WHERE status = %s"
params = ['active']
result = cache.get(query, params)

if result is None:
    # 执行数据库查询
    result = execute_query(query, params)
    # 缓存结果
    cache.set(query, result, params, ttl=1800)

print(result)
""",
            "Memcached 缓存实现": """
# Memcached 查询缓存实现
import memcache
import json
import hashlib

class MemcachedQueryCache:
    def __init__(self, servers=['127.0.0.1:11211'], ttl=3600):
        self.mc = memcache.Client(servers)
        self.ttl = ttl
    
    def get_cache_key(self, query, params=None):
        """生成缓存键"""
        cache_data = {'query': query, 'params': params or {}}
        cache_string = json.dumps(cache_data, sort_keys=True)
        return f"qc_{hashlib.md5(cache_string.encode()).hexdigest()}"
    
    def get(self, query, params=None):
        """获取缓存结果"""
        cache_key = self.get_cache_key(query, params)
        return self.mc.get(cache_key)
    
    def set(self, query, result, params=None, ttl=None):
        """设置缓存结果"""
        cache_key = self.get_cache_key(query, params)
        cache_ttl = ttl or self.ttl
        self.mc.set(cache_key, result, time=cache_ttl)
    
    def delete(self, query, params=None):
        """删除特定缓存"""
        cache_key = self.get_cache_key(query, params)
        self.mc.delete(cache_key)

# 使用示例
cache = MemcachedQueryCache()

# 装饰器实现
def cached_query(ttl=3600):
    def decorator(func):
        def wrapper(query, params=None):
            result = cache.get(query, params)
            if result is None:
                result = func(query, params)
                cache.set(query, result, params, ttl)
            return result
        return wrapper
    return decorator

@cached_query(ttl=1800)
def execute_cached_query(query, params=None):
    return execute_query(query, params)
""",
            "应用层缓存策略": """
# 应用层查询缓存策略

# 1. 基于时间的缓存失效
import time

class TimeBasedCache:
    def __init__(self, ttl=3600):
        self.cache = {}
        self.ttl = ttl
    
    def get(self, key):
        if key in self.cache:
            data, timestamp = self.cache[key]
            if time.time() - timestamp < self.ttl:
                return data
            else:
                del self.cache[key]
        return None
    
    def set(self, key, value):
        self.cache[key] = (value, time.time())

# 2. 基于版本的缓存失效
class VersionBasedCache:
    def __init__(self):
        self.cache = {}
        self.table_versions = {}
    
    def get(self, key, tables):
        if key in self.cache:
            data, versions = self.cache[key]
            # 检查相关表的版本
            if all(self.table_versions.get(table, 0) == versions.get(table, 0) 
                   for table in tables):
                return data
            else:
                del self.cache[key]
        return None
    
    def set(self, key, value, tables):
        versions = {table: self.table_versions.get(table, 0) for table in tables}
        self.cache[key] = (value, versions)
    
    def invalidate_table(self, table):
        self.table_versions[table] = self.table_versions.get(table, 0) + 1
        # 清理相关缓存
        keys_to_delete = []
        for key, (data, versions) in self.cache.items():
            if table in versions:
                keys_to_delete.append(key)
        for key in keys_to_delete:
            del self.cache[key]

# 3. LRU缓存实现
from collections import OrderedDict

class LRUCache:
    def __init__(self, capacity=1000):
        self.cache = OrderedDict()
        self.capacity = capacity
    
    def get(self, key):
        if key in self.cache:
            # 移动到末尾(最近使用)
            self.cache.move_to_end(key)
            return self.cache[key]
        return None
    
    def set(self, key, value):
        if key in self.cache:
            self.cache.move_to_end(key)
        elif len(self.cache) >= self.capacity:
            # 删除最久未使用的项
            self.cache.popitem(last=False)
        self.cache[key] = value
"""
        }
    
    def show_query_cache_parameters(self):
        """显示查询缓存参数"""
        print("MySQL查询缓存配置参数:")
        print("=" * 60)
        
        for category, parameters in self.query_cache_parameters.items():
            print(f"\n{category}:")
            
            for param, details in parameters.items():
                print(f"\n  {param}:")
                print(f"    描述: {details['描述']}")
                
                if '推荐值' in details:
                    print(f"    推荐值: {details['推荐值']}")
                if '计算方法' in details:
                    print(f"    计算方法: {details['计算方法']}")
                if '用途' in details:
                    print(f"    用途: {details['用途']}")
                if '优化目标' in details:
                    print(f"    优化目标: {details['优化目标']}")
                if '注意事项' in details:
                    print(f"    注意事项: {details['注意事项']}")
                
                if '选项说明' in details:
                    print(f"    选项说明:")
                    for option, desc in details['选项说明'].items():
                        print(f"      {option}: {desc}")
                
                if '影响因素' in details:
                    print(f"    影响因素:")
                    for factor in details['影响因素']:
                        print(f"      • {factor}")
                
                print(f"    配置示例:{details['配置示例']}")
    
    def show_monitoring(self):
        """显示监控方法"""
        print("\n查询缓存监控:")
        print("=" * 40)
        
        for category, content in self.query_cache_monitoring.items():
            print(f"\n{category}:{content}")
    
    def show_alternatives(self):
        """显示替代方案"""
        print("\n查询缓存替代方案:")
        print("=" * 40)
        
        for solution_name, content in self.query_cache_alternatives.items():
            print(f"\n{solution_name}:{content}")

# 查询缓存优化演示
query_cache_demo = MySQLQueryCacheOptimization()
query_cache_demo.show_query_cache_parameters()
query_cache_demo.show_monitoring()
query_cache_demo.show_alternatives()
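
补充说明:上面的 RedisQueryCache 以整条查询的 MD5 作为键,invalidate_pattern 按表名匹配时实际上匹配不到任何键;若需要“写库后按表失效”,常见做法是把表名并入缓存键。下面是一个独立的示意实现(redis 连接参数取默认值,仅演示键设计思路):

# 键中携带表名、支持按表失效的查询缓存(示意代码)
import json
import hashlib
import redis

class TableTaggedQueryCache:
    """键格式为 query_cache:<table>:<md5>,便于按表整体失效"""

    def __init__(self, table, ttl=1800):
        self.redis_client = redis.Redis(decode_responses=True)
        self.table = table
        self.ttl = ttl

    def _key(self, query, params=None):
        digest = hashlib.md5(
            json.dumps({"q": query, "p": params or []}, sort_keys=True).encode()
        ).hexdigest()
        return f"query_cache:{self.table}:{digest}"

    def get(self, query, params=None):
        raw = self.redis_client.get(self._key(query, params))
        return json.loads(raw) if raw else None

    def set(self, query, result, params=None):
        self.redis_client.setex(self._key(query, params), self.ttl, json.dumps(result))

    def invalidate_table(self):
        """写操作之后调用,清除该表相关的全部缓存键"""
        keys = self.redis_client.keys(f"query_cache:{self.table}:*")
        if keys:
            self.redis_client.delete(*keys)

生产环境中 keys 命令会阻塞 Redis,键空间较大时可改用 SCAN 迭代,或为每张表维护一个键集合。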

9.3.5 系统级配置优化

class MySQLSystemOptimization:
    def __init__(self):
        self.system_parameters = {
            "操作系统优化": {
                "文件系统选择": {
                    "推荐文件系统": "ext4, XFS",
                    "挂载选项": "noatime,nobarrier",
                    "优化建议": [
                        "使用noatime减少访问时间更新",
                        "使用nobarrier提高写入性能(注意数据安全)",
                        "考虑使用XFS处理大文件",
                        "避免使用ext3(性能较差)"
                    ],
                    "配置示例": """
# /etc/fstab 配置示例
/dev/sdb1 /var/lib/mysql ext4 defaults,noatime,nobarrier 0 2

# 或者使用XFS
/dev/sdb1 /var/lib/mysql xfs defaults,noatime,nobarrier 0 2

# 检查当前挂载选项
mount | grep mysql

# 重新挂载应用选项
sudo mount -o remount,noatime,nobarrier /var/lib/mysql

# 检查文件系统性能
sudo hdparm -tT /dev/sdb1
"""
                },
                "内核参数优化": {
                    "vm.swappiness": {
                        "描述": "控制系统使用swap的倾向",
                        "推荐值": "1-10",
                        "默认值": "60",
                        "影响": "降低swap使用,提高数据库性能"
                    },
                    "vm.dirty_ratio": {
                        "描述": "脏页占系统内存的百分比阈值",
                        "推荐值": "5-15",
                        "默认值": "20",
                        "影响": "控制内存脏页刷新频率"
                    },
                    "vm.dirty_background_ratio": {
                        "描述": "后台刷新脏页的内存百分比阈值",
                        "推荐值": "2-5",
                        "默认值": "10",
                        "影响": "提前开始后台刷新,减少阻塞"
                    },
                    "配置示例": """
# /etc/sysctl.conf 配置
# 减少swap使用
vm.swappiness = 1

# 控制脏页刷新
vm.dirty_ratio = 10
vm.dirty_background_ratio = 3

# 网络优化
net.core.rmem_max = 16777216
net.core.wmem_max = 16777216
net.ipv4.tcp_rmem = 4096 87380 16777216
net.ipv4.tcp_wmem = 4096 65536 16777216

# 文件描述符限制
fs.file-max = 2097152

# 应用配置
sudo sysctl -p

# 查看当前配置
sysctl vm.swappiness
sysctl vm.dirty_ratio
sysctl vm.dirty_background_ratio

# 临时修改(重启后失效)
echo 1 > /proc/sys/vm/swappiness
echo 10 > /proc/sys/vm/dirty_ratio
echo 3 > /proc/sys/vm/dirty_background_ratio
"""
                },
                "资源限制优化": {
                    "文件描述符限制": {
                        "推荐值": "65536或更高",
                        "检查命令": "ulimit -n",
                        "影响": "限制MySQL可打开的文件数量"
                    },
                    "进程限制": {
                        "推荐值": "32768或更高",
                        "检查命令": "ulimit -u",
                        "影响": "限制MySQL可创建的线程数量"
                    },
                    "配置示例": """
# /etc/security/limits.conf 配置
mysql soft nofile 65536
mysql hard nofile 65536
mysql soft nproc 32768
mysql hard nproc 32768

# 或者为所有用户设置
* soft nofile 65536
* hard nofile 65536
* soft nproc 32768
* hard nproc 32768

# systemd 服务限制
# /etc/systemd/system/mysql.service.d/limits.conf
[Service]
LimitNOFILE=65536
LimitNPROC=32768

# 重新加载systemd配置
sudo systemctl daemon-reload
sudo systemctl restart mysql

# 检查当前限制
ulimit -n  # 文件描述符
ulimit -u  # 进程数

# 检查MySQL进程的限制
cat /proc/$(pgrep mysqld)/limits
"""
                }
            },
            "硬件优化建议": {
                "CPU优化": {
                    "推荐配置": "多核心,高主频",
                    "优化建议": [
                        "选择高主频CPU提高单线程性能",
                        "多核心支持并发查询处理",
                        "启用超线程技术",
                        "考虑NUMA架构优化"
                    ],
                    "NUMA优化": """
# 检查NUMA配置
numactl --hardware
numastat

# 禁用NUMA(如果必要)
# 在GRUB配置中添加
# /etc/default/grub
GRUB_CMDLINE_LINUX="numa=off"

# 或者绑定MySQL到特定NUMA节点
numactl --cpunodebind=0 --membind=0 mysqld

# 在MySQL配置中
[mysqld]
# 在NUMA节点间交错分配缓冲池内存,缓解单节点内存耗尽
innodb_numa_interleave = 1

# 检查MySQL进程的NUMA使用
numastat -p $(pgrep mysqld)
"""
                },
                "内存优化": {
                    "推荐配置": "ECC内存,足够容量",
                    "容量规划": [
                        "数据集大小 + 索引大小 + 缓冲区",
                        "预留系统和其他应用内存",
                        "考虑未来增长空间"
                    ],
                    "优化建议": """
# 内存容量规划公式
# 总内存 = 数据大小 + 索引大小 + InnoDB缓冲池 + 其他缓冲区 + 系统预留

# 查看数据和索引大小
SELECT 
    ROUND(SUM(DATA_LENGTH)/1024/1024/1024, 2) as data_gb,
    ROUND(SUM(INDEX_LENGTH)/1024/1024/1024, 2) as index_gb,
    ROUND(SUM(DATA_LENGTH + INDEX_LENGTH)/1024/1024/1024, 2) as total_gb
FROM information_schema.TABLES 
WHERE ENGINE = 'InnoDB';

# 内存使用监控
free -h
cat /proc/meminfo

# MySQL内存使用分析
SELECT 
    ROUND(SUM(CAST(VARIABLE_VALUE AS UNSIGNED))/1024/1024, 2) as memory_mb
FROM performance_schema.global_variables 
WHERE VARIABLE_NAME IN (
    'innodb_buffer_pool_size',
    'key_buffer_size',
    'query_cache_size',
    'tmp_table_size',
    'max_heap_table_size'
);
"""
                },
                "存储优化": {
                    "推荐配置": "SSD存储,RAID配置",
                    "RAID建议": {
                        "RAID 10": "最佳性能和冗余平衡",
                        "RAID 5": "适合读多写少场景",
                        "RAID 0": "最高性能但无冗余"
                    },
                    "优化建议": """
# 存储性能测试
# 随机读写IOPS测试
fio --name=random-rw --ioengine=libaio --rw=randrw --bs=4k --size=1g --numjobs=4 --iodepth=32 --runtime=60 --time_based --group_reporting

# 顺序读写带宽测试
fio --name=seq-rw --ioengine=libaio --rw=rw --bs=1m --size=4g --numjobs=1 --iodepth=1 --runtime=60 --time_based

# 检查磁盘调度器
cat /sys/block/sda/queue/scheduler

# 设置适合SSD的调度器(blk-mq内核对应的取值为 none / mq-deadline)
echo noop > /sys/block/sda/queue/scheduler
# 或者
echo deadline > /sys/block/sda/queue/scheduler

# 永久设置(在/etc/default/grub中;内核5.0+已移除elevator=参数,需改用udev规则)
GRUB_CMDLINE_LINUX="elevator=noop"

# 检查磁盘对齐
sudo fdisk -l /dev/sda

# SSD优化
# 启用TRIM
sudo fstrim -v /var/lib/mysql

# 定期TRIM(添加到crontab)
0 2 * * 0 /sbin/fstrim -v /var/lib/mysql
"""
                }
            }
        }
        
        self.system_monitoring = {
            "系统性能监控": """
# 系统资源监控脚本
#!/bin/bash

echo "=== System Performance Report ==="
echo "Generated at: $(date)"
echo

# CPU使用率
echo "=== CPU Usage ==="
top -bn1 | grep "Cpu(s)" | awk '{print "CPU Usage: " $2}'
echo "Load Average: $(uptime | awk -F'load average:' '{print $2}')"
echo "CPU Cores: $(nproc)"
echo

# 内存使用
echo "=== Memory Usage ==="
free -h
echo
echo "Memory Usage Percentage:"
free | awk 'NR==2{printf "Used: %.2f%%, Available: %.2f%%\n", $3*100/$2, $7*100/$2}'
echo

# 磁盘I/O
echo "=== Disk I/O ==="
iostat -x 1 3 | tail -n +4
echo

# 网络统计
echo "=== Network Statistics ==="
ss -tuln | grep :3306
netstat -i
echo

# 系统负载
echo "=== System Load ==="
w
echo

# 进程信息
echo "=== MySQL Process Info ==="
ps aux | grep mysql | grep -v grep
echo

# 文件描述符使用
echo "=== File Descriptor Usage ==="
echo "System limit: $(cat /proc/sys/fs/file-max)"
echo "Current usage: $(cat /proc/sys/fs/file-nr | awk '{print $1}')"
echo "MySQL process limits:"
cat /proc/$(pgrep mysqld)/limits | grep "Max open files"
""",
            "性能瓶颈诊断": """
# 性能瓶颈诊断脚本
#!/bin/bash

echo "=== MySQL Performance Bottleneck Analysis ==="
echo

# CPU瓶颈检查
echo "=== CPU Bottleneck Check ==="
CPU_USAGE=$(top -bn1 | grep "Cpu(s)" | awk '{print $2}' | sed 's/%us,//')
LOAD_AVG=$(uptime | awk -F'load average:' '{print $2}' | awk -F',' '{print $1}' | tr -d ' ')
CPU_CORES=$(nproc)

echo "CPU Usage: ${CPU_USAGE}%"
echo "Load Average: ${LOAD_AVG}"
echo "CPU Cores: ${CPU_CORES}"

if (( $(echo "$LOAD_AVG > $CPU_CORES" | bc -l) )); then
    echo "WARNING: High load average detected - possible CPU bottleneck"
fi
echo

# 内存瓶颈检查
echo "=== Memory Bottleneck Check ==="
MEM_USAGE=$(free | awk 'NR==2{printf "%.2f", $3*100/$2}')
SWAP_USAGE=$(free | awk 'NR==3{if ($2 > 0) printf "%.2f", $3*100/$2; else printf "0.00"}')

echo "Memory Usage: ${MEM_USAGE}%"
echo "Swap Usage: ${SWAP_USAGE}%"

if (( $(echo "$MEM_USAGE > 90" | bc -l) )); then
    echo "WARNING: High memory usage detected - possible memory bottleneck"
fi

if (( $(echo "$SWAP_USAGE > 10" | bc -l) )); then
    echo "WARNING: Swap usage detected - memory pressure"
fi
echo

# I/O瓶颈检查
echo "=== I/O Bottleneck Check ==="
IOSTAT_OUTPUT=$(iostat -x 1 2 | tail -n +7 | head -n -1)
echo "$IOSTAT_OUTPUT"

# 检查I/O等待时间
IOWAIT=$(top -bn1 | grep "Cpu(s)" | awk '{print $5}' | sed 's/%wa,//')
echo "I/O Wait: ${IOWAIT}%"

if (( $(echo "$IOWAIT > 20" | bc -l) )); then
    echo "WARNING: High I/O wait detected - possible I/O bottleneck"
fi
echo

# 网络瓶颈检查
echo "=== Network Bottleneck Check ==="
NETSTAT_OUTPUT=$(netstat -i | grep -v "Kernel\|Iface\|lo")
echo "$NETSTAT_OUTPUT"

# 检查连接数
CONNECTIONS=$(ss -tuln | grep :3306 | wc -l)
echo "MySQL Connections: $CONNECTIONS"
echo

echo "=== Recommendations ==="
if (( $(echo "$LOAD_AVG > $CPU_CORES" | bc -l) )); then
    echo "- Consider CPU upgrade or query optimization"
fi

if (( $(echo "$MEM_USAGE > 90" | bc -l) )); then
    echo "- Consider memory upgrade or buffer pool optimization"
fi

if (( $(echo "$IOWAIT > 20" | bc -l) )); then
    echo "- Consider SSD upgrade or I/O optimization"
fi

if (( $(echo "$SWAP_USAGE > 10" | bc -l) )); then
    echo "- Reduce memory usage or increase physical memory"
fi
"""
        }
        
        self.optimization_scripts = {
            "系统优化脚本": """
#!/bin/bash
# MySQL系统优化脚本

echo "=== MySQL System Optimization Script ==="
echo "This script will optimize system settings for MySQL"
echo "Please run as root or with sudo privileges"
echo

# 检查权限
if [ "$EUID" -ne 0 ]; then
    echo "Please run as root or with sudo"
    exit 1
fi

# 备份原始配置
cp /etc/sysctl.conf /etc/sysctl.conf.backup.$(date +%Y%m%d_%H%M%S)
cp /etc/security/limits.conf /etc/security/limits.conf.backup.$(date +%Y%m%d_%H%M%S)

echo "Original configurations backed up"
echo

# 优化内核参数
echo "=== Optimizing Kernel Parameters ==="
cat >> /etc/sysctl.conf << EOF

# MySQL Optimization Parameters
# Added on $(date)

# Memory Management
vm.swappiness = 1
vm.dirty_ratio = 10
vm.dirty_background_ratio = 3

# Network Optimization
net.core.rmem_max = 16777216
net.core.wmem_max = 16777216
net.ipv4.tcp_rmem = 4096 87380 16777216
net.ipv4.tcp_wmem = 4096 65536 16777216
net.ipv4.tcp_congestion_control = bbr

# File System
fs.file-max = 2097152
fs.aio-max-nr = 1048576
EOF

# 应用内核参数
sysctl -p
echo "Kernel parameters optimized"
echo

# 优化资源限制
echo "=== Optimizing Resource Limits ==="
cat >> /etc/security/limits.conf << EOF

# MySQL Resource Limits
# Added on $(date)
mysql soft nofile 65536
mysql hard nofile 65536
mysql soft nproc 32768
mysql hard nproc 32768

# Global limits
* soft nofile 65536
* hard nofile 65536
* soft nproc 32768
* hard nproc 32768
EOF

echo "Resource limits optimized"
echo

# 优化磁盘调度器
echo "=== Optimizing Disk Scheduler ==="
for disk in $(lsblk -d -n -o NAME | grep -E '^sd|^nvme'); do
    if [ -f "/sys/block/$disk/queue/scheduler" ]; then
        echo "Optimizing scheduler for $disk"
        echo noop > /sys/block/$disk/queue/scheduler 2>/dev/null ||
        echo deadline > /sys/block/$disk/queue/scheduler 2>/dev/null ||
        echo none > /sys/block/$disk/queue/scheduler 2>/dev/null ||
        echo mq-deadline > /sys/block/$disk/queue/scheduler 2>/dev/null
        echo "Current scheduler for $disk: $(cat /sys/block/$disk/queue/scheduler)"
    fi
done
echo

# 创建systemd配置目录
mkdir -p /etc/systemd/system/mysql.service.d

# 优化MySQL服务限制
echo "=== Optimizing MySQL Service Limits ==="
cat > /etc/systemd/system/mysql.service.d/limits.conf << EOF
[Service]
LimitNOFILE=65536
LimitNPROC=32768
LimitMEMLOCK=infinity
EOF

# 重新加载systemd
systemctl daemon-reload
echo "MySQL service limits optimized"
echo

# 优化透明大页
echo "=== Disabling Transparent Huge Pages ==="
echo never > /sys/kernel/mm/transparent_hugepage/enabled
echo never > /sys/kernel/mm/transparent_hugepage/defrag

# 永久禁用透明大页
cat >> /etc/rc.local << EOF
# Disable Transparent Huge Pages for MySQL
echo never > /sys/kernel/mm/transparent_hugepage/enabled
echo never > /sys/kernel/mm/transparent_hugepage/defrag
EOF

chmod +x /etc/rc.local
echo "Transparent Huge Pages disabled"
echo

echo "=== Optimization Complete ==="
echo "Please reboot the system to apply all changes"
echo "After reboot, restart MySQL service:"
echo "sudo systemctl restart mysql"
echo
echo "To verify optimizations:"
echo "- Check kernel parameters: sysctl vm.swappiness"
echo "- Check file limits: ulimit -n"
echo "- Check MySQL limits: cat /proc/\$(pgrep mysqld)/limits"
""",
            "性能监控脚本": """
#!/bin/bash
# MySQL性能持续监控脚本

MONITOR_INTERVAL=60  # 监控间隔(秒)
LOG_FILE="/var/log/mysql_performance.log"
ALERT_EMAIL="admin@example.com"

# 创建日志文件
touch $LOG_FILE

echo "=== MySQL Performance Monitor Started ==="
echo "Monitoring interval: ${MONITOR_INTERVAL} seconds"
echo "Log file: $LOG_FILE"
echo "Press Ctrl+C to stop"
echo

while true; do
    TIMESTAMP=$(date '+%Y-%m-%d %H:%M:%S')
    
    # 获取系统指标
    CPU_USAGE=$(top -bn1 | grep "Cpu(s)" | awk '{print $2}' | sed 's/%us,//')
    LOAD_AVG=$(uptime | awk -F'load average:' '{print $2}' | awk -F',' '{print $1}' | tr -d ' ')
    MEM_USAGE=$(free | awk 'NR==2{printf "%.2f", $3*100/$2}')
    SWAP_USAGE=$(free | awk 'NR==3{if ($2 > 0) printf "%.2f", $3*100/$2; else printf "0.00"}')
    IOWAIT=$(vmstat 1 2 | tail -1 | awk '{print $16}')
    
    # 获取MySQL指标
    MYSQL_CONNECTIONS=$(mysql -e "SHOW STATUS LIKE 'Threads_connected';" 2>/dev/null | tail -1 | awk '{print $2}')
    MYSQL_QPS=$(mysql -e "SHOW STATUS LIKE 'Questions';" 2>/dev/null | tail -1 | awk '{print $2}')
    MYSQL_CONNECTIONS=${MYSQL_CONNECTIONS:-N/A}
    MYSQL_QPS=${MYSQL_QPS:-N/A}
    
    # 记录到日志
    echo "$TIMESTAMP,CPU:$CPU_USAGE,LOAD:$LOAD_AVG,MEM:$MEM_USAGE,SWAP:$SWAP_USAGE,IOWAIT:$IOWAIT,CONN:$MYSQL_CONNECTIONS,QPS:$MYSQL_QPS" >> $LOG_FILE
    
    # 检查告警条件
    ALERT_MSG=""
    
    if (( $(echo "$CPU_USAGE > 80" | bc -l) )); then
        ALERT_MSG="$ALERT_MSG High CPU usage: ${CPU_USAGE}%\n"
    fi
    
    if (( $(echo "$MEM_USAGE > 90" | bc -l) )); then
        ALERT_MSG="$ALERT_MSG High memory usage: ${MEM_USAGE}%\n"
    fi
    
    if (( $(echo "$SWAP_USAGE > 10" | bc -l) )); then
        ALERT_MSG="$ALERT_MSG Swap usage detected: ${SWAP_USAGE}%\n"
    fi
    
    if (( $(echo "$IOWAIT > 20" | bc -l) )); then
        ALERT_MSG="$ALERT_MSG High I/O wait: ${IOWAIT}%\n"
    fi
    
    # 发送告警
    if [ ! -z "$ALERT_MSG" ]; then
        echo -e "MySQL Performance Alert at $TIMESTAMP:\n$ALERT_MSG" | mail -s "MySQL Performance Alert" $ALERT_EMAIL 2>/dev/null
        echo "ALERT: $ALERT_MSG" | tr -d '\n'
        echo
    fi
    
    # 显示当前状态
    printf "\r%s - CPU:%s%% MEM:%s%% SWAP:%s%% IO:%s%% CONN:%s" "$TIMESTAMP" "$CPU_USAGE" "$MEM_USAGE" "$SWAP_USAGE" "$IOWAIT" "$MYSQL_CONNECTIONS"
    
    sleep $MONITOR_INTERVAL
done
"""
        }
    
    def show_system_parameters(self):
        """显示系统参数"""
        print("MySQL系统级配置优化:")
        print("=" * 60)
        
        for category, parameters in self.system_parameters.items():
            print(f"\n{category}:")
            
            if isinstance(parameters, dict):
                for param, details in parameters.items():
                    print(f"\n  {param}:")
                    
                    if isinstance(details, dict):
                        for key, value in details.items():
                            if isinstance(value, list):
                                print(f"    {key}:")
                                for item in value:
                                    print(f"      • {item}")
                            elif isinstance(value, dict):
                                print(f"    {key}:")
                                for subkey, subvalue in value.items():
                                    print(f"      {subkey}: {subvalue}")
                            else:
                                print(f"    {key}: {value}")
                    else:
                        print(f"    {details}")
    
    def show_monitoring(self):
        """显示监控方法"""
        print("\n系统监控:")
        print("=" * 40)
        
        for category, content in self.system_monitoring.items():
            print(f"\n{category}:{content}")
    
    def show_scripts(self):
        """显示优化脚本"""
        print("\n系统优化脚本:")
        print("=" * 40)
        
        for script_name, content in self.optimization_scripts.items():
            print(f"\n{script_name}:{content}")

# 系统优化演示
system_optimization_demo = MySQLSystemOptimization()
system_optimization_demo.show_system_parameters()
system_optimization_demo.show_monitoring()
system_optimization_demo.show_scripts()

9.4 监控系统实践

9.4.1 Performance Schema 监控

class MySQLPerformanceSchemaMonitoring:
    def __init__(self):
        self.performance_schema_queries = {
            "连接监控": {
                "当前连接统计": """
-- 当前连接数和状态
SELECT 
    processlist_state,
    COUNT(*) as connection_count,
    ROUND(COUNT(*) * 100.0 / (SELECT COUNT(*) FROM performance_schema.processlist), 2) as percentage
FROM performance_schema.processlist 
WHERE processlist_command != 'Daemon'
GROUP BY processlist_state
ORDER BY connection_count DESC;

-- 连接来源分析
SELECT 
    processlist_host,
    processlist_user,
    COUNT(*) as connection_count,
    GROUP_CONCAT(DISTINCT processlist_db) as databases
FROM performance_schema.processlist 
WHERE processlist_command != 'Daemon'
GROUP BY processlist_host, processlist_user
ORDER BY connection_count DESC;

-- 长时间运行的连接
SELECT 
    processlist_id,
    processlist_user,
    processlist_host,
    processlist_db,
    processlist_command,
    processlist_time,
    LEFT(processlist_info, 100) as query_snippet
FROM performance_schema.processlist 
WHERE processlist_time > 300  -- 超过5分钟
    AND processlist_command != 'Sleep'
ORDER BY processlist_time DESC;
""",
                "连接历史分析": """
-- 连接历史统计(COUNT_STAR为客户端socket等待次数而非连接数,
-- 精确的历史连接数可查performance_schema.accounts的TOTAL_CONNECTIONS列)
SELECT 
    user,
    host,
    COUNT_STAR as socket_wait_count,
    SUM_TIMER_WAIT/1000000000000 as total_time_seconds,
    AVG_TIMER_WAIT/1000000000000 as avg_time_seconds,
    MAX_TIMER_WAIT/1000000000000 as max_time_seconds
FROM performance_schema.events_waits_summary_by_account_by_event_name 
WHERE event_name = 'wait/io/socket/sql/client_connection'
ORDER BY socket_wait_count DESC;

-- 连接错误统计
SELECT 
    user,
    host,
    error_name,
    SUM_ERROR_RAISED as total_errors,
    SUM_ERROR_HANDLED as handled_errors
FROM performance_schema.events_errors_summary_by_account_by_error
WHERE SUM_ERROR_RAISED > 0
ORDER BY total_errors DESC;
"""
            },
            "查询性能监控": {
                "慢查询分析": """
-- Top 10 慢查询(按总执行时间)
SELECT 
    DIGEST_TEXT,
    COUNT_STAR as exec_count,
    SUM_TIMER_WAIT/1000000000000 as total_time_sec,
    AVG_TIMER_WAIT/1000000000000 as avg_time_sec,
    MAX_TIMER_WAIT/1000000000000 as max_time_sec,
    SUM_ROWS_EXAMINED as total_rows_examined,
    SUM_ROWS_SENT as total_rows_sent,
    SUM_CREATED_TMP_TABLES as tmp_tables,
    SUM_CREATED_TMP_DISK_TABLES as tmp_disk_tables
FROM performance_schema.events_statements_summary_by_digest 
WHERE DIGEST_TEXT IS NOT NULL
ORDER BY SUM_TIMER_WAIT DESC
LIMIT 10;

-- Top 10 慢查询(按平均执行时间)
SELECT 
    DIGEST_TEXT,
    COUNT_STAR as exec_count,
    AVG_TIMER_WAIT/1000000000000 as avg_time_sec,
    MAX_TIMER_WAIT/1000000000000 as max_time_sec,
    SUM_TIMER_WAIT/1000000000000 as total_time_sec,
    ROUND(SUM_ROWS_EXAMINED/COUNT_STAR, 2) as avg_rows_examined,
    ROUND(SUM_ROWS_SENT/COUNT_STAR, 2) as avg_rows_sent
FROM performance_schema.events_statements_summary_by_digest 
WHERE DIGEST_TEXT IS NOT NULL
    AND COUNT_STAR > 10  -- 至少执行10次
ORDER BY AVG_TIMER_WAIT DESC
LIMIT 10;

-- 全表扫描查询
SELECT 
    DIGEST_TEXT,
    COUNT_STAR as exec_count,
    SUM_NO_INDEX_USED as no_index_used_count,
    SUM_NO_GOOD_INDEX_USED as no_good_index_used_count,
    ROUND(SUM_NO_INDEX_USED * 100.0 / COUNT_STAR, 2) as no_index_percentage,
    AVG_TIMER_WAIT/1000000000000 as avg_time_sec
FROM performance_schema.events_statements_summary_by_digest 
WHERE SUM_NO_INDEX_USED > 0
    OR SUM_NO_GOOD_INDEX_USED > 0
ORDER BY SUM_NO_INDEX_USED DESC
LIMIT 10;
""",
                "查询统计分析": """
-- 查询类型统计
SELECT 
    CASE 
        WHEN DIGEST_TEXT LIKE 'SELECT%' THEN 'SELECT'
        WHEN DIGEST_TEXT LIKE 'INSERT%' THEN 'INSERT'
        WHEN DIGEST_TEXT LIKE 'UPDATE%' THEN 'UPDATE'
        WHEN DIGEST_TEXT LIKE 'DELETE%' THEN 'DELETE'
        WHEN DIGEST_TEXT LIKE 'REPLACE%' THEN 'REPLACE'
        ELSE 'OTHER'
    END as query_type,
    COUNT(*) as query_count,
    SUM(COUNT_STAR) as total_executions,
    SUM(SUM_TIMER_WAIT)/1000000000000 as total_time_sec,
    AVG(AVG_TIMER_WAIT)/1000000000000 as avg_time_sec
FROM performance_schema.events_statements_summary_by_digest 
WHERE DIGEST_TEXT IS NOT NULL
GROUP BY query_type
ORDER BY total_executions DESC;

-- 错误查询统计
SELECT 
    DIGEST_TEXT,
    COUNT_STAR as exec_count,
    SUM_ERRORS as error_count,
    SUM_WARNINGS as warning_count,
    ROUND(SUM_ERRORS * 100.0 / COUNT_STAR, 2) as error_percentage,
    FIRST_SEEN,
    LAST_SEEN
FROM performance_schema.events_statements_summary_by_digest 
WHERE SUM_ERRORS > 0
ORDER BY SUM_ERRORS DESC
LIMIT 10;
"""
            },
            "资源使用监控": {
                "内存使用分析": """
-- 内存使用统计
SELECT 
    event_name,
    CURRENT_COUNT_USED as current_count,
    HIGH_COUNT_USED as high_count,
    CURRENT_SIZE_USED/1024/1024 as current_size_mb,
    HIGH_SIZE_USED/1024/1024 as high_size_mb
FROM performance_schema.memory_summary_global_by_event_name 
WHERE CURRENT_SIZE_USED > 0
ORDER BY CURRENT_SIZE_USED DESC
LIMIT 20;

-- 按用户的内存使用
SELECT 
    user,
    CURRENT_COUNT_USED as current_count,
    CURRENT_SIZE_USED/1024/1024 as current_size_mb,
    HIGH_COUNT_USED as high_count,
    HIGH_SIZE_USED/1024/1024 as high_size_mb
FROM performance_schema.memory_summary_by_user_by_event_name 
WHERE event_name = 'memory/sql/user_var_entry'
    AND CURRENT_SIZE_USED > 0
ORDER BY CURRENT_SIZE_USED DESC;

-- 临时表使用统计
SELECT 
    DIGEST_TEXT,
    COUNT_STAR as exec_count,
    SUM_CREATED_TMP_TABLES as tmp_tables_created,
    SUM_CREATED_TMP_DISK_TABLES as tmp_disk_tables_created,
    ROUND(SUM_CREATED_TMP_DISK_TABLES * 100.0 / SUM_CREATED_TMP_TABLES, 2) as disk_tmp_percentage,
    AVG_TIMER_WAIT/1000000000000 as avg_time_sec
FROM performance_schema.events_statements_summary_by_digest 
WHERE SUM_CREATED_TMP_TABLES > 0
ORDER BY SUM_CREATED_TMP_DISK_TABLES DESC
LIMIT 10;
""",
                "I/O性能分析": """
-- 文件I/O统计
SELECT 
    file_name,
    event_name,
    COUNT_STAR as io_count,
    SUM_TIMER_WAIT/1000000000000 as total_time_sec,
    AVG_TIMER_WAIT/1000000000000 as avg_time_sec,
    SUM_NUMBER_OF_BYTES_READ/1024/1024 as total_read_mb,
    SUM_NUMBER_OF_BYTES_WRITE/1024/1024 as total_write_mb
FROM performance_schema.file_summary_by_instance 
WHERE COUNT_STAR > 0
ORDER BY SUM_TIMER_WAIT DESC
LIMIT 20;

-- 表I/O统计
SELECT 
    OBJECT_SCHEMA,
    OBJECT_NAME,
    COUNT_READ,
    COUNT_WRITE,
    COUNT_FETCH,
    COUNT_INSERT,
    COUNT_UPDATE,
    COUNT_DELETE,
    SUM_TIMER_WAIT/1000000000000 as total_time_sec
FROM performance_schema.table_io_waits_summary_by_table 
WHERE COUNT_STAR > 0
ORDER BY SUM_TIMER_WAIT DESC
LIMIT 20;

-- 索引使用统计
SELECT 
    OBJECT_SCHEMA,
    OBJECT_NAME,
    INDEX_NAME,
    COUNT_FETCH,
    COUNT_INSERT,
    COUNT_UPDATE,
    COUNT_DELETE,
    SUM_TIMER_WAIT/1000000000000 as total_time_sec
FROM performance_schema.table_io_waits_summary_by_index_usage 
WHERE COUNT_STAR > 0
ORDER BY COUNT_FETCH DESC
LIMIT 20;
"""
            },
            "锁等待监控": {
                "锁等待分析": """
-- 当前锁等待(以下写法适用于MySQL 5.7;information_schema.innodb_lock_waits/innodb_locks在8.0中已移除,替代写法见下)
SELECT 
    r.trx_id as waiting_trx_id,
    r.trx_mysql_thread_id as waiting_thread,
    r.trx_query as waiting_query,
    b.trx_id as blocking_trx_id,
    b.trx_mysql_thread_id as blocking_thread,
    b.trx_query as blocking_query,
    l.lock_table,
    l.lock_index,
    l.lock_mode,
    l.lock_type
FROM information_schema.innodb_lock_waits w
INNER JOIN information_schema.innodb_trx r ON r.trx_id = w.requesting_trx_id
INNER JOIN information_schema.innodb_trx b ON b.trx_id = w.blocking_trx_id
INNER JOIN information_schema.innodb_locks l ON l.lock_trx_id = w.blocking_trx_id;
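
-- MySQL 8.0 替代写法(示意):上述information_schema表在8.0中已移除,
-- 可改用sys.innodb_lock_waits视图(底层为performance_schema.data_lock_waits)
SELECT 
    waiting_pid,
    waiting_query,
    blocking_pid,
    blocking_query,
    locked_table,
    locked_index,
    wait_age
FROM sys.innodb_lock_waits;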

-- 表级锁等待历史统计
SELECT 
    OBJECT_SCHEMA,
    OBJECT_NAME,
    COUNT_STAR as lock_count,
    SUM_TIMER_WAIT/1000000000000 as total_wait_time_sec,
    AVG_TIMER_WAIT/1000000000000 as avg_wait_time_sec,
    MAX_TIMER_WAIT/1000000000000 as max_wait_time_sec
FROM performance_schema.table_lock_waits_summary_by_table 
WHERE COUNT_STAR > 0
ORDER BY SUM_TIMER_WAIT DESC
LIMIT 20;

-- 死锁检测:performance_schema中没有名为deadlock的等待事件,
-- 最近一次死锁可查看SHOW ENGINE INNODB STATUS输出中的
-- LATEST DETECTED DEADLOCK段;开启innodb_print_all_deadlocks=ON后,
-- 所有死锁都会记录到错误日志
SHOW ENGINE INNODB STATUS\G
"""
            }
        }
        
        self.monitoring_scripts = {
            "Performance Schema监控脚本": """
#!/bin/bash
# Performance Schema 监控脚本

MYSQL_USER="monitor"
MYSQL_PASSWORD="monitor_password"
MYSQL_HOST="localhost"
MYSQL_PORT="3306"
REPORT_FILE="/var/log/mysql_performance_report.txt"

echo "=== MySQL Performance Schema Report ===" > $REPORT_FILE
echo "Generated at: $(date)" >> $REPORT_FILE
echo >> $REPORT_FILE

# 连接统计
echo "=== Connection Statistics ===" >> $REPORT_FILE
mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -h$MYSQL_HOST -P$MYSQL_PORT -e "
SELECT 
    processlist_state,
    COUNT(*) as connection_count
FROM performance_schema.processlist 
WHERE processlist_command != 'Daemon'
GROUP BY processlist_state
ORDER BY connection_count DESC;
" >> $REPORT_FILE 2>/dev/null
echo >> $REPORT_FILE

# Top 5 慢查询
echo "=== Top 5 Slow Queries ===" >> $REPORT_FILE
mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -h$MYSQL_HOST -P$MYSQL_PORT -e "
SELECT 
    LEFT(DIGEST_TEXT, 80) as query_snippet,
    COUNT_STAR as exec_count,
    ROUND(AVG_TIMER_WAIT/1000000000000, 3) as avg_time_sec,
    ROUND(SUM_TIMER_WAIT/1000000000000, 3) as total_time_sec
FROM performance_schema.events_statements_summary_by_digest 
WHERE DIGEST_TEXT IS NOT NULL
ORDER BY SUM_TIMER_WAIT DESC
LIMIT 5;
" >> $REPORT_FILE 2>/dev/null
echo >> $REPORT_FILE

# 内存使用Top 10
echo "=== Top 10 Memory Usage ===" >> $REPORT_FILE
mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -h$MYSQL_HOST -P$MYSQL_PORT -e "
SELECT 
    event_name,
    ROUND(CURRENT_SIZE_USED/1024/1024, 2) as current_size_mb,
    ROUND(HIGH_SIZE_USED/1024/1024, 2) as high_size_mb
FROM performance_schema.memory_summary_global_by_event_name 
WHERE CURRENT_SIZE_USED > 0
ORDER BY CURRENT_SIZE_USED DESC
LIMIT 10;
" >> $REPORT_FILE 2>/dev/null
echo >> $REPORT_FILE

# 锁等待检查
echo "=== Lock Waits Check ===" >> $REPORT_FILE
LOCK_WAITS=$(mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -h$MYSQL_HOST -P$MYSQL_PORT -e "
SELECT COUNT(*) FROM information_schema.innodb_lock_waits;
" 2>/dev/null | tail -1)

if [ "$LOCK_WAITS" -gt 0 ]; then
    echo "WARNING: $LOCK_WAITS lock waits detected!" >> $REPORT_FILE
    mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -h$MYSQL_HOST -P$MYSQL_PORT -e "
    SELECT 
        r.trx_mysql_thread_id as waiting_thread,
        LEFT(r.trx_query, 50) as waiting_query,
        b.trx_mysql_thread_id as blocking_thread,
        LEFT(b.trx_query, 50) as blocking_query
    FROM information_schema.innodb_lock_waits w
    INNER JOIN information_schema.innodb_trx r ON r.trx_id = w.requesting_trx_id
    INNER JOIN information_schema.innodb_trx b ON b.trx_id = w.blocking_trx_id;
    " >> $REPORT_FILE 2>/dev/null
else
    echo "No lock waits detected." >> $REPORT_FILE
fi
echo >> $REPORT_FILE

echo "Report saved to: $REPORT_FILE"

# 如果有告警,发送邮件
if grep -q "WARNING" $REPORT_FILE; then
    mail -s "MySQL Performance Alert" admin@example.com < $REPORT_FILE 2>/dev/null
fi
""",
            "实时监控脚本": """
#!/bin/bash
# MySQL实时性能监控脚本

MYSQL_USER="monitor"
MYSQL_PASSWORD="monitor_password"
MYSQL_HOST="localhost"
MYSQL_PORT="3306"
MONITOR_INTERVAL=5

echo "=== MySQL Real-time Performance Monitor ==="
echo "Press Ctrl+C to stop"
echo

while true; do
    clear
    echo "=== MySQL Performance Dashboard - $(date) ==="
    echo
    
    # 连接数
    echo "=== Connections ==="
    mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -h$MYSQL_HOST -P$MYSQL_PORT -e "
    SELECT 
        'Total Connections' as metric,
        COUNT(*) as value
    FROM performance_schema.processlist
    UNION ALL
    SELECT 
        'Active Connections' as metric,
        COUNT(*) as value
    FROM performance_schema.processlist 
    WHERE processlist_command != 'Sleep'
    UNION ALL
    SELECT 
        'Running Queries' as metric,
        COUNT(*) as value
    FROM performance_schema.processlist 
    WHERE processlist_state = 'executing';
    " 2>/dev/null
    echo
    
    # QPS统计
    echo "=== Query Statistics (Last 5 seconds) ==="
    mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -h$MYSQL_HOST -P$MYSQL_PORT -e "
    SELECT 
        VARIABLE_NAME as metric,
        VARIABLE_VALUE as value
    FROM performance_schema.global_status 
    WHERE VARIABLE_NAME IN (
        'Queries',
        'Questions', 
        'Com_select',
        'Com_insert',
        'Com_update',
        'Com_delete'
    );
    " 2>/dev/null
    echo
    
    # 慢查询
    echo "=== Current Slow Queries ==="
    mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -h$MYSQL_HOST -P$MYSQL_PORT -e "
    SELECT 
        processlist_id,
        processlist_user,
        processlist_host,
        processlist_time,
        LEFT(processlist_info, 60) as query_snippet
    FROM performance_schema.processlist 
    WHERE processlist_time > 5
        AND processlist_command != 'Sleep'
        AND processlist_info IS NOT NULL
    ORDER BY processlist_time DESC
    LIMIT 5;
    " 2>/dev/null
    echo
    
    # 锁等待
    echo "=== Lock Waits ==="
    LOCK_COUNT=$(mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -h$MYSQL_HOST -P$MYSQL_PORT -e "
    SELECT COUNT(*) FROM information_schema.innodb_lock_waits;
    " 2>/dev/null | tail -1)
    
    if [ "$LOCK_COUNT" -gt 0 ]; then
        echo "WARNING: $LOCK_COUNT lock waits detected!"
        mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -h$MYSQL_HOST -P$MYSQL_PORT -e "
        SELECT 
            r.trx_mysql_thread_id as waiting_thread,
            b.trx_mysql_thread_id as blocking_thread,
            l.lock_table,
            l.lock_mode
        FROM information_schema.innodb_lock_waits w
        INNER JOIN information_schema.innodb_trx r ON r.trx_id = w.requesting_trx_id
        INNER JOIN information_schema.innodb_trx b ON b.trx_id = w.blocking_trx_id
        INNER JOIN information_schema.innodb_locks l ON l.lock_trx_id = w.blocking_trx_id;
        " 2>/dev/null
    else
        echo "No lock waits detected."
    fi
    
    sleep $MONITOR_INTERVAL
done
"""
        }
    
    def show_performance_schema_queries(self):
        """显示Performance Schema查询"""
        print("Performance Schema 监控查询:")
        print("=" * 60)
        
        for category, queries in self.performance_schema_queries.items():
            print(f"\n{category}:")
            
            for query_name, query_content in queries.items():
                print(f"\n  {query_name}:{query_content}")
    
    def show_monitoring_scripts(self):
        """显示监控脚本"""
        print("\nPerformance Schema 监控脚本:")
        print("=" * 50)
        
        for script_name, script_content in self.monitoring_scripts.items():
            print(f"\n{script_name}:{script_content}")

# Performance Schema监控演示
performance_schema_demo = MySQLPerformanceSchemaMonitoring()
performance_schema_demo.show_performance_schema_queries()
performance_schema_demo.show_monitoring_scripts()

9.4.2 慢查询日志分析

class MySQLSlowQueryAnalysis:
    def __init__(self):
        self.slow_query_config = {
            "慢查询日志配置": {
                "slow_query_log": {
                    "描述": "启用慢查询日志",
                    "推荐值": "ON",
                    "配置示例": """
# 启用慢查询日志
slow_query_log = 1
slow_query_log_file = /var/log/mysql/mysql-slow.log

# 动态启用
SET GLOBAL slow_query_log = 'ON';
SET GLOBAL slow_query_log_file = '/var/log/mysql/mysql-slow.log';

# 查看当前配置
SHOW VARIABLES LIKE 'slow_query_log%';
"""
                },
                "long_query_time": {
                    "描述": "慢查询阈值(秒)",
                    "推荐值": "1-5秒",
                    "配置示例": """
# 设置慢查询阈值为2秒
long_query_time = 2

# 动态设置
SET GLOBAL long_query_time = 2;

# 查看当前阈值
SHOW VARIABLES LIKE 'long_query_time';

# 设置更精确的阈值(微秒级)
SET GLOBAL long_query_time = 0.5;  -- 500毫秒
"""
                },
                "log_queries_not_using_indexes": {
                    "描述": "记录未使用索引的查询",
                    "推荐值": "ON(开发环境)",
                    "配置示例": """
# 记录未使用索引的查询
log_queries_not_using_indexes = 1

# 动态启用
SET GLOBAL log_queries_not_using_indexes = 'ON';

# 仅记录扫描行数超过该值的查询,避免日志量过大
min_examined_row_limit = 1000

# 限制每分钟写入日志的未使用索引查询条数
log_throttle_queries_not_using_indexes = 10

# 查看配置
SHOW VARIABLES LIKE 'log_queries_not_using_indexes';
SHOW VARIABLES LIKE 'min_examined_row_limit';
"""
                },
                "log_slow_admin_statements": {
                    "描述": "记录慢管理语句",
                    "推荐值": "ON",
                    "配置示例": """
# 记录慢管理语句(如ALTER TABLE)
log_slow_admin_statements = 1

# 动态启用
SET GLOBAL log_slow_admin_statements = 'ON';

# 查看配置
SHOW VARIABLES LIKE 'log_slow_admin_statements';
"""
                }
            }
        }
        
        self.analysis_tools = {
            "mysqldumpslow分析": """
# mysqldumpslow 基本用法
# 分析慢查询日志的前10条记录
mysqldumpslow -t 10 /var/log/mysql/mysql-slow.log

# 按查询时间排序
mysqldumpslow -s t -t 10 /var/log/mysql/mysql-slow.log

# 按锁定时间排序
mysqldumpslow -s l -t 10 /var/log/mysql/mysql-slow.log

# 按返回记录数排序
mysqldumpslow -s r -t 10 /var/log/mysql/mysql-slow.log

# 按检查记录数排序
mysqldumpslow -s e -t 10 /var/log/mysql/mysql-slow.log

# 显示详细信息
mysqldumpslow -v -t 10 /var/log/mysql/mysql-slow.log

# 过滤特定数据库
mysqldumpslow -t 10 /var/log/mysql/mysql-slow.log | grep "database_name"

# 分析最近的日志
mysqldumpslow -t 10 $(ls -t /var/log/mysql/mysql-slow.log* | head -1)

# 生成报告
mysqldumpslow -s t -t 20 /var/log/mysql/mysql-slow.log > slow_query_report.txt
""",
            "pt-query-digest分析": """
# pt-query-digest 高级分析工具

# 基本分析
pt-query-digest /var/log/mysql/mysql-slow.log

# 分析最近1小时的日志
pt-query-digest --since '1h' /var/log/mysql/mysql-slow.log

# 分析特定时间段
pt-query-digest --since '2024-01-01 00:00:00' --until '2024-01-01 23:59:59' /var/log/mysql/mysql-slow.log

# 只分析SELECT语句
pt-query-digest --filter '$event->{arg} =~ m/^select/i' /var/log/mysql/mysql-slow.log

# 生成HTML报告
pt-query-digest --output html /var/log/mysql/mysql-slow.log > slow_query_report.html

# 分析并保存到数据库
pt-query-digest --review h=localhost,D=performance,t=query_review \
                --history h=localhost,D=performance,t=query_history \
                /var/log/mysql/mysql-slow.log

# 实时分析(监控模式)
pt-query-digest --processlist h=localhost,u=monitor,p=password \
                --run-time 60

# 分析二进制日志
mysqlbinlog mysql-bin.000001 | pt-query-digest --type binlog

# 过滤特定用户的查询
pt-query-digest --filter '$event->{user} eq "app_user"' /var/log/mysql/mysql-slow.log

# 按数据库分组分析
pt-query-digest --group-by db /var/log/mysql/mysql-slow.log
""",
            "自定义分析脚本": """
#!/bin/bash
# 慢查询日志分析脚本

SLOW_LOG="/var/log/mysql/mysql-slow.log"
REPORT_FILE="/var/log/mysql/slow_query_analysis.txt"
DATE=$(date '+%Y-%m-%d %H:%M:%S')

echo "=== MySQL Slow Query Analysis Report ===" > $REPORT_FILE
echo "Generated at: $DATE" >> $REPORT_FILE
echo "Log file: $SLOW_LOG" >> $REPORT_FILE
echo >> $REPORT_FILE

# 检查日志文件是否存在
if [ ! -f "$SLOW_LOG" ]; then
    echo "ERROR: Slow query log file not found: $SLOW_LOG" >> $REPORT_FILE
    exit 1
fi

# 基本统计
echo "=== Basic Statistics ===" >> $REPORT_FILE
echo "Total slow queries: $(grep -c '^# Time:' $SLOW_LOG)" >> $REPORT_FILE
echo "Log file size: $(du -h $SLOW_LOG | cut -f1)" >> $REPORT_FILE
echo "Date range: $(head -1 $SLOW_LOG | grep '^# Time:' | cut -d' ' -f3-) to $(tail -20 $SLOW_LOG | grep '^# Time:' | tail -1 | cut -d' ' -f3-)" >> $REPORT_FILE
echo >> $REPORT_FILE

# 使用mysqldumpslow分析
echo "=== Top 10 Slow Queries by Total Time ===" >> $REPORT_FILE
mysqldumpslow -s t -t 10 $SLOW_LOG >> $REPORT_FILE 2>/dev/null
echo >> $REPORT_FILE

echo "=== Top 10 Slow Queries by Average Time ===" >> $REPORT_FILE
mysqldumpslow -s at -t 10 $SLOW_LOG >> $REPORT_FILE 2>/dev/null
echo >> $REPORT_FILE

echo "=== Top 10 Most Frequent Slow Queries ===" >> $REPORT_FILE
mysqldumpslow -s c -t 10 $SLOW_LOG >> $REPORT_FILE 2>/dev/null
echo >> $REPORT_FILE

# 查找特定模式
echo "=== Queries Not Using Indexes ===" >> $REPORT_FILE
grep -A 5 "Query_time.*Rows_examined.*" $SLOW_LOG | grep -B 5 -A 5 "No index used" | head -50 >> $REPORT_FILE
echo >> $REPORT_FILE

echo "=== Long Running Queries (>10 seconds) ===" >> $REPORT_FILE
grep "Query_time: [1-9][0-9]\|Query_time: [0-9]\{2,\}" $SLOW_LOG | head -20 >> $REPORT_FILE
echo >> $REPORT_FILE

# 按小时统计
echo "=== Slow Queries by Hour ===" >> $REPORT_FILE
grep "^# Time:" $SLOW_LOG | cut -d' ' -f4 | cut -d':' -f1 | sort | uniq -c | sort -nr >> $REPORT_FILE
echo >> $REPORT_FILE

echo "Analysis complete. Report saved to: $REPORT_FILE"

# 如果有pt-query-digest,生成详细报告
if command -v pt-query-digest >/dev/null 2>&1; then
    echo "Generating detailed analysis with pt-query-digest..."
    pt-query-digest $SLOW_LOG > "${REPORT_FILE%.txt}_detailed.txt"
    echo "Detailed report saved to: ${REPORT_FILE%.txt}_detailed.txt"
fi

# 检查是否有严重的慢查询
SEVERE_SLOW=$(grep "Query_time: [1-9][0-9]\|Query_time: [0-9]\{2,\}" $SLOW_LOG | wc -l)
if [ $SEVERE_SLOW -gt 0 ]; then
    echo "WARNING: Found $SEVERE_SLOW queries taking more than 10 seconds!"
    # 发送告警邮件
    echo "Severe slow queries detected. See attached report." | mail -s "MySQL Slow Query Alert" -A $REPORT_FILE admin@example.com 2>/dev/null
fi
"""
        }
        
        self.optimization_recommendations = {
            "慢查询优化建议": [
                "添加合适的索引",
                "优化查询语句结构",
                "避免SELECT *",
                "使用LIMIT限制结果集",
                "优化JOIN操作",
                "避免在WHERE子句中使用函数",
                "考虑分区表",
                "优化子查询",
                "使用查询缓存(适当情况下)",
                "考虑读写分离"
            ],
            "索引优化策略": {
                "单列索引": "为经常在WHERE、ORDER BY、GROUP BY中使用的列创建索引",
                "复合索引": "为多列查询条件创建复合索引,注意列的顺序",
                "覆盖索引": "创建包含查询所需所有列的索引,避免回表操作",
                "前缀索引": "对于长字符串列,考虑使用前缀索引",
                "函数索引": "MySQL 8.0支持函数索引,可以为函数表达式创建索引"
            },
            "查询重写技巧": {
                "子查询优化": "将相关子查询改写为JOIN",
                "UNION优化": "使用UNION ALL代替UNION(如果不需要去重)",
                "EXISTS vs IN": "根据数据分布选择合适的操作符",
                "分页优化": "使用延迟关联优化大偏移量的分页查询",
                "临时表优化": "避免创建大型临时表,考虑分步处理"
            }
        }
    
    def show_slow_query_config(self):
        """显示慢查询配置"""
        print("慢查询日志配置:")
        print("=" * 50)
        
        for category, configs in self.slow_query_config.items():
            print(f"\n{category}:")
            
            for config_name, config_details in configs.items():
                print(f"\n  {config_name}:")
                print(f"    描述: {config_details['描述']}")
                print(f"    推荐值: {config_details['推荐值']}")
                print(f"    配置示例:{config_details['配置示例']}")
    
    def show_analysis_tools(self):
        """显示分析工具"""
        print("\n慢查询分析工具:")
        print("=" * 40)
        
        for tool_name, tool_content in self.analysis_tools.items():
            print(f"\n{tool_name}:{tool_content}")
    
    def show_optimization_recommendations(self):
        """显示优化建议"""
        print("\n慢查询优化建议:")
        print("=" * 40)
        
        for category, content in self.optimization_recommendations.items():
            print(f"\n{category}:")
            
            if isinstance(content, list):
                for item in content:
                    print(f"  • {item}")
            elif isinstance(content, dict):
                for key, value in content.items():
                    print(f"  {key}: {value}")
            else:
                print(f"  {content}")

# 慢查询分析演示
slow_query_demo = MySQLSlowQueryAnalysis()
slow_query_demo.show_slow_query_config()
slow_query_demo.show_analysis_tools()
slow_query_demo.show_optimization_recommendations()
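
上面"查询重写技巧"中提到的延迟关联分页,思路是先用索引扫描只取出主键,再关联回表读取整行,
从而避免大偏移量分页时扫描并丢弃大量行。下面给出一个示意写法(假设存在orders表,主键为id,
created_at列上有索引;表名与列名仅为举例):

-- 传统写法:偏移量越大越慢
SELECT * FROM orders ORDER BY created_at DESC LIMIT 100000, 20;

-- 延迟关联写法:子查询仅扫描索引取出主键,再关联回表
SELECT o.*
FROM orders o
INNER JOIN (
    SELECT id
    FROM orders
    ORDER BY created_at DESC
    LIMIT 100000, 20
) t ON o.id = t.id;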

9.4.3 性能基准测试

class MySQLBenchmarkTesting:
    def __init__(self):
        self.benchmark_tools = {
            "sysbench": {
                "描述": "多线程系统压力测试工具",
                "安装": """
# Ubuntu/Debian
sudo apt-get install sysbench

# CentOS/RHEL
sudo yum install sysbench

# 从源码编译
git clone https://github.com/akopytov/sysbench.git
cd sysbench
./autogen.sh
./configure
make
sudo make install
""",
                "基本用法": """
# 准备测试数据
sysbench oltp_read_write \
    --mysql-host=localhost \
    --mysql-port=3306 \
    --mysql-user=test \
    --mysql-password=password \
    --mysql-db=testdb \
    --tables=10 \
    --table-size=100000 \
    prepare

# 运行读写混合测试
sysbench oltp_read_write \
    --mysql-host=localhost \
    --mysql-port=3306 \
    --mysql-user=test \
    --mysql-password=password \
    --mysql-db=testdb \
    --tables=10 \
    --table-size=100000 \
    --threads=16 \
    --time=300 \
    --report-interval=10 \
    run

# 清理测试数据
sysbench oltp_read_write \
    --mysql-host=localhost \
    --mysql-port=3306 \
    --mysql-user=test \
    --mysql-password=password \
    --mysql-db=testdb \
    --tables=10 \
    cleanup
""",
                "测试场景": {
                    "只读测试": """
# OLTP只读测试
sysbench oltp_read_only \
    --mysql-host=localhost \
    --mysql-user=test \
    --mysql-password=password \
    --mysql-db=testdb \
    --tables=10 \
    --table-size=1000000 \
    --threads=32 \
    --time=600 \
    --report-interval=10 \
    run

# 点查询测试
sysbench oltp_point_select \
    --mysql-host=localhost \
    --mysql-user=test \
    --mysql-password=password \
    --mysql-db=testdb \
    --tables=10 \
    --table-size=1000000 \
    --threads=64 \
    --time=300 \
    run

# 范围查询测试
sysbench select_random_ranges \
    --mysql-host=localhost \
    --mysql-user=test \
    --mysql-password=password \
    --mysql-db=testdb \
    --tables=10 \
    --table-size=1000000 \
    --threads=16 \
    --time=300 \
    run
""",
                    "只写测试": """
# OLTP只写测试
sysbench oltp_write_only \
    --mysql-host=localhost \
    --mysql-user=test \
    --mysql-password=password \
    --mysql-db=testdb \
    --tables=10 \
    --table-size=1000000 \
    --threads=16 \
    --time=300 \
    --report-interval=10 \
    run

# 插入测试
sysbench oltp_insert \
    --mysql-host=localhost \
    --mysql-user=test \
    --mysql-password=password \
    --mysql-db=testdb \
    --tables=10 \
    --table-size=1000000 \
    --threads=8 \
    --time=300 \
    run

# 更新测试
sysbench oltp_update_index \
    --mysql-host=localhost \
    --mysql-user=test \
    --mysql-password=password \
    --mysql-db=testdb \
    --tables=10 \
    --table-size=1000000 \
    --threads=16 \
    --time=300 \
    run

# 删除测试(标准sysbench自带oltp_delete脚本)
sysbench oltp_delete \
    --mysql-host=localhost \
    --mysql-user=test \
    --mysql-password=password \
    --mysql-db=testdb \
    --tables=10 \
    --table-size=1000000 \
    --threads=8 \
    --time=300 \
    run
""",
                    "混合负载测试": """
# 读写混合测试(默认比例)
sysbench oltp_read_write \
    --mysql-host=localhost \
    --mysql-user=test \
    --mysql-password=password \
    --mysql-db=testdb \
    --tables=20 \
    --table-size=1000000 \
    --threads=32 \
    --time=1800 \
    --report-interval=30 \
    --percentile=95 \
    run

# 自定义读写比例
sysbench oltp_read_write \
    --mysql-host=localhost \
    --mysql-user=test \
    --mysql-password=password \
    --mysql-db=testdb \
    --tables=10 \
    --table-size=1000000 \
    --threads=16 \
    --time=600 \
    --point-selects=10 \
    --simple-ranges=1 \
    --sum-ranges=1 \
    --order-ranges=1 \
    --distinct-ranges=1 \
    --index-updates=1 \
    --non-index-updates=1 \
    run
"""
                }
            },
            "mysqlslap": {
                "描述": "MySQL官方压力测试工具",
                "基本用法": """
# 基本压力测试
mysqlslap --user=test --password=password \
    --host=localhost \
    --concurrency=50 \
    --iterations=10 \
    --number-of-queries=1000 \
    --auto-generate-sql

# 自定义SQL测试
mysqlslap --user=test --password=password \
    --host=localhost \
    --concurrency=20 \
    --iterations=5 \
    --query="SELECT * FROM users WHERE id = FLOOR(RAND() * 10000)" \
    --number-of-queries=10000

# 创建表并测试
mysqlslap --user=test --password=password \
    --host=localhost \
    --concurrency=10 \
    --iterations=3 \
    --auto-generate-sql \
    --auto-generate-sql-add-autoincrement \
    --auto-generate-sql-load-type=mixed \
    --number-of-queries=1000 \
    --verbose
""",
                "测试场景": {
                    "并发连接测试": """
# 测试不同并发级别
for concurrency in 1 5 10 20 50 100; do
    echo "Testing with $concurrency concurrent connections"
    mysqlslap --user=test --password=password \
        --host=localhost \
        --concurrency=$concurrency \
        --iterations=5 \
        --auto-generate-sql \
        --number-of-queries=1000 \
        --verbose
done
""",
                    "查询类型测试": """
# SELECT测试
mysqlslap --user=test --password=password \
    --host=localhost \
    --concurrency=20 \
    --iterations=5 \
    --auto-generate-sql \
    --auto-generate-sql-load-type=read \
    --number-of-queries=10000

# INSERT测试
mysqlslap --user=test --password=password \
    --host=localhost \
    --concurrency=10 \
    --iterations=3 \
    --auto-generate-sql \
    --auto-generate-sql-load-type=write \
    --number-of-queries=5000

# 混合测试
mysqlslap --user=test --password=password \
    --host=localhost \
    --concurrency=15 \
    --iterations=5 \
    --auto-generate-sql \
    --auto-generate-sql-load-type=mixed \
    --number-of-queries=8000
"""
                }
            },
            "自定义基准测试": """
#!/bin/bash
# MySQL性能基准测试脚本

MYSQL_HOST="localhost"
MYSQL_USER="test"
MYSQL_PASSWORD="password"
MYSQL_DB="benchmark_test"
RESULT_DIR="/tmp/mysql_benchmark_$(date +%Y%m%d_%H%M%S)"

mkdir -p $RESULT_DIR

echo "=== MySQL Performance Benchmark Test ===" | tee $RESULT_DIR/summary.txt
echo "Start time: $(date)" | tee -a $RESULT_DIR/summary.txt
echo "Host: $MYSQL_HOST" | tee -a $RESULT_DIR/summary.txt
echo "Database: $MYSQL_DB" | tee -a $RESULT_DIR/summary.txt
echo | tee -a $RESULT_DIR/summary.txt

# 创建测试数据库
mysql -h$MYSQL_HOST -u$MYSQL_USER -p$MYSQL_PASSWORD -e "DROP DATABASE IF EXISTS $MYSQL_DB; CREATE DATABASE $MYSQL_DB;"

# 测试配置数组
declare -a THREAD_COUNTS=(1 4 8 16 32 64)
declare -a TABLE_SIZES=(10000 100000 1000000)
declare -a TEST_TYPES=("oltp_read_only" "oltp_write_only" "oltp_read_write")

# 系统信息收集
echo "=== System Information ===" | tee -a $RESULT_DIR/summary.txt
echo "CPU: $(grep 'model name' /proc/cpuinfo | head -1 | cut -d':' -f2 | xargs)" | tee -a $RESULT_DIR/summary.txt
echo "CPU Cores: $(nproc)" | tee -a $RESULT_DIR/summary.txt
echo "Memory: $(free -h | grep Mem | awk '{print $2}')" | tee -a $RESULT_DIR/summary.txt
echo "Disk: $(df -h / | tail -1 | awk '{print $2}')" | tee -a $RESULT_DIR/summary.txt
echo | tee -a $RESULT_DIR/summary.txt

# MySQL配置信息
echo "=== MySQL Configuration ===" | tee -a $RESULT_DIR/summary.txt
mysql -h$MYSQL_HOST -u$MYSQL_USER -p$MYSQL_PASSWORD -e "
SELECT 
    VARIABLE_NAME, 
    VARIABLE_VALUE 
FROM performance_schema.global_variables 
WHERE VARIABLE_NAME IN (
    'innodb_buffer_pool_size',
    'innodb_log_file_size',
    'innodb_flush_log_at_trx_commit',
    'sync_binlog',
    'max_connections',
    'query_cache_size'
);
" | tee -a $RESULT_DIR/summary.txt
echo | tee -a $RESULT_DIR/summary.txt

# 运行基准测试
for table_size in "${TABLE_SIZES[@]}"; do
    for test_type in "${TEST_TYPES[@]}"; do
        echo "=== Testing $test_type with table size $table_size ===" | tee -a $RESULT_DIR/summary.txt
        
        # 准备数据
        echo "Preparing test data..." | tee -a $RESULT_DIR/summary.txt
        sysbench $test_type \
            --mysql-host=$MYSQL_HOST \
            --mysql-user=$MYSQL_USER \
            --mysql-password=$MYSQL_PASSWORD \
            --mysql-db=$MYSQL_DB \
            --tables=10 \
            --table-size=$table_size \
            prepare > /dev/null 2>&1
        
        for threads in "${THREAD_COUNTS[@]}"; do
            echo "  Testing with $threads threads..." | tee -a $RESULT_DIR/summary.txt
            
            # 运行测试
            TEST_RESULT=$(sysbench $test_type \
                --mysql-host=$MYSQL_HOST \
                --mysql-user=$MYSQL_USER \
                --mysql-password=$MYSQL_PASSWORD \
                --mysql-db=$MYSQL_DB \
                --tables=10 \
                --table-size=$table_size \
                --threads=$threads \
                --time=60 \
                --report-interval=10 \
                run 2>&1)
            
            # 提取关键指标
            TPS=$(echo "$TEST_RESULT" | grep "transactions:" | awk '{print $3}' | sed 's/(//')
            QPS=$(echo "$TEST_RESULT" | grep "queries:" | awk '{print $3}' | sed 's/(//')
            LATENCY_95=$(echo "$TEST_RESULT" | grep "95th percentile:" | awk '{print $3}')
            
            echo "    TPS: $TPS, QPS: $QPS, 95th Latency: $LATENCY_95" | tee -a $RESULT_DIR/summary.txt
            
            # 保存详细结果
            echo "$TEST_RESULT" > "$RESULT_DIR/${test_type}_${table_size}_${threads}threads.txt"
        done
        
        # 清理数据
        sysbench $test_type \
            --mysql-host=$MYSQL_HOST \
            --mysql-user=$MYSQL_USER \
            --mysql-password=$MYSQL_PASSWORD \
            --mysql-db=$MYSQL_DB \
            --tables=10 \
            cleanup > /dev/null 2>&1
        
        echo | tee -a $RESULT_DIR/summary.txt
    done
done

echo "End time: $(date)" | tee -a $RESULT_DIR/summary.txt
echo "Results saved to: $RESULT_DIR" | tee -a $RESULT_DIR/summary.txt

# 生成性能报告
python3 << EOF
import os
import re
import json
from datetime import datetime

result_dir = "$RESULT_DIR"
report_data = {}

# 解析测试结果
for filename in os.listdir(result_dir):
    if filename.endswith('.txt') and filename != 'summary.txt':
        # 文件名形如 oltp_read_write_100000_16threads.txt,
        # 从右侧切分,避免测试名中的下划线被错误拆分
        parts = filename[:-4].rsplit('_', 2)
        if len(parts) == 3:
            test_type, table_size, threads_part = parts
            threads = threads_part.replace('threads', '')
            
            with open(os.path.join(result_dir, filename), 'r') as f:
                content = f.read()
                
            # 提取性能指标
            tps_match = re.search(r'transactions:\s+\d+\s+\((\d+\.\d+)\s+per sec\.\)', content)
            qps_match = re.search(r'queries:\s+\d+\s+\((\d+\.\d+)\s+per sec\.\)', content)
            latency_match = re.search(r'95th percentile:\s+(\d+\.\d+)', content)
            
            if tps_match and qps_match:
                key = f"{test_type}_{table_size}"
                if key not in report_data:
                    report_data[key] = []
                
                report_data[key].append({
                    'threads': int(threads),
                    'tps': float(tps_match.group(1)),
                    'qps': float(qps_match.group(1)),
                    'latency_95': float(latency_match.group(1)) if latency_match else 0
                })

# 生成JSON报告
with open(os.path.join(result_dir, 'benchmark_report.json'), 'w') as f:
    json.dump(report_data, f, indent=2)

print(f"Benchmark report generated: {os.path.join(result_dir, 'benchmark_report.json')}")
EOF

echo "Benchmark test completed. Check $RESULT_DIR for detailed results."
"""
        }
        
        self.performance_metrics = {
            "关键性能指标": {
                "TPS (Transactions Per Second)": "每秒事务数,衡量数据库事务处理能力",
                "QPS (Queries Per Second)": "每秒查询数,衡量数据库查询处理能力",
                "响应时间 (Response Time)": "查询执行时间,包括平均值、95%分位数、99%分位数",
                "并发连接数 (Concurrent Connections)": "同时处理的连接数",
                "CPU使用率 (CPU Utilization)": "数据库服务器CPU使用情况",
                "内存使用率 (Memory Utilization)": "数据库缓冲池和系统内存使用情况",
                "磁盘I/O (Disk I/O)": "磁盘读写操作的频率和延迟",
                "网络I/O (Network I/O)": "网络传输的吞吐量和延迟"
            },
            "性能基准参考值": {
                "小型应用": {
                    "TPS": "100-1,000",
                    "QPS": "1,000-10,000",
                    "响应时间": "< 100ms (95%)",
                    "并发连接": "10-100"
                },
                "中型应用": {
                    "TPS": "1,000-10,000",
                    "QPS": "10,000-100,000",
                    "响应时间": "< 50ms (95%)",
                    "并发连接": "100-1,000"
                },
                "大型应用": {
                    "TPS": "10,000+",
                    "QPS": "100,000+",
                    "响应时间": "< 20ms (95%)",
                    "并发连接": "1,000+"
                }
            }
        }
        
        self.result_analysis = {
            "性能瓶颈识别": {
                "CPU瓶颈": {
                    "症状": ["CPU使用率持续>80%", "响应时间随并发增加而线性增长", "TPS无法随线程数增加而提升"],
                    "解决方案": ["优化SQL查询", "添加索引", "升级CPU", "读写分离"]
                },
                "内存瓶颈": {
                    "症状": ["大量磁盘I/O", "缓冲池命中率低", "频繁的内存交换"],
                    "解决方案": ["增加内存", "优化innodb_buffer_pool_size", "优化查询减少内存使用"]
                },
                "磁盘I/O瓶颈": {
                    "症状": ["磁盘使用率高", "I/O等待时间长", "响应时间波动大"],
                    "解决方案": ["使用SSD", "优化磁盘配置", "调整innodb_flush_log_at_trx_commit", "增加磁盘并行度"]
                },
                "锁竞争瓶颈": {
                    "症状": ["大量锁等待", "死锁频繁", "并发性能差"],
                    "解决方案": ["优化事务大小", "减少事务持有时间", "优化索引设计", "调整隔离级别"]
                }
            },
            "性能优化建议": {
                "硬件优化": [
                    "使用SSD替代机械硬盘",
                    "增加内存容量",
                    "使用多核CPU",
                    "配置RAID提高I/O性能",
                    "使用高速网络"
                ],
                "配置优化": [
                    "调整innodb_buffer_pool_size",
                    "优化innodb_log_file_size",
                    "配置合适的max_connections",
                    "调整query_cache_size",
                    "优化thread_cache_size"
                ],
                "架构优化": [
                    "实施读写分离",
                    "使用连接池",
                    "实施分库分表",
                    "使用缓存系统",
                    "考虑分布式架构"
                ]
            }
        }
    
    def show_benchmark_tools(self):
        """显示基准测试工具"""
        print("MySQL性能基准测试工具:")
        print("=" * 60)
        
        for tool_name, tool_info in self.benchmark_tools.items():
            print(f"\n{tool_name}:")
            
            if isinstance(tool_info, dict):
                for key, value in tool_info.items():
                    if key == "测试场景":
                        print(f"\n  {key}:")
                        for scenario_name, scenario_content in value.items():
                            print(f"\n    {scenario_name}:{scenario_content}")
                    else:
                        print(f"\n  {key}:{value}")
            else:
                print(f"  {tool_info}")
    
    def show_performance_metrics(self):
        """显示性能指标"""
        print("\n性能指标说明:")
        print("=" * 50)
        
        for category, metrics in self.performance_metrics.items():
            print(f"\n{category}:")
            
            if isinstance(metrics, dict):
                for metric_name, metric_desc in metrics.items():
                    if isinstance(metric_desc, dict):
                        print(f"\n  {metric_name}:")
                        for key, value in metric_desc.items():
                            print(f"    {key}: {value}")
                    else:
                        print(f"  {metric_name}: {metric_desc}")
    
    def show_result_analysis(self):
        """显示结果分析"""
        print("\n性能测试结果分析:")
        print("=" * 50)
        
        for category, analysis in self.result_analysis.items():
            print(f"\n{category}:")
            
            for item_name, item_content in analysis.items():
                print(f"\n  {item_name}:")
                
                if isinstance(item_content, dict):
                    for key, value in item_content.items():
                        print(f"    {key}:")
                        if isinstance(value, list):
                            for v in value:
                                print(f"      • {v}")
                        else:
                            print(f"      {value}")
                elif isinstance(item_content, list):
                    for item in item_content:
                        print(f"    • {item}")

# 性能基准测试演示
benchmark_demo = MySQLBenchmarkTesting()
benchmark_demo.show_benchmark_tools()
benchmark_demo.show_performance_metrics()
benchmark_demo.show_result_analysis()
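
上面"内存瓶颈"中提到的缓冲池命中率,可以用下面的查询做一个粗略估算(示意写法,基于自启动以来的
累计计数,长时间运行的实例结果更有参考价值;理想情况下该比例应接近100%):

SELECT
    ROUND(100 - 100 *
        MAX(CASE WHEN VARIABLE_NAME = 'Innodb_buffer_pool_reads' THEN VARIABLE_VALUE END) /
        MAX(CASE WHEN VARIABLE_NAME = 'Innodb_buffer_pool_read_requests' THEN VARIABLE_VALUE END), 2)
        AS buffer_pool_hit_rate_pct
FROM performance_schema.global_status
WHERE VARIABLE_NAME IN ('Innodb_buffer_pool_reads', 'Innodb_buffer_pool_read_requests');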

9.5 监控系统集成

9.5.1 Prometheus + Grafana 监控

class MySQLPrometheusMonitoring:
    def __init__(self):
        self.prometheus_config = {
            "mysqld_exporter配置": """
# 下载和安装mysqld_exporter
wget https://github.com/prometheus/mysqld_exporter/releases/download/v0.14.0/mysqld_exporter-0.14.0.linux-amd64.tar.gz
tar xvf mysqld_exporter-0.14.0.linux-amd64.tar.gz
sudo mv mysqld_exporter-0.14.0.linux-amd64/mysqld_exporter /usr/local/bin/

# 创建MySQL监控用户
mysql -u root -p << EOF
CREATE USER 'exporter'@'localhost' IDENTIFIED BY 'exporter_password';
GRANT PROCESS, REPLICATION CLIENT ON *.* TO 'exporter'@'localhost';
GRANT SELECT ON performance_schema.* TO 'exporter'@'localhost';
FLUSH PRIVILEGES;
EOF

# 创建配置文件
sudo mkdir -p /etc/mysqld_exporter
sudo tee /etc/mysqld_exporter/.my.cnf << EOF
[client]
user=exporter
password=exporter_password
host=localhost
port=3306
EOF

sudo chmod 600 /etc/mysqld_exporter/.my.cnf

# 创建systemd服务
sudo tee /etc/systemd/system/mysqld_exporter.service << EOF
[Unit]
Description=MySQL Exporter
After=network.target

[Service]
Type=simple
Restart=always
User=prometheus
Group=prometheus
ExecStart=/usr/local/bin/mysqld_exporter \
  --config.my-cnf=/etc/mysqld_exporter/.my.cnf \
  --collect.global_status \
  --collect.info_schema.innodb_metrics \
  --collect.auto_increment.columns \
  --collect.info_schema.processlist \
  --collect.binlog_size \
  --collect.info_schema.tablestats \
  --collect.global_variables \
  --collect.info_schema.query_response_time \
  --collect.info_schema.userstats \
  --collect.info_schema.tables \
  --collect.perf_schema.tablelocks \
  --collect.perf_schema.file_events \
  --collect.perf_schema.eventswaits \
  --collect.perf_schema.indexiowaits \
  --collect.perf_schema.tableiowaits \
  --collect.slave_status \
  --web.listen-address=0.0.0.0:9104

[Install]
WantedBy=multi-user.target
EOF

# 启动服务
sudo systemctl daemon-reload
sudo systemctl enable mysqld_exporter
sudo systemctl start mysqld_exporter

# 检查状态
sudo systemctl status mysqld_exporter
curl http://localhost:9104/metrics
""",
            "prometheus.yml配置": """
# Prometheus配置文件
global:
  scrape_interval: 15s
  evaluation_interval: 15s

rule_files:
  - "mysql_rules.yml"

alerting:
  alertmanagers:
    - static_configs:
        - targets:
          - alertmanager:9093

scrape_configs:
  - job_name: 'prometheus'
    static_configs:
      - targets: ['localhost:9090']

  - job_name: 'mysql'
    static_configs:
      - targets: ['localhost:9104']
    scrape_interval: 5s
    metrics_path: /metrics
    params:
      collect[]:
        - mysql.global_status
        - mysql.global_variables
        - mysql.slave_status
        - mysql.info_schema.innodb_metrics
        - mysql.info_schema.processlist
        - mysql.info_schema.tables
        - mysql.info_schema.tablestats
        - mysql.perf_schema.tableiowaits
        - mysql.perf_schema.indexiowaits
        - mysql.perf_schema.tablelocks

  - job_name: 'node'
    static_configs:
      - targets: ['localhost:9100']
""",
            "告警规则配置": """
# mysql_rules.yml
groups:
  - name: mysql.rules
    rules:
      # MySQL服务可用性
      - alert: MySQLDown
        expr: mysql_up == 0
        for: 0m
        labels:
          severity: critical
        annotations:
          summary: "MySQL instance is down"
          description: "MySQL instance {{ $labels.instance }} has been down for more than 0 minutes."

      # 连接数告警
      - alert: MySQLTooManyConnections
        expr: mysql_global_status_threads_connected / mysql_global_variables_max_connections * 100 > 80
        for: 2m
        labels:
          severity: warning
        annotations:
          summary: "MySQL too many connections"
          description: "MySQL instance {{ $labels.instance }} has too many connections ({{ $value }}% of max)."

      # 慢查询告警
      - alert: MySQLSlowQueries
        expr: increase(mysql_global_status_slow_queries[1m]) > 10
        for: 2m
        labels:
          severity: warning
        annotations:
          summary: "MySQL slow queries"
          description: "MySQL instance {{ $labels.instance }} has {{ $value }} slow queries in the last minute."

      # InnoDB缓冲池命中率
      - alert: MySQLInnoDBBufferPoolHitRate
        expr: |
          (
            rate(mysql_global_status_innodb_buffer_pool_read_requests[5m]) - 
            rate(mysql_global_status_innodb_buffer_pool_reads[5m])
          ) / rate(mysql_global_status_innodb_buffer_pool_read_requests[5m]) * 100 < 95
        for: 5m
        labels:
          severity: warning
        annotations:
          summary: "MySQL InnoDB buffer pool hit rate is low"
          description: "MySQL instance {{ $labels.instance }} InnoDB buffer pool hit rate is {{ $value }}%."

      # 复制延迟告警
      - alert: MySQLReplicationLag
        expr: mysql_slave_status_seconds_behind_master > 30
        for: 1m
        labels:
          severity: warning
        annotations:
          summary: "MySQL replication lag"
          description: "MySQL instance {{ $labels.instance }} replication lag is {{ $value }} seconds."

      # 磁盘空间告警
      - alert: MySQLDiskSpaceUsage
        expr: |
          (
            mysql_global_status_innodb_data_file_size_bytes + 
            mysql_global_status_innodb_log_file_size_bytes
          ) / 1024 / 1024 / 1024 > 50
        for: 5m
        labels:
          severity: warning
        annotations:
          summary: "MySQL disk space usage is high"
          description: "MySQL instance {{ $labels.instance }} is using {{ $value }}GB of disk space."

      # 死锁告警
      - alert: MySQLDeadlocks
        expr: increase(mysql_global_status_innodb_deadlocks[1m]) > 0
        for: 0m
        labels:
          severity: warning
        annotations:
          summary: "MySQL deadlocks detected"
          description: "MySQL instance {{ $labels.instance }} has {{ $value }} deadlocks in the last minute."

      # 表锁等待告警
      - alert: MySQLTableLockWaits
        expr: increase(mysql_global_status_table_locks_waited[1m]) > 10
        for: 2m
        labels:
          severity: warning
        annotations:
          summary: "MySQL table lock waits"
          description: "MySQL instance {{ $labels.instance }} has {{ $value }} table lock waits in the last minute."

      # 临时表创建告警
      - alert: MySQLTempTablesCreated
        expr: increase(mysql_global_status_created_tmp_disk_tables[1m]) > 100
        for: 5m
        labels:
          severity: warning
        annotations:
          summary: "MySQL creating too many temp tables on disk"
          description: "MySQL instance {{ $labels.instance }} created {{ $value }} temp tables on disk in the last minute."
"""
        }
        
        self.grafana_dashboard = {
            "MySQL Overview Dashboard": """
{
  "dashboard": {
    "id": null,
    "title": "MySQL Overview",
    "tags": ["mysql"],
    "timezone": "browser",
    "panels": [
      {
        "id": 1,
        "title": "MySQL Status",
        "type": "stat",
        "targets": [
          {
            "expr": "mysql_up",
            "legendFormat": "MySQL Up"
          }
        ],
        "fieldConfig": {
          "defaults": {
            "color": {
              "mode": "thresholds"
            },
            "thresholds": {
              "steps": [
                {"color": "red", "value": 0},
                {"color": "green", "value": 1}
              ]
            }
          }
        },
        "gridPos": {"h": 8, "w": 12, "x": 0, "y": 0}
      },
      {
        "id": 2,
        "title": "Connections",
        "type": "timeseries",
        "targets": [
          {
            "expr": "mysql_global_status_threads_connected",
            "legendFormat": "Connected"
          },
          {
            "expr": "mysql_global_status_threads_running",
            "legendFormat": "Running"
          },
          {
            "expr": "mysql_global_variables_max_connections",
            "legendFormat": "Max Connections"
          }
        ],
        "gridPos": {"h": 8, "w": 12, "x": 12, "y": 0}
      },
      {
        "id": 3,
        "title": "Query Rate",
        "type": "timeseries",
        "targets": [
          {
            "expr": "rate(mysql_global_status_queries[1m])",
            "legendFormat": "Queries/sec"
          },
          {
            "expr": "rate(mysql_global_status_questions[1m])",
            "legendFormat": "Questions/sec"
          }
        ],
        "gridPos": {"h": 8, "w": 12, "x": 0, "y": 8}
      },
      {
        "id": 4,
        "title": "InnoDB Buffer Pool",
        "type": "timeseries",
        "targets": [
          {
            "expr": "mysql_global_status_innodb_buffer_pool_pages_total",
            "legendFormat": "Total Pages"
          },
          {
            "expr": "mysql_global_status_innodb_buffer_pool_pages_free",
            "legendFormat": "Free Pages"
          },
          {
            "expr": "mysql_global_status_innodb_buffer_pool_pages_data",
            "legendFormat": "Data Pages"
          }
        ],
        "gridPos": {"h": 8, "w": 12, "x": 12, "y": 8}
      },
      {
        "id": 5,
        "title": "Slow Queries",
        "type": "timeseries",
        "targets": [
          {
            "expr": "rate(mysql_global_status_slow_queries[1m])",
            "legendFormat": "Slow Queries/sec"
          }
        ],
        "gridPos": {"h": 8, "w": 12, "x": 0, "y": 16}
      },
      {
        "id": 6,
        "title": "InnoDB I/O",
        "type": "timeseries",
        "targets": [
          {
            "expr": "rate(mysql_global_status_innodb_data_reads[1m])",
            "legendFormat": "Reads/sec"
          },
          {
            "expr": "rate(mysql_global_status_innodb_data_writes[1m])",
            "legendFormat": "Writes/sec"
          }
        ],
        "gridPos": {"h": 8, "w": 12, "x": 12, "y": 16}
      }
    ],
    "time": {
      "from": "now-1h",
      "to": "now"
    },
    "refresh": "5s"
  }
}
"""
        }
        
        self.monitoring_scripts = {
            "自动化部署脚本": """
#!/bin/bash
# MySQL监控系统自动化部署脚本

set -e

echo "=== MySQL Monitoring Stack Deployment ==="

# 创建监控用户
sudo useradd --no-create-home --shell /bin/false prometheus 2>/dev/null || true
sudo useradd --no-create-home --shell /bin/false mysqld_exporter 2>/dev/null || true

# 创建目录
sudo mkdir -p /etc/prometheus
sudo mkdir -p /var/lib/prometheus
sudo mkdir -p /etc/mysqld_exporter
sudo mkdir -p /var/log/prometheus

# 下载和安装Prometheus
echo "Installing Prometheus..."
PROMETHEUS_VERSION="2.40.0"
wget https://github.com/prometheus/prometheus/releases/download/v${PROMETHEUS_VERSION}/prometheus-${PROMETHEUS_VERSION}.linux-amd64.tar.gz
tar xvf prometheus-${PROMETHEUS_VERSION}.linux-amd64.tar.gz
sudo mv prometheus-${PROMETHEUS_VERSION}.linux-amd64/prometheus /usr/local/bin/
sudo mv prometheus-${PROMETHEUS_VERSION}.linux-amd64/promtool /usr/local/bin/
sudo mv prometheus-${PROMETHEUS_VERSION}.linux-amd64/consoles /etc/prometheus/
sudo mv prometheus-${PROMETHEUS_VERSION}.linux-amd64/console_libraries /etc/prometheus/
rm -rf prometheus-${PROMETHEUS_VERSION}.linux-amd64*

# 下载和安装mysqld_exporter
echo "Installing mysqld_exporter..."
MYSQLD_EXPORTER_VERSION="0.14.0"
wget https://github.com/prometheus/mysqld_exporter/releases/download/v${MYSQLD_EXPORTER_VERSION}/mysqld_exporter-${MYSQLD_EXPORTER_VERSION}.linux-amd64.tar.gz
tar xvf mysqld_exporter-${MYSQLD_EXPORTER_VERSION}.linux-amd64.tar.gz
sudo mv mysqld_exporter-${MYSQLD_EXPORTER_VERSION}.linux-amd64/mysqld_exporter /usr/local/bin/
rm -rf mysqld_exporter-${MYSQLD_EXPORTER_VERSION}.linux-amd64*

# 设置权限
sudo chown prometheus:prometheus /usr/local/bin/prometheus
sudo chown prometheus:prometheus /usr/local/bin/promtool
sudo chown -R prometheus:prometheus /etc/prometheus
sudo chown -R prometheus:prometheus /var/lib/prometheus
sudo chown -R prometheus:prometheus /var/log/prometheus
sudo chown mysqld_exporter:mysqld_exporter /usr/local/bin/mysqld_exporter
sudo chown -R mysqld_exporter:mysqld_exporter /etc/mysqld_exporter

# 创建Prometheus配置
sudo tee /etc/prometheus/prometheus.yml << 'EOF'
global:
  scrape_interval: 15s
  evaluation_interval: 15s

rule_files:
  - "mysql_rules.yml"

scrape_configs:
  - job_name: 'prometheus'
    static_configs:
      - targets: ['localhost:9090']

  - job_name: 'mysql'
    static_configs:
      - targets: ['localhost:9104']
    scrape_interval: 5s
EOF

# 创建MySQL告警规则
sudo tee /etc/prometheus/mysql_rules.yml << 'EOF'
groups:
  - name: mysql.rules
    rules:
      - alert: MySQLDown
        expr: mysql_up == 0
        for: 0m
        labels:
          severity: critical
        annotations:
          summary: "MySQL instance is down"
          description: "MySQL instance {{ $labels.instance }} has been down."

      - alert: MySQLTooManyConnections
        expr: mysql_global_status_threads_connected / mysql_global_variables_max_connections * 100 > 80
        for: 2m
        labels:
          severity: warning
        annotations:
          summary: "MySQL too many connections"
          description: "MySQL instance {{ $labels.instance }} has {{ $value }}% connections used."
EOF

# 创建mysqld_exporter配置
echo "Please enter MySQL monitoring user credentials:"
read -p "Username: " MYSQL_USER
read -s -p "Password: " MYSQL_PASSWORD
echo

sudo tee /etc/mysqld_exporter/.my.cnf << EOF
[client]
user=${MYSQL_USER}
password=${MYSQL_PASSWORD}
host=localhost
port=3306
EOF

sudo chmod 600 /etc/mysqld_exporter/.my.cnf

# 创建systemd服务文件
sudo tee /etc/systemd/system/prometheus.service << 'EOF'
[Unit]
Description=Prometheus
Wants=network-online.target
After=network-online.target

[Service]
User=prometheus
Group=prometheus
Type=simple
ExecStart=/usr/local/bin/prometheus \
    --config.file /etc/prometheus/prometheus.yml \
    --storage.tsdb.path /var/lib/prometheus/ \
    --web.console.templates=/etc/prometheus/consoles \
    --web.console.libraries=/etc/prometheus/console_libraries \
    --web.listen-address=0.0.0.0:9090 \
    --web.enable-lifecycle

[Install]
WantedBy=multi-user.target
EOF

sudo tee /etc/systemd/system/mysqld_exporter.service << 'EOF'
[Unit]
Description=MySQL Exporter
After=network.target

[Service]
Type=simple
Restart=always
User=mysqld_exporter
Group=mysqld_exporter
ExecStart=/usr/local/bin/mysqld_exporter \
  --config.my-cnf=/etc/mysqld_exporter/.my.cnf \
  --collect.global_status \
  --collect.info_schema.innodb_metrics \
  --collect.auto_increment.columns \
  --collect.info_schema.processlist \
  --collect.binlog_size \
  --collect.info_schema.tablestats \
  --collect.global_variables \
  --collect.slave_status \
  --web.listen-address=0.0.0.0:9104

[Install]
WantedBy=multi-user.target
EOF

# 启动服务
sudo systemctl daemon-reload
sudo systemctl enable prometheus
sudo systemctl enable mysqld_exporter
sudo systemctl start prometheus
sudo systemctl start mysqld_exporter

# 检查服务状态
echo "Checking service status..."
sudo systemctl status prometheus --no-pager
sudo systemctl status mysqld_exporter --no-pager

echo "=== Deployment Complete ==="
echo "Prometheus: http://localhost:9090"
echo "MySQL Exporter: http://localhost:9104/metrics"
echo "Please configure Grafana to connect to Prometheus at http://localhost:9090"
"""
        }
    
    def show_prometheus_config(self):
        """显示Prometheus配置"""
        print("Prometheus + MySQL监控配置:")
        print("=" * 60)
        
        for config_name, config_content in self.prometheus_config.items():
            print(f"\n{config_name}:{config_content}")
    
    def show_grafana_dashboard(self):
        """显示Grafana仪表板"""
        print("\nGrafana仪表板配置:")
        print("=" * 50)
        
        for dashboard_name, dashboard_content in self.grafana_dashboard.items():
            print(f"\n{dashboard_name}:{dashboard_content}")
    
    def show_monitoring_scripts(self):
        """显示监控脚本"""
        print("\n监控系统部署脚本:")
        print("=" * 50)
        
        for script_name, script_content in self.monitoring_scripts.items():
            print(f"\n{script_name}:{script_content}")

# Prometheus监控演示
prometheus_demo = MySQLPrometheusMonitoring()
prometheus_demo.show_prometheus_config()
prometheus_demo.show_grafana_dashboard()
prometheus_demo.show_monitoring_scripts()
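
部署完成后,可以通过Prometheus的HTTP查询API确认mysqld_exporter的指标是否被正常抓取。下面是一个最小化的验证脚本示意(假设Prometheus按上述脚本部署在localhost:9090,并且已通过pip安装requests库):

# prometheus_check.py - 验证mysql_up指标抓取情况的示意脚本
# 假设: Prometheus监听localhost:9090, 已安装requests库
import requests

PROMETHEUS_URL = "http://localhost:9090"

def query_metric(expr):
    """调用Prometheus即时查询API, 返回结果列表"""
    resp = requests.get(
        f"{PROMETHEUS_URL}/api/v1/query",
        params={"query": expr},
        timeout=5,
    )
    resp.raise_for_status()
    return resp.json()["data"]["result"]

if __name__ == "__main__":
    results = query_metric("mysql_up")
    if not results:
        print("未查询到mysql_up指标, 请检查mysqld_exporter抓取配置")
    for item in results:
        instance = item["metric"].get("instance", "unknown")
        value = item["value"][1]  # value格式为[时间戳, 字符串值]
        print(f"实例 {instance}: {'UP' if value == '1' else 'DOWN'}")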

9.7 性能优化最佳实践

9.7.1 优化策略与方法论

1. 性能优化流程

# 性能优化标准流程
1. 性能基线建立
   - 收集当前性能指标(采集示例见本流程末尾的脚本)
   - 建立性能基准
   - 识别性能瓶颈

2. 问题分析与定位
   - 分析慢查询日志
   - 检查系统资源使用
   - 识别热点数据和查询

3. 优化方案制定
   - 制定优化计划
   - 评估优化风险
   - 准备回滚方案

4. 实施与验证
   - 在测试环境验证
   - 生产环境实施
   - 监控优化效果

5. 持续监控与调整
   - 建立长期监控
   - 定期性能评估
   - 持续优化改进
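
下面给出步骤1"性能基线建立"中指标采集的一个示意脚本:通过两次SHOW GLOBAL STATUS采样的差值估算QPS与TPS(假设本机可直接执行mysql命令行客户端,且凭据已写入~/.my.cnf;结果仅代表采样区间内的近似值):

# baseline_sample.py - 性能基线采集示意脚本
# 假设: 本机可执行mysql命令, 凭据已配置在~/.my.cnf
import subprocess
import time

def get_status(variables):
    """读取指定的全局状态变量, 返回 {名称: 整数值}"""
    sql = "SHOW GLOBAL STATUS WHERE Variable_name IN ({});".format(
        ",".join("'{}'".format(v) for v in variables)
    )
    output = subprocess.run(
        ["mysql", "-N", "-e", sql], capture_output=True, text=True, check=True
    ).stdout
    return {name: int(value) for name, value in
            (line.split("\t") for line in output.strip().splitlines())}

if __name__ == "__main__":
    INTERVAL = 10  # 采样间隔(秒)
    keys = ["Questions", "Com_commit", "Com_rollback"]
    first = get_status(keys)
    time.sleep(INTERVAL)
    second = get_status(keys)

    qps = (second["Questions"] - first["Questions"]) / INTERVAL
    tps = ((second["Com_commit"] - first["Com_commit"]) +
           (second["Com_rollback"] - first["Com_rollback"])) / INTERVAL
    print(f"采样区间{INTERVAL}s: QPS约{qps:.1f}, TPS约{tps:.1f}")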

2. 优化优先级矩阵

优化类型 | 实施难度 | 性能提升 | 优先级 | 说明
索引优化 | 低 | 高 | 1 | 最优先,效果明显
SQL重写 | 中 | 高 | 2 | 投入产出比高
配置调优 | 低 | 中 | 3 | 风险较低
架构优化 | 高 | 高 | 4 | 长期规划
硬件升级 | 中 | 中 | 5 | 成本考虑

3. 性能优化检查清单

class MySQLPerformanceChecklist:
    def __init__(self):
        self.checklist = {
            "索引检查": {
                "检查项目": [
                    "是否存在未使用的索引",
                    "是否存在重复索引",
                    "复合索引顺序是否合理",
                    "是否缺少必要的索引",
                    "索引选择性是否足够高"
                ],
                "检查SQL": """
                    SELECT 
                        table_schema,
                        table_name,
                        index_name,
                        cardinality,
                        sub_part,
                        packed,
                        nullable,
                        index_type
                    FROM information_schema.statistics 
                    WHERE table_schema NOT IN ('information_schema', 'mysql', 'performance_schema', 'sys')
                    ORDER BY table_schema, table_name, seq_in_index;
                """
            },
            "表结构检查": {
                "检查项目": [
                    "表引擎是否合适",
                    "字段类型是否优化",
                    "是否存在大字段",
                    "表分区是否合理",
                    "表碎片是否严重"
                ],
                "检查SQL": """
                    SELECT 
                        table_schema,
                        table_name,
                        engine,
                        table_rows,
                        avg_row_length,
                        data_length,
                        index_length,
                        data_free
                    FROM information_schema.tables 
                    WHERE table_schema NOT IN ('information_schema', 'mysql', 'performance_schema', 'sys')
                    ORDER BY data_length DESC;
                """
            },
            "查询性能检查": {
                "检查项目": [
                    "慢查询数量和频率",
                    "查询执行计划",
                    "临时表使用情况",
                    "排序操作优化",
                    "JOIN操作效率"
                ],
                "检查SQL": """
                    SELECT 
                        digest_text,
                        count_star,
                        avg_timer_wait/1000000000000 as avg_time_sec,
                        sum_timer_wait/1000000000000 as total_time_sec,
                        sum_rows_examined,
                        sum_rows_sent
                    FROM performance_schema.events_statements_summary_by_digest 
                    ORDER BY avg_timer_wait DESC 
                    LIMIT 10;
                """
            }
        }
    
    def show_checklist(self):
        """显示性能检查清单"""
        print("MySQL性能优化检查清单")
        print("=" * 50)
        
        for category, details in self.checklist.items():
            print(f"\n{category}:")
            print("-" * 30)
            
            print("检查项目:")
            for item in details["检查项目"]:
                print(f"  ✓ {item}")
            
            print("\n检查SQL:")
            print(details["检查SQL"])

# 性能检查清单演示
checklist = MySQLPerformanceChecklist()
checklist.show_checklist()
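
针对清单中"是否存在未使用的索引"和"是否存在重复索引"两个检查项,MySQL自带的sys库提供了现成的视图,可以直接查询。下面是一个示意脚本(假设本机可执行mysql命令,当前账号对sys库有查询权限;查询结果仅供参考,删除索引前仍需结合业务确认):

# index_check.py - 基于sys库的索引检查示意脚本
# 假设: 本机可执行mysql命令, 账号对sys库有查询权限
import subprocess

INDEX_CHECKS = {
    "未使用的索引": "SELECT * FROM sys.schema_unused_indexes;",
    "冗余(重复)索引": "SELECT * FROM sys.schema_redundant_indexes;",
}

def run_check(name, sql):
    """以表格形式输出检查结果"""
    print(f"\n=== {name} ===")
    result = subprocess.run(
        ["mysql", "--table", "-e", sql], capture_output=True, text=True
    )
    print(result.stdout or result.stderr)

if __name__ == "__main__":
    for name, sql in INDEX_CHECKS.items():
        run_check(name, sql)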

9.7.2 常见性能问题与解决方案

1. 查询性能问题

class MySQLQueryPerformanceIssues:
    def __init__(self):
        self.issues = {
            "慢查询过多": {
                "症状": [
                    "查询响应时间长",
                    "CPU使用率高",
                    "用户体验差"
                ],
                "原因分析": [
                    "缺少合适的索引",
                    "查询语句不优化",
                    "数据量过大",
                    "表结构设计不合理"
                ],
                "解决方案": """
                    -- 1. 启用慢查询日志
                    SET GLOBAL slow_query_log = 'ON';
                    SET GLOBAL long_query_time = 1;
                    SET GLOBAL log_queries_not_using_indexes = 'ON';
                    
                    -- 2. 分析执行计划
                    EXPLAIN FORMAT=JSON 
                    SELECT * FROM orders o 
                    JOIN customers c ON o.customer_id = c.id 
                    WHERE o.order_date >= '2024-01-01';
                    
                    -- 3. 优化索引
                    CREATE INDEX idx_order_date ON orders(order_date);
                    CREATE INDEX idx_customer_order ON orders(customer_id, order_date);
                    
                    -- 4. 查询重写
                    -- 原查询
                    SELECT * FROM products WHERE category_id IN (
                        SELECT id FROM categories WHERE name LIKE '%电子%'
                    );
                    
                    -- 优化后
                    SELECT p.* FROM products p 
                    JOIN categories c ON p.category_id = c.id 
                    WHERE c.name LIKE '%电子%';
                """
            },
            "锁等待严重": {
                "症状": [
                    "查询阻塞",
                    "连接数增加",
                    "响应时间不稳定"
                ],
                "原因分析": [
                    "事务时间过长",
                    "锁粒度过大",
                    "死锁频繁",
                    "并发访问冲突"
                ],
                "解决方案": """
                    -- 1. 检查锁等待 (MySQL 8.0 已移除 information_schema.innodb_lock_waits,
                    --    改用 sys.innodb_lock_waits 视图; 5.7 及更早版本仍可查询 information_schema)
                    SELECT 
                        waiting_pid,
                        waiting_query,
                        blocking_pid,
                        blocking_query,
                        wait_age,
                        sql_kill_blocking_query
                    FROM sys.innodb_lock_waits;
                    
                    -- 2. 优化事务
                    -- 缩短事务时间
                    -- 减少锁持有时间
                    -- 使用合适的隔离级别
                    
                    -- 3. 索引优化
                    -- 减少锁定行数
                    -- 使用覆盖索引
                """
            }
        }
    
    def show_issues(self):
        """显示查询性能问题"""
        print("MySQL查询性能问题诊断")
        print("=" * 50)
        
        for issue, details in self.issues.items():
            print(f"\n问题: {issue}")
            print("-" * 30)
            
            print("症状:")
            for symptom in details["症状"]:
                print(f"  • {symptom}")
            
            print("\n原因分析:")
            for cause in details["原因分析"]:
                print(f"  • {cause}")
            
            print("\n解决方案:")
            print(details["解决方案"])

# 查询性能问题演示
query_issues = MySQLQueryPerformanceIssues()
query_issues.show_issues()
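
配合上面"分析执行计划"这一步,下面是一个解析EXPLAIN FORMAT=JSON输出、检查执行计划中是否存在全表扫描的示意脚本(假设通过mysql命令行连接,示例中的SQL与库名均为演示用途,需替换为实际对象):

# explain_check.py - 检查执行计划是否存在全表扫描的示意脚本
# 假设: 本机可执行mysql命令, 示例SQL中的表在目标库中存在
import json
import subprocess

def explain_json(sql, database):
    """执行EXPLAIN FORMAT=JSON并返回解析后的执行计划(-r保留JSON中的换行)"""
    output = subprocess.run(
        ["mysql", "-N", "-r", "-D", database, "-e", f"EXPLAIN FORMAT=JSON {sql}"],
        capture_output=True, text=True, check=True,
    ).stdout
    return json.loads(output)

def find_full_scans(node, found):
    """递归遍历执行计划, 收集access_type为ALL(全表扫描)的表名"""
    if isinstance(node, dict):
        table = node.get("table")
        if isinstance(table, dict) and table.get("access_type") == "ALL":
            found.append(table.get("table_name"))
        for value in node.values():
            find_full_scans(value, found)
    elif isinstance(node, list):
        for item in node:
            find_full_scans(item, found)
    return found

if __name__ == "__main__":
    sql = "SELECT * FROM orders WHERE order_date >= '2024-01-01'"  # 示例查询
    plan = explain_json(sql, "test")  # 示例库名
    scans = [t for t in find_full_scans(plan, []) if t]
    if scans:
        print("发现全表扫描的表:", ", ".join(scans))
    else:
        print("执行计划中未发现全表扫描")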

2. 连接性能问题

#!/bin/bash
# MySQL连接性能优化脚本

echo "=== MySQL连接性能优化 ==="

# 1. 检查连接状态
echo "1. 当前连接状态:"
mysql -e "SHOW STATUS LIKE 'Connections';"
mysql -e "SHOW STATUS LIKE 'Max_used_connections';"
mysql -e "SHOW STATUS LIKE 'Threads_connected';"
mysql -e "SHOW STATUS LIKE 'Threads_running';"

# 2. 检查连接错误
echo "\n2. 连接错误统计:"
mysql -e "SHOW STATUS LIKE 'Connection_errors%';"
mysql -e "SHOW STATUS LIKE 'Aborted_connects';"
mysql -e "SHOW STATUS LIKE 'Aborted_clients';"

# 3. 连接池配置建议
echo "\n3. 连接池配置建议:"
cat << 'EOF'
# my.cnf配置优化
[mysqld]
max_connections = 1000
max_connect_errors = 100000
connect_timeout = 10
wait_timeout = 28800
interactive_timeout = 28800
thread_cache_size = 16

# 应用层连接池配置(Java HikariCP示例)
maximumPoolSize=20
minimumIdle=5
connectionTimeout=30000
idleTimeout=600000
maxLifetime=1800000
leakDetectionThreshold=60000
EOF

# 4. 连接监控脚本
echo "\n4. 生成连接监控脚本:"
cat > /tmp/mysql_connection_monitor.sh << 'EOF'
#!/bin/bash
# MySQL连接监控脚本

while true; do
    echo "$(date): MySQL连接状态"
    
    # 当前连接数
    CURRENT_CONNECTIONS=$(mysql -e "SHOW STATUS LIKE 'Threads_connected';" | awk 'NR==2{print $2}')
    MAX_CONNECTIONS=$(mysql -e "SHOW VARIABLES LIKE 'max_connections';" | awk 'NR==2{print $2}')
    
    echo "当前连接数: $CURRENT_CONNECTIONS / $MAX_CONNECTIONS"
    
    # 连接使用率
    CONNECTION_USAGE=$(echo "scale=2; $CURRENT_CONNECTIONS * 100 / $MAX_CONNECTIONS" | bc)
    echo "连接使用率: ${CONNECTION_USAGE}%"
    
    # 告警检查
    if (( $(echo "$CONNECTION_USAGE > 80" | bc -l) )); then
        echo "警告: 连接使用率过高 (${CONNECTION_USAGE}%)"
        
        # 显示当前连接详情
        mysql -e "SELECT user, host, db, command, time, state FROM information_schema.processlist WHERE command != 'Sleep' ORDER BY time DESC LIMIT 10;"
    fi
    
    echo "---"
    sleep 30
done
EOF

chmod +x /tmp/mysql_connection_monitor.sh
echo "连接监控脚本已生成: /tmp/mysql_connection_monitor.sh"

9.7.3 故障排除与诊断

1. 性能故障诊断流程

#!/bin/bash
# MySQL性能故障诊断脚本

echo "=== MySQL性能故障诊断 ==="
echo "诊断时间: $(date)"
echo

# 配置参数
MYSQL_USER="root"
MYSQL_PASS=""
# 密码为空时不传 -p 选项, 避免脚本被交互式密码提示阻塞
MYSQL_OPTS="-u${MYSQL_USER}"
if [ -n "$MYSQL_PASS" ]; then
    MYSQL_OPTS="${MYSQL_OPTS} -p${MYSQL_PASS}"
fi
LOG_FILE="/tmp/mysql_diagnosis_$(date +%Y%m%d_%H%M%S).log"

# 创建诊断日志
exec > >(tee -a $LOG_FILE)
exec 2>&1

echo "诊断日志: $LOG_FILE"
echo

# 1. 系统资源检查
echo "1. 系统资源使用情况:"
echo "CPU使用率:"
if command -v top >/dev/null 2>&1; then
    top -bn1 | grep "Cpu(s)" | awk '{print $2}' | cut -d'%' -f1
else
    echo "top命令不可用"
fi

echo "\n内存使用情况:"
if command -v free >/dev/null 2>&1; then
    free -h
else
    echo "free命令不可用"
fi

echo "\n磁盘I/O:"
if command -v iostat >/dev/null 2>&1; then
    iostat -x 1 3
else
    echo "iostat命令不可用,请安装sysstat包"
fi

echo "\n磁盘空间:"
df -h

# 2. MySQL状态检查
echo
echo "2. MySQL状态检查:"

# 检查MySQL服务状态
if command -v systemctl >/dev/null 2>&1; then
    echo "MySQL服务状态:"
    systemctl status mysql --no-pager 2>/dev/null || systemctl status mysqld --no-pager 2>/dev/null
fi

# MySQL连接检查
echo "\nMySQL连接状态:"
mysql -u$MYSQL_USER -p$MYSQL_PASS -e "SHOW GLOBAL STATUS LIKE 'Threads_connected';" 2>/dev/null
mysql -u$MYSQL_USER -p$MYSQL_PASS -e "SHOW GLOBAL STATUS LIKE 'Queries';" 2>/dev/null
mysql -u$MYSQL_USER -p$MYSQL_PASS -e "SHOW GLOBAL STATUS LIKE 'Slow_queries';" 2>/dev/null

# 3. 当前连接检查
echo
echo "3. 当前连接状态:"
mysql $MYSQL_OPTS -e "SHOW PROCESSLIST;" 2>/dev/null | head -20

# 4. 锁等待检查
echo
echo "4. 锁等待情况:"
mysql $MYSQL_OPTS -e "SELECT * FROM performance_schema.data_locks LIMIT 10;" 2>/dev/null

# 5. 慢查询检查
echo
echo "5. 最近慢查询:"
if [ -f /var/log/mysql/slow.log ]; then
    tail -50 /var/log/mysql/slow.log
elif [ -f /var/log/mysqld-slow.log ]; then
    tail -50 /var/log/mysqld-slow.log
else
    echo "慢查询日志文件未找到"
fi

echo
echo "=== 诊断完成 ==="
echo "详细诊断结果已保存到: $LOG_FILE"

9.8 总结

本章详细介绍了MySQL性能优化与监控的各个方面,从基础的性能分析工具到高级的监控系统集成。通过系统性的学习和实践,您应该能够:

9.8.1 核心能力掌握

  1. 性能分析与诊断

    • 熟练使用Performance Schema、慢查询日志等内置工具
    • 掌握第三方工具如pt-query-digest的使用
    • 能够快速定位性能瓶颈和故障原因
  2. SQL优化技能

    • 编写高效的SQL查询语句
    • 合理设计和使用索引
    • 掌握查询重写和JOIN优化技巧
  3. 系统配置调优

    • 根据业务需求优化MySQL配置参数
    • 进行操作系统级别的性能调优
    • 合理配置硬件资源
  4. 监控体系建设

    • 构建完整的性能监控系统
    • 设置合理的告警阈值和规则
    • 实现自动化的故障检测和恢复
  5. 性能测试与评估

    • 进行科学的基准测试
    • 评估优化效果
    • 建立性能基线和趋势分析

9.8.2 实施建议

  1. 建立标准化流程

    • 制定性能优化的标准流程和检查清单
    • 建立性能问题的分类和处理机制
    • 定期进行性能评估和优化
  2. 持续监控改进

    • 建立长期的性能监控体系
    • 定期分析性能趋势和变化
    • 根据业务发展调整优化策略
  3. 团队能力建设

    • 培养团队的性能优化意识
    • 建立知识分享和经验积累机制
    • 定期进行技术培训和实践

9.8.3 下一步学习

性能优化是一个持续的过程,需要根据业务发展和数据增长不断调整和改进。建议:

  1. 深入学习:继续学习MySQL内核原理和高级特性
  2. 实践积累:在实际项目中应用所学知识,积累经验
  3. 技术跟进:关注MySQL新版本的性能改进和新特性
  4. 生态学习:学习相关的监控工具和性能分析技术

下一章我们将学习MySQL的安全与权限管理,确保数据库系统的安全性和数据保护。