rollback code

咕谷酱
2025-08-22 15:07:50 +08:00
parent b1f0cbfed1
commit e293d7541b
12 changed files with 74 additions and 1710 deletions

View File

@@ -23,7 +23,7 @@ class EmailVerification(SQLModel, table=True):
is_used: bool = Field(default=False) # whether the verification code has been used
used_at: datetime | None = Field(default=None)
ip_address: str | None = Field(default=None) # requesting IP
user_agent: str | None = Field(default=None, max_length=255) # user agent
user_agent: str | None = Field(default=None) # user agent
class LoginSession(SQLModel, table=True):
@@ -35,7 +35,7 @@ class LoginSession(SQLModel, table=True):
user_id: int = Field(sa_column=Column(BigInteger, ForeignKey("lazer_users.id"), nullable=False, index=True))
session_token: str = Field(unique=True, index=True) # session token
ip_address: str = Field() # login IP
user_agent: str | None = Field(default=None, max_length=255) # user agent, limited to 255 characters
user_agent: str | None = Field(default=None)
country_code: str | None = Field(default=None)
is_verified: bool = Field(default=False) # whether the session has been verified
created_at: datetime = Field(default_factory=lambda: datetime.now(UTC))
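The only difference between the paired `user_agent` lines is the `max_length=255` constraint, which SQLModel uses to size the underlying column. A minimal sketch of what that constraint changes, with an illustrative model that is not part of this codebase:

```python
from sqlmodel import Field, SQLModel


class UAExample(SQLModel, table=True):
    """Illustrative model only; not part of this codebase."""

    id: int | None = Field(default=None, primary_key=True)
    # With max_length, SQLModel sizes the column (e.g. VARCHAR(255)); longer
    # values are rejected or truncated depending on the database and its mode.
    bounded_ua: str | None = Field(default=None, max_length=255)
    # Without max_length, the column is an unbounded string type, so raw
    # user-agent values of any length are stored as-is.
    unbounded_ua: str | None = Field(default=None)
```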

View File

@@ -206,19 +206,12 @@ async def get_message(
query = select(ChatMessage).where(ChatMessage.channel_id == channel_id)
if since > 0:
query = query.where(col(ChatMessage.message_id) > since)
# fetch messages after `since`, in ascending (chronological) order
query = query.order_by(col(ChatMessage.message_id).asc()).limit(limit)
messages = (await session.exec(query)).all()
resp = [await ChatMessageResp.from_db(msg, session) for msg in messages]
else:
# fetch the latest messages: query newest-first, then reverse into chronological order
if until is not None:
query = query.where(col(ChatMessage.message_id) < until)
query = query.order_by(col(ChatMessage.message_id).desc()).limit(limit)
messages = (await session.exec(query)).all()
resp = [await ChatMessageResp.from_db(msg, session) for msg in messages]
# reverse into chronological order (oldest first)
resp.reverse()
if until is not None:
query = query.where(col(ChatMessage.message_id) < until)
query = query.order_by(col(ChatMessage.message_id).desc()).limit(limit)
messages = (await session.exec(query)).all()
resp = [await ChatMessageResp.from_db(msg, session) for msg in messages]
return resp
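With the collapsed code path, the handler filters on `since`/`until` and always returns the newest `limit` messages in descending `message_id` order, without re-reversing them. A rough sketch of a client that still wants chronological order; the endpoint path and response field names are assumptions, not taken from this repository:

```python
import httpx


async def fetch_page_chronological(
    base_url: str, channel_id: int, since: int = 0, limit: int = 50
) -> list[dict]:
    """Fetch one page of messages and return it oldest-first."""
    async with httpx.AsyncClient(base_url=base_url) as client:
        resp = await client.get(
            f"/chat/channels/{channel_id}/messages",  # assumed path
            params={"since": since, "limit": limit},
        )
        resp.raise_for_status()
        messages = resp.json()
    # The handler orders by message_id descending, so sort ascending
    # client-side if chronological display order is needed.
    return sorted(messages, key=lambda m: m["message_id"])
```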

View File

@@ -5,7 +5,7 @@ import hashlib
from app.database.lazer_user import BASE_INCLUDES, User, UserResp
from app.database.team import Team, TeamMember, TeamRequest
from app.dependencies.database import Database, get_redis
from app.dependencies.database import Database
from app.dependencies.storage import get_storage_service
from app.dependencies.user import get_client_user
from app.models.notification import (
@@ -19,9 +19,8 @@ from app.utils import check_image
from .router import router
from fastapi import Depends, File, Form, HTTPException, Request, Security
from fastapi import Depends, File, Form, HTTPException, Path, Request, Security
from pydantic import BaseModel
from redis.asyncio import Redis
from sqlmodel import exists, select
@@ -157,7 +156,7 @@ async def update_team(
@router.delete("/team/{team_id}", name="删除战队", status_code=204)
async def delete_team(
session: Database,
team_id: int,
team_id: int = Path(..., description="战队 ID"),
current_user: User = Security(get_client_user),
):
team = await session.get(Team, team_id)
@@ -185,7 +184,7 @@ class TeamQueryResp(BaseModel):
@router.get("/team/{team_id}", name="查询战队", response_model=TeamQueryResp)
async def get_team(
session: Database,
team_id: int,
team_id: int = Path(..., description="战队 ID"),
):
members = (
await session.exec(select(TeamMember).where(TeamMember.team_id == team_id))
@@ -202,9 +201,8 @@ async def get_team(
@router.post("/team/{team_id}/request", name="请求加入战队", status_code=204)
async def request_join_team(
session: Database,
team_id: int,
team_id: int = Path(..., description="战队 ID"),
current_user: User = Security(get_client_user),
redis: Redis = Depends(get_redis),
):
team = await session.get(Team, team_id)
if not team:
@@ -237,8 +235,8 @@ async def request_join_team(
async def handle_request(
req: Request,
session: Database,
team_id: int,
user_id: int,
team_id: int = Path(..., description="战队 ID"),
user_id: int = Path(..., description="用户 ID"),
current_user: User = Security(get_client_user),
):
team = await session.get(Team, team_id)
@@ -281,11 +279,11 @@ async def handle_request(
await session.commit()
@router.delete("/team/{team_id}/{user_id}", name="踢出成员 / 退出队", status_code=204)
@router.delete("/team/{team_id}/{user_id}", name="踢出成员 / 退出", status_code=204)
async def kick_member(
session: Database,
team_id: int,
user_id: int,
team_id: int = Path(..., description="战队 ID"),
user_id: int = Path(..., description="用户 ID"),
current_user: User = Security(get_client_user),
):
team = await session.get(Team, team_id)
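One side of this hunk declares path parameters with `fastapi.Path` so the generated OpenAPI schema carries a description, rather than using a bare `int` annotation. A minimal standalone sketch of the pattern; the route, description text, and `ge=1` bound are illustrative:

```python
from fastapi import FastAPI, Path

app = FastAPI()


@app.delete("/team/{team_id}", status_code=204)
async def delete_team(
    # Path(...) keeps the parameter required while attaching metadata
    # (description, validation) that appears in the OpenAPI docs.
    team_id: int = Path(..., description="Team ID", ge=1),
) -> None:
    ...
```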

View File

@@ -218,10 +218,6 @@ This email was sent automatically, please do not reply.
# generate a new verification code
code = EmailVerificationService.generate_verification_code()
# parse the user-agent string
from app.utils import parse_user_agent
parsed_user_agent = parse_user_agent(user_agent, max_length=255)
# create the verification record
verification = EmailVerification(
user_id=user_id,
@@ -229,7 +225,7 @@ This email was sent automatically, please do not reply.
verification_code=code,
expires_at=datetime.now(UTC) + timedelta(minutes=10), # expires in 10 minutes
ip_address=ip_address,
user_agent=parsed_user_agent # use the parsed user agent
user_agent=user_agent
)
db.add(verification)
@@ -392,18 +388,13 @@ class LoginSessionService:
is_new_location: bool = False
) -> LoginSession:
"""创建登录会话"""
from app.utils import parse_user_agent
# parse the user-agent string and extract the key information
parsed_user_agent = parse_user_agent(user_agent, max_length=255)
session_token = EmailVerificationService.generate_session_token()
session = LoginSession(
user_id=user_id,
session_token=session_token,
ip_address=ip_address,
user_agent=parsed_user_agent, # use the parsed user agent
user_agent=user_agent,
country_code=country_code,
is_new_location=is_new_location,
expires_at=datetime.now(UTC) + timedelta(hours=24), # expires in 24 hours

View File

@@ -20,7 +20,6 @@ async def maintain_playing_users_online_status():
Periodically refresh the metadata online flag for users who are currently playing,
so they keep showing as online while in a game.
Does not restore the online status of users who have already gone offline.
"""
redis_sync = get_redis_message()
redis_async = get_redis()
@@ -32,25 +31,17 @@ async def maintain_playing_users_online_status():
if not playing_users:
return
logger.debug(f"Checking online status for {len(playing_users)} playing users")
logger.debug(f"Maintaining online status for {len(playing_users)} playing users")
# only refresh the online status of users that still have a valid connection
updated_count = 0
# refresh the metadata online flag for every playing user
for user_id in playing_users:
user_id_str = user_id.decode() if isinstance(user_id, bytes) else str(user_id)
metadata_key = f"metadata:online:{user_id_str}"
# important: first check whether the user already has an online flag, and only refresh it if it exists
if await redis_async.exists(metadata_key):
# only update users that are already online; do not restore users who have quit
await redis_async.set(metadata_key, "playing", ex=3600)
updated_count += 1
else:
# if the user has quit (no online flag), remove them from the playing set
await _redis_exec(redis_sync.srem, REDIS_PLAYING_USERS_KEY, user_id)
logger.debug(f"Removed user {user_id_str} from playing users as they are offline")
# set or refresh the metadata online flag with a 1-hour expiry
await redis_async.set(metadata_key, "playing", ex=3600)
logger.debug(f"Updated metadata online status for {updated_count} playing users")
logger.debug(f"Updated metadata online status for {len(playing_users)} playing users")
except Exception as e:
logger.error(f"Error maintaining playing users online status: {e}")
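Both variants of this task rely on the same Redis TTL pattern: the `metadata:online:{user_id}` key is rewritten with a one-hour expiry, so a user only drops offline once the key stops being refreshed; they differ in whether the flag is refreshed only when it still exists. A minimal sketch of that pattern with `redis.asyncio` (the key name comes from the diff, the rest is illustrative):

```python
from redis.asyncio import Redis


async def refresh_online_flag(redis: Redis, user_id: int, only_if_present: bool) -> bool:
    """Refresh a user's online flag; only_if_present=True avoids resurrecting
    users whose flag has already expired."""
    key = f"metadata:online:{user_id}"
    if only_if_present and not await redis.exists(key):
        return False
    # (Re)write the flag with a 1-hour TTL; it expires once refreshes stop.
    await redis.set(key, "playing", ex=3600)
    return True
```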

View File

@@ -358,7 +358,9 @@ class RedisMessageSystem:
# ensure messages are sorted by ID ascending (chronological order)
messages.sort(key=lambda x: x.get("message_id", 0))
return messages
# when fetching the latest messages (since=0), keep descending order (newest first)
if since == 0:
messages.reverse()
return messages

File diff suppressed because it is too large

View File

@@ -1,221 +0,0 @@
"""
Multiplayer packet cleanup manager.
Packet cleanup logic based on the osu-server source.
"""
from __future__ import annotations
import asyncio
from datetime import UTC, datetime, timedelta
from typing import Dict, List, Optional, Set
from collections import defaultdict
import logging
logger = logging.getLogger(__name__)
class MultiplayerPacketCleaner:
"""多人游戏数据包清理管理器基于osu源码设计"""
def __init__(self):
# queue of packets pending cleanup
self.cleanup_queue: Dict[int, List[Dict]] = defaultdict(list)
# mapping of room id to its running cleanup task
self.cleanup_tasks: Dict[int, asyncio.Task] = {}
# delay before cleanup runs (seconds)
self.cleanup_delay = 5.0
# forced-cleanup threshold (seconds)
self.force_cleanup_delay = 30.0
async def schedule_cleanup(self, room_id: int, packet_data: Dict):
"""安排数据包清理参考osu源码的清理调度"""
self.cleanup_queue[room_id].append({
**packet_data,
'scheduled_at': datetime.now(UTC),
'room_id': room_id
})
# start a new cleanup task if none is currently running
if room_id not in self.cleanup_tasks or self.cleanup_tasks[room_id].done():
self.cleanup_tasks[room_id] = asyncio.create_task(
self._delayed_cleanup_task(room_id)
)
logger.debug(f"[PacketCleaner] Scheduled cleanup task for room {room_id}")
async def _delayed_cleanup_task(self, room_id: int):
"""延迟清理任务类似osu源码的延迟清理机制"""
try:
# wait for the configured delay
await asyncio.sleep(self.cleanup_delay)
# run the cleanup
await self._execute_cleanup(room_id)
except asyncio.CancelledError:
logger.debug(f"[PacketCleaner] Cleanup task for room {room_id} was cancelled")
raise
except Exception as e:
logger.error(f"[PacketCleaner] Error during cleanup for room {room_id}: {e}")
async def _execute_cleanup(self, room_id: int):
"""执行实际的清理操作"""
if room_id not in self.cleanup_queue:
return
packets_to_clean = self.cleanup_queue.pop(room_id, [])
if not packets_to_clean:
return
logger.info(f"[PacketCleaner] Cleaning {len(packets_to_clean)} packets for room {room_id}")
# group packets by type for cleanup
score_packets = []
state_packets = []
leaderboard_packets = []
for packet in packets_to_clean:
packet_type = packet.get('type', 'unknown')
if packet_type == 'score':
score_packets.append(packet)
elif packet_type == 'state':
state_packets.append(packet)
elif packet_type == 'leaderboard':
leaderboard_packets.append(packet)
# clean up score packets
if score_packets:
await self._cleanup_score_packets(room_id, score_packets)
# clean up state packets
if state_packets:
await self._cleanup_state_packets(room_id, state_packets)
# clean up leaderboard packets
if leaderboard_packets:
await self._cleanup_leaderboard_packets(room_id, leaderboard_packets)
async def _cleanup_score_packets(self, room_id: int, packets: List[Dict]):
"""清理分数相关数据包"""
user_ids = set(p.get('user_id') for p in packets if p.get('user_id'))
logger.debug(f"[PacketCleaner] Cleaning score packets for {len(user_ids)} users in room {room_id}")
# concrete cleanup logic could go here, for example:
# - drop expired score frames
# - compact historical score data
# - clear caches
async def _cleanup_state_packets(self, room_id: int, packets: List[Dict]):
"""清理状态相关数据包"""
logger.debug(f"[PacketCleaner] Cleaning {len(packets)} state packets for room {room_id}")
# state cleanup logic could go here, for example:
# - drop expired state data
# - reset user state caches
async def _cleanup_leaderboard_packets(self, room_id: int, packets: List[Dict]):
"""清理排行榜相关数据包"""
logger.debug(f"[PacketCleaner] Cleaning {len(packets)} leaderboard packets for room {room_id}")
# leaderboard cleanup logic could go here
async def force_cleanup(self, room_id: int):
"""强制立即清理指定房间的数据包"""
# 取消延迟清理任务
if room_id in self.cleanup_tasks:
task = self.cleanup_tasks.pop(room_id)
if not task.done():
task.cancel()
try:
await task
except asyncio.CancelledError:
pass
# run the cleanup immediately
await self._execute_cleanup(room_id)
logger.info(f"[PacketCleaner] Force cleaned packets for room {room_id}")
async def cleanup_all_for_room(self, room_id: int):
"""清理房间的所有数据包(房间结束时调用)"""
await self.force_cleanup(room_id)
# drop the task and queue references
self.cleanup_tasks.pop(room_id, None)
self.cleanup_queue.pop(room_id, None)
logger.info(f"[PacketCleaner] Completed full cleanup for room {room_id}")
async def cleanup_expired_packets(self):
"""定期清理过期的数据包"""
current_time = datetime.now(UTC)
expired_rooms = []
for room_id, packets in self.cleanup_queue.items():
# find packets that have exceeded the forced-cleanup threshold
expired_packets = [
p for p in packets
if (current_time - p['scheduled_at']).total_seconds() > self.force_cleanup_delay
]
if expired_packets:
expired_rooms.append(room_id)
# force-clean expired packets
for room_id in expired_rooms:
await self.force_cleanup(room_id)
def get_cleanup_stats(self) -> Dict:
"""获取清理统计信息"""
return {
'active_cleanup_tasks': len([t for t in self.cleanup_tasks.values() if not t.done()]),
'pending_packets': sum(len(packets) for packets in self.cleanup_queue.values()),
'rooms_with_pending_cleanup': len(self.cleanup_queue),
}
# global instance
packet_cleaner = MultiplayerPacketCleaner()
class GameSessionCleaner:
"""游戏会话清理器参考osu源码的会话管理"""
@staticmethod
async def cleanup_game_session(room_id: int, game_completed: bool = False):
"""清理游戏会话数据(每局游戏结束后调用)"""
try:
# schedule packet cleanup
await packet_cleaner.schedule_cleanup(room_id, {
'type': 'game_session_end',
'completed': game_completed,
'timestamp': datetime.now(UTC).isoformat()
})
logger.info(f"[GameSessionCleaner] Scheduled cleanup for game session in room {room_id} (completed: {game_completed})")
except Exception as e:
logger.error(f"[GameSessionCleaner] Failed to cleanup game session for room {room_id}: {e}")
@staticmethod
async def cleanup_user_session(room_id: int, user_id: int):
"""清理单个用户的会话数据"""
try:
await packet_cleaner.schedule_cleanup(room_id, {
'type': 'user_session_end',
'user_id': user_id,
'timestamp': datetime.now(UTC).isoformat()
})
logger.debug(f"[GameSessionCleaner] Scheduled cleanup for user {user_id} in room {room_id}")
except Exception as e:
logger.error(f"[GameSessionCleaner] Failed to cleanup user session {user_id} in room {room_id}: {e}")
@staticmethod
async def cleanup_room_fully(room_id: int):
"""完全清理房间数据(房间关闭时调用)"""
try:
await packet_cleaner.cleanup_all_for_room(room_id)
logger.info(f"[GameSessionCleaner] Completed full room cleanup for {room_id}")
except Exception as e:
logger.error(f"[GameSessionCleaner] Failed to fully cleanup room {room_id}: {e}")
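For context, the usage guide removed later in this diff shows where these entry points were called from the multiplayer hub. A condensed sketch of that wiring; the surrounding class and import path are illustrative, and only the `GameSessionCleaner` calls come from the guide:

```python
from app.multiplayer_packet_cleaner import GameSessionCleaner  # hypothetical path


class MultiplayerHubSketch:
    """Illustrative stand-in for the real multiplayer hub."""

    async def on_all_players_finished(self, room_id: int) -> None:
        # end of each game: schedule session-level cleanup
        await GameSessionCleaner.cleanup_game_session(room_id, game_completed=True)

    async def on_user_left(self, room_id: int, user_id: int) -> None:
        # a user leaves the room: clean up their per-user data
        await GameSessionCleaner.cleanup_user_session(room_id, user_id)

    async def on_room_closed(self, room_id: int) -> None:
        # room closed: clear everything, including pending cleanup tasks
        await GameSessionCleaner.cleanup_room_fully(room_id)
```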

View File

@@ -34,7 +34,6 @@ from app.models.spectator_hub import (
from app.utils import unix_timestamp_to_windows
from .hub import Client, Hub
from .spectator_buffer import spectator_state_manager
from httpx import HTTPError
from sqlalchemy.orm import joinedload
@@ -204,22 +203,8 @@ class SpectatorHub(Hub[StoreClientState]):
# Send all current player states to the new client
# This matches the official OnConnectedAsync behavior
active_states = []
# first, read states from the buffer
buffer_stats = spectator_state_manager.get_buffer_stats()
if buffer_stats['active_users'] > 0:
logger.debug(f"[SpectatorHub] Found {buffer_stats['active_users']} users in buffer")
# collect all active users from the buffer
active_users = spectator_state_manager.buffer.get_all_active_users()
for user_id in active_users:
state = spectator_state_manager.buffer.get_user_state(user_id)
if state and state.state == SpectatedUserState.Playing:
active_states.append((user_id, state))
# then fall back to local state
for user_id, store in self.state.items():
if store.state is not None and user_id not in [state[0] for state in active_states]:
if store.state is not None:
active_states.append((user_id, store.state))
if active_states:
@@ -256,41 +241,24 @@ class SpectatorHub(Hub[StoreClientState]):
and room_user.user_id not in self.state
):
# Create a synthetic SpectatorState for multiplayer players
# key fix: handle multiplayer rooms where different users may have picked different beatmaps
# This helps with cross-hub spectating
try:
# get the user's selected beatmap ID (for free-choice mode)
user_beatmap_id = getattr(room_user, 'beatmap_id', None) or server_room.queue.current_item.beatmap_id
user_ruleset_id = room_user.ruleset_id or server_room.queue.current_item.ruleset_id or 0
user_mods = room_user.mods or []
synthetic_state = SpectatorState(
beatmap_id=user_beatmap_id,
ruleset_id=user_ruleset_id,
mods=user_mods,
beatmap_id=server_room.queue.current_item.beatmap_id,
ruleset_id=room_user.ruleset_id or 0, # Default to osu!
mods=room_user.mods,
state=SpectatedUserState.Playing,
maximum_statistics={},
)
# sync to the buffer manager
multiplayer_data = {
'room_id': room_id,
'beatmap_id': user_beatmap_id,
'ruleset_id': user_ruleset_id,
'mods': user_mods,
'state': room_user.state,
'maximum_statistics': {},
'is_multiplayer': True
}
await spectator_state_manager.sync_with_multiplayer(room_user.user_id, multiplayer_data)
await self.call_noblock(
client,
"UserBeganPlaying",
room_user.user_id,
synthetic_state,
)
logger.info(
f"[SpectatorHub] Sent synthetic multiplayer state for user {room_user.user_id} (beatmap: {user_beatmap_id}, ruleset: {user_ruleset_id})"
logger.debug(
f"[SpectatorHub] Sent synthetic multiplayer state for user {room_user.user_id}"
)
except Exception as e:
logger.debug(
@@ -312,9 +280,6 @@ class SpectatorHub(Hub[StoreClientState]):
maximum_statistics={},
)
# also sync the finished state to the buffer
await spectator_state_manager.handle_user_finished_playing(room_user.user_id, finished_state)
await self.call_noblock(
client,
"UserFinishedPlaying",
@@ -386,15 +351,6 @@ class SpectatorHub(Hub[StoreClientState]):
# # pre-cache the beatmap file to speed up later PP calculation
# await self._preload_beatmap_for_pp_calculation(state.beatmap_id)
# update the buffer state
session_data = {
'beatmap_checksum': store.checksum,
'score_token': score_token,
'username': name,
'started_at': time.time()
}
await spectator_state_manager.handle_user_began_playing(user_id, state, session_data)
await self.broadcast_group_call(
self.group_id(user_id),
"UserBeganPlaying",
@@ -421,9 +377,6 @@ class SpectatorHub(Hub[StoreClientState]):
score_info.statistics = header.statistics
store.score.replay_frames.extend(frame_data.frames)
# update the frame data in the buffer
await spectator_state_manager.handle_frame_data(user_id, frame_data)
await self.broadcast_group_call(
self.group_id(user_id), "UserSentFrames", user_id, frame_data
)
@@ -605,9 +558,6 @@ class SpectatorHub(Hub[StoreClientState]):
self.tasks.add(task)
task.add_done_callback(self.tasks.discard)
# notify the buffer manager that the user finished playing
await spectator_state_manager.handle_user_finished_playing(user_id, state)
logger.info(
f"[SpectatorHub] {user_id} finished playing {state.beatmap_id} "
f"with {state.state}"
@@ -628,54 +578,27 @@ class SpectatorHub(Hub[StoreClientState]):
logger.info(f"[SpectatorHub] {user_id} started watching {target_id}")
# use the buffer manager to handle the start of spectating and get catch-up data
catchup_bundle = await spectator_state_manager.handle_spectator_start_watching(user_id, target_id)
try:
# first try to read the state from the buffer
buffered_state = spectator_state_manager.buffer.get_user_state(target_id)
if buffered_state and buffered_state.state == SpectatedUserState.Playing:
logger.info(
f"[SpectatorHub] Sending buffered state for {target_id} to spectator {user_id} "
f"(beatmap: {buffered_state.beatmap_id}, ruleset: {buffered_state.ruleset_id})"
)
await self.call_noblock(client, "UserBeganPlaying", target_id, buffered_state)
# send recent frame data to help synchronisation
recent_frames = spectator_state_manager.buffer.get_recent_frames(target_id, 10)
for frame_data in recent_frames:
try:
await self.call_noblock(client, "UserSentFrames", target_id, frame_data)
except Exception as e:
logger.debug(f"[SpectatorHub] Failed to send frame data: {e}")
# if there is a catch-up bundle, send extra sync information
if catchup_bundle:
multiplayer_data = catchup_bundle.get('multiplayer_data')
if multiplayer_data and multiplayer_data.get('is_multiplayer'):
logger.info(
f"[SpectatorHub] Sending multiplayer sync data for {target_id} "
f"(room: {multiplayer_data.get('room_id')})"
)
else:
# fall back to local state
target_store = self.state.get(target_id)
if target_store and target_store.state:
# CRITICAL FIX: Only send state if user is actually playing
# Don't send state for finished/quit games
if target_store.state.state == SpectatedUserState.Playing:
logger.debug(f"[SpectatorHub] {target_id} is currently playing, sending local state")
await self.call_noblock(client, "UserBeganPlaying", target_id, target_store.state)
else:
logger.debug(f"[SpectatorHub] {target_id} state is {target_store.state.state}, not sending to watcher")
# Get target user's current state if it exists
target_store = self.state.get(target_id)
if target_store and target_store.state:
# CRITICAL FIX: Only send state if user is actually playing
# Don't send state for finished/quit games
if target_store.state.state == SpectatedUserState.Playing:
logger.debug(
f"[SpectatorHub] {target_id} is currently playing, sending state"
)
# Send current state to the watcher immediately
await self.call_noblock(
client,
"UserBeganPlaying",
target_id,
target_store.state,
)
else:
# check the multiplayer sync cache
multiplayer_data = spectator_state_manager.buffer.get_multiplayer_sync_data(target_id)
if multiplayer_data:
logger.debug(f"[SpectatorHub] Sending multiplayer sync data for {target_id}")
# multiplayer state information could be sent here
logger.debug(
f"[SpectatorHub] {target_id} state is {target_store.state.state}, not sending to watcher"
)
except Exception as e:
# User isn't tracked or error occurred - this is not critical
logger.debug(f"[SpectatorHub] Could not get state for {target_id}: {e}")
@@ -721,9 +644,6 @@ class SpectatorHub(Hub[StoreClientState]):
logger.info(f"[SpectatorHub] {user_id} ended watching {target_id}")
# use the buffer manager to handle the end of spectating
await spectator_state_manager.handle_spectator_stop_watching(user_id, target_id)
# Remove from SignalR group
self.remove_from_group(client, self.group_id(target_id))

View File

@@ -1,339 +0,0 @@
"""
Spectator hub buffer manager.
Fixes spectating and leaderboard desynchronisation after the first game ends.
"""
from __future__ import annotations
import asyncio
from datetime import UTC, datetime, timedelta
from typing import Dict, List, Optional, Tuple, Set
from collections import defaultdict, deque
import logging
from app.models.spectator_hub import SpectatorState, FrameDataBundle, SpectatedUserState
from app.models.multiplayer_hub import MultiplayerUserState
logger = logging.getLogger(__name__)
class SpectatorBuffer:
"""观战数据缓冲区,解决观战状态不同步问题"""
def __init__(self):
# user ID -> cached gameplay state
self.user_states: Dict[int, SpectatorState] = {}
# user ID -> frame data buffer (keeps the most recent frames)
self.frame_buffers: Dict[int, deque] = defaultdict(lambda: deque(maxlen=30))
# user ID -> last activity time
self.last_activity: Dict[int, datetime] = {}
# user ID -> set of spectators
self.spectators: Dict[int, Set[int]] = defaultdict(set)
# user ID -> game session info
self.session_info: Dict[int, Dict] = {}
# multiplayer sync cache
self.multiplayer_sync_cache: Dict[int, Dict] = {} # user_id -> multiplayer_data
# buffer expiry time (minutes)
self.buffer_expire_time = 10
def update_user_state(self, user_id: int, state: SpectatorState, session_data: Optional[Dict] = None):
"""更新用户状态到缓冲区"""
self.user_states[user_id] = state
self.last_activity[user_id] = datetime.now(UTC)
if session_data:
self.session_info[user_id] = session_data
logger.debug(f"[SpectatorBuffer] Updated state for user {user_id}: {state.state}")
def add_frame_data(self, user_id: int, frame_data: FrameDataBundle):
"""添加帧数据到缓冲区"""
self.frame_buffers[user_id].append({
'data': frame_data,
'timestamp': datetime.now(UTC)
})
self.last_activity[user_id] = datetime.now(UTC)
def get_user_state(self, user_id: int) -> Optional[SpectatorState]:
"""获取用户当前状态"""
return self.user_states.get(user_id)
def get_recent_frames(self, user_id: int, count: int = 10) -> List[FrameDataBundle]:
"""获取用户最近的帧数据"""
frames = self.frame_buffers.get(user_id, deque())
recent_frames = list(frames)[-count:] if len(frames) >= count else list(frames)
return [frame['data'] for frame in recent_frames]
def add_spectator(self, user_id: int, spectator_id: int):
"""添加观战者"""
self.spectators[user_id].add(spectator_id)
logger.debug(f"[SpectatorBuffer] Added spectator {spectator_id} to user {user_id}")
def remove_spectator(self, user_id: int, spectator_id: int):
"""移除观战者"""
self.spectators[user_id].discard(spectator_id)
logger.debug(f"[SpectatorBuffer] Removed spectator {spectator_id} from user {user_id}")
def get_spectators(self, user_id: int) -> Set[int]:
"""获取用户的所有观战者"""
return self.spectators.get(user_id, set())
def clear_user_data(self, user_id: int):
"""清理用户数据(游戏结束时调用,但保留一段时间用于观战同步)"""
# 不立即删除,而是标记为已结束,延迟清理
if user_id in self.user_states:
current_state = self.user_states[user_id]
if current_state.state == SpectatedUserState.Playing:
# 将状态标记为已结束,但保留在缓冲区中
current_state.state = SpectatedUserState.Passed # 或其他结束状态
self.user_states[user_id] = current_state
logger.debug(f"[SpectatorBuffer] Marked user {user_id} as finished, keeping in buffer")
def cleanup_expired_data(self):
"""清理过期数据"""
current_time = datetime.now(UTC)
expired_users = []
for user_id, last_time in self.last_activity.items():
if (current_time - last_time).total_seconds() > self.buffer_expire_time * 60:
expired_users.append(user_id)
for user_id in expired_users:
self._force_clear_user(user_id)
logger.debug(f"[SpectatorBuffer] Cleaned expired data for user {user_id}")
def _force_clear_user(self, user_id: int):
"""强制清理用户数据"""
self.user_states.pop(user_id, None)
self.frame_buffers.pop(user_id, None)
self.last_activity.pop(user_id, None)
self.spectators.pop(user_id, None)
self.session_info.pop(user_id, None)
self.multiplayer_sync_cache.pop(user_id, None)
def sync_multiplayer_state(self, user_id: int, multiplayer_data: Dict):
"""同步多人游戏状态"""
self.multiplayer_sync_cache[user_id] = {
**multiplayer_data,
'synced_at': datetime.now(UTC)
}
logger.debug(f"[SpectatorBuffer] Synced multiplayer state for user {user_id}")
def get_multiplayer_sync_data(self, user_id: int) -> Optional[Dict]:
"""获取多人游戏同步数据"""
return self.multiplayer_sync_cache.get(user_id)
def has_active_spectators(self, user_id: int) -> bool:
"""检查用户是否有活跃的观战者"""
return len(self.spectators.get(user_id, set())) > 0
def get_all_active_users(self) -> List[int]:
"""获取所有活跃用户"""
current_time = datetime.now(UTC)
active_users = []
for user_id, last_time in self.last_activity.items():
if (current_time - last_time).total_seconds() < 300: # active within the last 5 minutes
active_users.append(user_id)
return active_users
def create_catchup_bundle(self, user_id: int) -> Optional[Dict]:
"""为新观战者创建追赶数据包"""
if user_id not in self.user_states:
return None
state = self.user_states[user_id]
recent_frames = self.get_recent_frames(user_id, 20) # last 20 frames
session_data = self.session_info.get(user_id, {})
return {
'user_id': user_id,
'state': state,
'recent_frames': recent_frames,
'session_info': session_data,
'multiplayer_data': self.get_multiplayer_sync_data(user_id),
'created_at': datetime.now(UTC).isoformat()
}
def get_buffer_stats(self) -> Dict:
"""获取缓冲区统计信息"""
return {
'active_users': len(self.user_states),
'total_spectators': sum(len(specs) for specs in self.spectators.values()),
'buffered_frames': sum(len(frames) for frames in self.frame_buffers.values()),
'multiplayer_synced_users': len(self.multiplayer_sync_cache)
}
class SpectatorStateManager:
"""观战状态管理器,处理状态同步和缓冲"""
def __init__(self):
self.buffer = SpectatorBuffer()
self.cleanup_task: Optional[asyncio.Task] = None
self.start_cleanup_task()
def start_cleanup_task(self):
"""启动定期清理任务"""
if self.cleanup_task is None or self.cleanup_task.done():
self.cleanup_task = asyncio.create_task(self._periodic_cleanup())
async def _periodic_cleanup(self):
"""定期清理过期数据"""
while True:
try:
await asyncio.sleep(300) # clean up every 5 minutes
self.buffer.cleanup_expired_data()
stats = self.buffer.get_buffer_stats()
if stats['active_users'] > 0:
logger.debug(f"[SpectatorStateManager] Buffer stats: {stats}")
except Exception as e:
logger.error(f"[SpectatorStateManager] Error in periodic cleanup: {e}")
except asyncio.CancelledError:
logger.info("[SpectatorStateManager] Periodic cleanup task cancelled")
break
async def handle_user_began_playing(self, user_id: int, state: SpectatorState, session_data: Optional[Dict] = None):
"""处理用户开始游戏"""
self.buffer.update_user_state(user_id, state, session_data)
# if there are spectators, keep the buffer warm for catch-up data
spectators = self.buffer.get_spectators(user_id)
if spectators:
logger.debug(f"[SpectatorStateManager] User {user_id} has {len(spectators)} spectators, maintaining buffer")
async def handle_user_finished_playing(self, user_id: int, final_state: SpectatorState):
"""处理用户结束游戏"""
# 更新为结束状态,但保留在缓冲区中以便观战者同步
self.buffer.update_user_state(user_id, final_state)
# keep the data in the buffer longer if there are spectators
if self.buffer.has_active_spectators(user_id):
logger.debug(f"[SpectatorStateManager] User {user_id} finished, keeping data for spectators")
else:
# delayed cleanup
asyncio.create_task(self._delayed_cleanup_user(user_id, 60)) # clean up after 60 seconds
async def _delayed_cleanup_user(self, user_id: int, delay_seconds: int):
"""延迟清理用户数据"""
await asyncio.sleep(delay_seconds)
if not self.buffer.has_active_spectators(user_id):
self.buffer.clear_user_data(user_id)
logger.debug(f"[SpectatorStateManager] Delayed cleanup for user {user_id}")
async def handle_frame_data(self, user_id: int, frame_data: FrameDataBundle):
"""处理帧数据"""
self.buffer.add_frame_data(user_id, frame_data)
async def handle_spectator_start_watching(self, spectator_id: int, target_id: int) -> Optional[Dict]:
"""处理观战者开始观看,返回追赶数据包"""
self.buffer.add_spectator(target_id, spectator_id)
# create a catch-up bundle for the new spectator
catchup_bundle = self.buffer.create_catchup_bundle(target_id)
if catchup_bundle:
logger.debug(f"[SpectatorStateManager] Created catchup bundle for spectator {spectator_id} watching {target_id}")
return catchup_bundle
async def handle_spectator_stop_watching(self, spectator_id: int, target_id: int):
"""处理观战者停止观看"""
self.buffer.remove_spectator(target_id, spectator_id)
async def sync_with_multiplayer(self, user_id: int, multiplayer_data: Dict):
"""与多人游戏模式同步"""
self.buffer.sync_multiplayer_state(user_id, multiplayer_data)
beatmap_id = multiplayer_data.get('beatmap_id')
ruleset_id = multiplayer_data.get('ruleset_id', 0)
logger.info(
f"[SpectatorStateManager] Syncing multiplayer data for user {user_id}: "
f"beatmap={beatmap_id}, ruleset={ruleset_id}"
)
# if the user is not in the SpectatorHub but is in a multiplayer game, create a synthetic synced state
if user_id not in self.buffer.user_states:
try:
synthetic_state = SpectatorState(
beatmap_id=beatmap_id,
ruleset_id=ruleset_id,
mods=multiplayer_data.get('mods', []),
state=self._convert_multiplayer_state(multiplayer_data.get('state')),
maximum_statistics=multiplayer_data.get('maximum_statistics', {}),
)
await self.handle_user_began_playing(user_id, synthetic_state, {
'source': 'multiplayer',
'room_id': multiplayer_data.get('room_id'),
'beatmap_id': beatmap_id,
'ruleset_id': ruleset_id,
'is_multiplayer': multiplayer_data.get('is_multiplayer', True),
'synced_at': datetime.now(UTC).isoformat()
})
logger.info(
f"[SpectatorStateManager] Created synthetic state for multiplayer user {user_id} "
f"(beatmap: {beatmap_id}, ruleset: {ruleset_id})"
)
except Exception as e:
logger.error(f"[SpectatorStateManager] Failed to create synthetic state for user {user_id}: {e}")
else:
# update the existing state
existing_state = self.buffer.user_states[user_id]
if existing_state.beatmap_id != beatmap_id or existing_state.ruleset_id != ruleset_id:
logger.info(
f"[SpectatorStateManager] Updating state for user {user_id}: "
f"beatmap {existing_state.beatmap_id} -> {beatmap_id}, "
f"ruleset {existing_state.ruleset_id} -> {ruleset_id}"
)
# update the state to match the multiplayer game
existing_state.beatmap_id = beatmap_id
existing_state.ruleset_id = ruleset_id
existing_state.mods = multiplayer_data.get('mods', [])
self.buffer.update_user_state(user_id, existing_state)
def _convert_multiplayer_state(self, mp_state) -> SpectatedUserState:
"""将多人游戏状态转换为观战状态"""
if not mp_state:
return SpectatedUserState.Playing
# mp_state is assumed to be a MultiplayerUserState
if hasattr(mp_state, 'name'):
state_name = mp_state.name
if 'PLAYING' in state_name:
return SpectatedUserState.Playing
elif 'RESULTS' in state_name:
return SpectatedUserState.Passed
elif 'FAILED' in state_name:
return SpectatedUserState.Failed
elif 'QUIT' in state_name:
return SpectatedUserState.Quit
return SpectatedUserState.Playing # default state
def get_buffer_stats(self) -> Dict:
"""获取缓冲区统计信息"""
return self.buffer.get_buffer_stats()
def stop_cleanup_task(self):
"""停止清理任务"""
if self.cleanup_task and not self.cleanup_task.done():
self.cleanup_task.cancel()
# global instance
spectator_state_manager = SpectatorStateManager()
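The SpectatorHub changes earlier in this diff remove the calls into this manager. For reference, a condensed sketch of how the hub drove it; the wrapper functions and import path are illustrative, while the manager calls and session fields match the removed hub code:

```python
import time

from app.spectator_buffer import spectator_state_manager  # hypothetical path


async def on_user_began_playing(user_id, state, score_token, checksum, name):
    # build the session context the hub attached when a play started
    session_data = {
        "beatmap_checksum": checksum,
        "score_token": score_token,
        "username": name,
        "started_at": time.time(),
    }
    await spectator_state_manager.handle_user_began_playing(user_id, state, session_data)


async def on_user_sent_frames(user_id, frame_data):
    await spectator_state_manager.handle_frame_data(user_id, frame_data)


async def on_start_watching(spectator_id, target_id):
    # returns a catch-up bundle (state, recent frames, multiplayer data) for the new spectator
    return await spectator_state_manager.handle_spectator_start_watching(spectator_id, target_id)
```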

View File

@@ -130,63 +130,6 @@ def truncate(text: str, limit: int = 100, ellipsis: str = "...") -> str:
return text
def parse_user_agent(user_agent: str | None, max_length: int = 255) -> str | None:
"""
Parse a user-agent string and extract the key information: device, OS and browser.
Args:
user_agent: the raw user-agent string
max_length: maximum length of the result
Returns:
the simplified user-agent string
"""
if user_agent is None:
return None
# check whether this is the osu! client
if "osu!" in user_agent.lower():
return "osu!"
# extract the key information
parsed_info = []
# extract device information
device_matches = [
# common mobile device models
r"(iPhone|iPad|iPod|Android|ALI-AN00|SM-\w+|MI \w+|Redmi|HUAWEI|HONOR|POCO)",
# other device keywords
r"(Windows NT|Macintosh|Linux|Ubuntu)"
]
import re
for pattern in device_matches:
matches = re.findall(pattern, user_agent)
if matches:
parsed_info.extend(matches)
# extract browser information
browser_matches = [
r"(Chrome|Firefox|Safari|Edge|MSIE|MQQBrowser|MiuiBrowser|OPR|Opera)",
r"(WebKit|Gecko|Trident)"
]
for pattern in browser_matches:
matches = re.findall(pattern, user_agent)
if matches:
# only take the first matching browser
parsed_info.append(matches[0])
break
# combine the extracted pieces
if parsed_info:
result = " / ".join(set(parsed_info))
return truncate(result, max_length - 3, "...")
# if nothing could be parsed, truncate the original string
return truncate(user_agent, max_length - 3, "...")
def check_image(content: bytes, size: int, width: int, height: int) -> None:
if len(content) > size: # 10MB limit
raise HTTPException(status_code=400, detail="File size exceeds 10MB limit")
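For reference, a rough illustration of what the removed helper produced versus what is stored once it is gone; the sample user-agent string is made up, and the summarised output depends on regex match and set ordering:

```python
sample_ua = (
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 "
    "(KHTML, like Gecko) Chrome/126.0 Safari/537.36"
)

# parse_user_agent(sample_ua) reduced this to a short summary such as
# "Windows NT / Chrome" (order may vary), capped at max_length, and any
# string containing "osu!" collapsed to just "osu!".
# Without the helper, the raw string above is stored unchanged.
```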

View File

@@ -1,150 +0,0 @@
# Multiplayer Packet Cleanup System Usage Guide
## Overview
A multiplayer packet cleanup system modelled on the osu-server source. It ensures that forwarded packets and gameplay state data are cleaned up automatically after each game, preventing memory leaks and performance problems.
## Key Improvements
### 1. GameplayStateBuffer enhancements
- **`cleanup_game_session(room_id)`**: cleans up session data after each game
- **`reset_user_gameplay_state(room_id, user_id)`**: resets a single user's gameplay state
- **Preserved room structure**: data is cleaned up while the room stays active
### 2. MultiplayerPacketCleaner packet cleanup manager
- **Delayed cleanup**: avoids a performance hit during gameplay
- **Typed cleanup**: packets are handled by type (score, state, leaderboard)
- **Forced cleanup**: deals with expired packets
- **Statistics**: exposes cleanup statistics for monitoring
### 3. GameSessionCleaner session cleaner
- **`cleanup_game_session(room_id, completed)`**: cleans up a game session
- **`cleanup_user_session(room_id, user_id)`**: cleans up a user's session
- **`cleanup_room_fully(room_id)`**: fully cleans up a room
## Cleanup Trigger Points
### 1. End of each game
```python
# in update_room_state(), once every player has finished the game
await self._cleanup_game_session(room_id, any_user_finished_playing)
```
### 2. User leaves the room
```python
# clean up the user's data in make_user_leave()
await GameSessionCleaner.cleanup_user_session(room_id, user_id)
```
### 3. User aborts gameplay
```python
# reset the user's state in AbortGameplay()
gameplay_buffer.reset_user_gameplay_state(room_id, user.user_id)
```
### 4. Host aborts the match
```python
# clean up all players' data in AbortMatch()
await self._cleanup_game_session(room_id, False) # False = the game was aborted
```
### 5. Room closed
```python
# fully clean up the room in end_room()
await GameSessionCleaner.cleanup_room_fully(room_id)
```
## Automation
### 1. Periodic cleanup task
- Checks for and cleans up expired packets every minute (see the sketch after this list)
- Prevents memory leaks
### 2. Scheduled packet cleanup
- 5-second delayed cleanup to avoid impacting real-time performance
- 30-second forced cleanup to prevent data build-up
### 3. State-aware cleanup
- The matching cleanup is triggered automatically on game state changes
- Gameplay data is reset on user state transitions
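A minimal sketch of how such a minute-by-minute task could be wired up, assuming the `packet_cleaner` instance from the module removed above; the loop, interval, and import path are illustrative:

```python
import asyncio

from app.multiplayer_packet_cleaner import packet_cleaner  # hypothetical path


async def periodic_packet_cleanup() -> None:
    """Force-clean packets older than force_cleanup_delay, roughly once a minute."""
    while True:
        await asyncio.sleep(60)
        await packet_cleaner.cleanup_expired_packets()


def start_background_cleanup() -> asyncio.Task:
    # keep a reference to the returned task and cancel it on shutdown
    return asyncio.create_task(periodic_packet_cleanup())
```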
## Performance Optimisations
### 1. Asynchronous cleanup
- All cleanup operations are asynchronous
- The main gameplay flow is never blocked
### 2. Deferred execution
- Delayed cleanup avoids overly frequent operations
- Batch processing improves efficiency
### 3. Tiered cleanup
- User-level cleanup (per-user data)
- Session-level cleanup (a single game)
- Room-level cleanup (the whole room)
## Monitoring and Debugging
### 1. Cleanup statistics
```python
stats = packet_cleaner.get_cleanup_stats()
# returns: active_cleanup_tasks, pending_packets, rooms_with_pending_cleanup
```
### 2. Logging
- Detailed logs of every cleanup operation
- Error handling and recovery mechanisms
### 3. Error handling
- Cleanup failures do not affect the game flow
- Graceful degradation
## Comparison with the osu! Server Source
### Similarities
1. **Delayed cleanup mechanism**: similar to osu!'s cleanup scheduling
2. **Typed packet management**: different packet types are handled separately
3. **State-aware cleanup**: cleanup is triggered by the game state
4. **Error isolation**: cleanup errors do not affect the game
### Python-specific optimisations
1. **Async coroutines**: async/await improves concurrency
2. **Dict caches**: defaultdict keeps the data structures simple
3. **Bounded buffers**: deque caps buffer sizes
4. **Context management**: automatic resource cleanup
## Best Practices
### 1. Clean up promptly
- Trigger session cleanup as soon as a game ends
- Clean up a user's data as soon as they leave
### 2. Clean up progressively
- Clean user data first
- Then session data
- Finally room data
### 3. Monitor resource usage
- Check cleanup statistics regularly
- Monitor memory and CPU usage
### 4. Analyse the logs
- Analyse cleanup frequency and effectiveness
- Tune the cleanup strategy accordingly
## Configuration Options
```python
# packet cleanup configuration
cleanup_delay = 5.0 # delayed cleanup interval (seconds)
force_cleanup_delay = 30.0 # forced cleanup threshold (seconds)
leaderboard_broadcast_interval = 2.0 # leaderboard broadcast interval (seconds)
# buffer size limits
score_buffer_maxlen = 50 # maximum length of the score frame buffer
```
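These values are plain attributes on the corresponding instances, so a deployment could tune them at startup. A rough sketch; the import path is an assumption, and only `cleanup_delay` and `force_cleanup_delay` appear as attributes in this diff:

```python
from app.multiplayer_packet_cleaner import packet_cleaner  # hypothetical path

# shorten the delayed-cleanup window and tighten the forced-cleanup threshold
packet_cleaner.cleanup_delay = 2.0
packet_cleaner.force_cleanup_delay = 15.0
```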
With this system, the Python multiplayer implementation gains packet cleanup capabilities similar to the osu! server source: forwarded packets are cleaned up automatically after every game, preventing memory leaks and keeping the system performant.