液态神经网络(Liquid Neural Networks, LNNs)代码示例
本代码实现了一个增强型液态神经网络(Liquid Neural Network, LNN)的完整演示,包括模型构建、训练、测试和鲁棒性展示。以下是简明扼要的介绍:
主要组成部分
- LiquidNeuronLayer 类:定义了液态神经元层的基本结构和前向传播过程。它包含了输入权重、循环权重、偏置项、时间常数以及激活函数。
- SimpleLNN 类:继承自 nn.Module,包含一个液态神经元层和一个输出层,用于将液态神经元层的输出映射到目标维度。
- generate_sine_wave_data 函数:生成合成数据集,包含一系列正弦波序列及其对应的下一时刻预测目标。
- train_lnn_demo 函数:训练模型,并通过可视化手段分析训练过程中的损失变化、预测与真实值对比、学习到的时间常数分布等信息。该函数还展示了隐藏状态动态变化、预测误差分布等详细分析。
- demonstrate_robustness 函数:展示训练好的模型对不同噪声水平输入信号的鲁棒性。通过比较干净信号、带噪声输入信号以及模型输出来评估模型性能。
特色亮点
可视化增强:利用 matplotlib 和 seaborn 提供了丰富的可视化内容,如训练损失曲线、预测与实际对比图、时间常数分布等。
鲁棒性测试:通过在输入信号中加入不同程度的噪声来测试模型的稳定性和准确性,从而验证模型的实用性。
此代码为研究和理解液态神经网络提供了一个完整的流程,从模型构建、训练到性能评估,具有很强的参考价值。
import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.integrate import odeint
# Global plotting defaults: unified font sizes plus seaborn's whitegrid theme.
_PLOT_RC = {
    'font.size': 10,
    'axes.titlesize': 12,
    'axes.labelsize': 11,
    'legend.fontsize': 10,
    'xtick.labelsize': 9,
    'ytick.labelsize': 9,
}
plt.rcParams.update(_PLOT_RC)
sns.set_style("whitegrid")
sns.set_palette("tab10")
class LiquidNeuronLayer(nn.Module):
    """A layer of liquid (continuous-time) neurons integrated with explicit Euler steps.

    Each neuron follows dh/dt = (-h + tanh(W_in x + W_rec h + b)) / tau,
    where tau is a learnable per-neuron time constant.
    """

    def __init__(self, input_size, hidden_size, dt=0.1):
        """
        Args:
            input_size: dimensionality of each per-timestep input vector.
            hidden_size: number of liquid neurons.
            dt: Euler integration step size.
        """
        super(LiquidNeuronLayer, self).__init__()
        self.hidden_size = hidden_size
        self.dt = dt
        # Small random init keeps the early dynamics near the tanh linear regime.
        self.W_input = nn.Parameter(torch.randn(hidden_size, input_size) * 0.1)
        self.W_recurrent = nn.Parameter(torch.randn(hidden_size, hidden_size) * 0.1)
        self.bias = nn.Parameter(torch.zeros(hidden_size))
        # NOTE(review): tau is unconstrained; training could drive it toward zero
        # (or negative), destabilizing the division below. Consider a softplus
        # reparameterization if instability is observed.
        self.time_constants = nn.Parameter(torch.ones(hidden_size) * 0.5)
        self.activation = nn.Tanh()

    def forward(self, x, hidden_state=None):
        """Integrate the liquid dynamics over an input sequence.

        Args:
            x: input of shape [seq_len, batch, input_size].
            hidden_state: optional initial state [batch, hidden_size]; zeros if None.

        Returns:
            (hidden_sequence, final_state, hidden_sequence) where hidden_sequence
            has shape [seq_len, batch, hidden_size]. The original implementation
            built the same per-timestep sequence twice (a cat of unsqueezed
            states and a stack of clones); it is now built once and returned in
            both positions — values are identical to the original API.
        """
        seq_len, batch_size, _ = x.shape
        if hidden_state is None:
            hidden_state = torch.zeros(batch_size, self.hidden_size, device=x.device)
        states = []
        for t in range(seq_len):
            input_term = torch.mm(x[t], self.W_input.t()) + self.bias
            recurrent_term = torch.mm(hidden_state, self.W_recurrent.t())
            # Explicit Euler step of the leaky ODE; each Euler update rebinds
            # hidden_state to a fresh tensor, so storing it without clone() is safe.
            dh = (-hidden_state + self.activation(input_term + recurrent_term)) / self.time_constants
            hidden_state = hidden_state + self.dt * dh
            states.append(hidden_state)
        sequence = torch.stack(states, dim=0)  # [seq_len, batch, hidden]
        return sequence, hidden_state, sequence
class SimpleLNN(nn.Module):
    """A liquid neuron layer followed by a linear readout to the target dimension."""

    def __init__(self, input_size, hidden_size, output_size, dt=0.1):
        super(SimpleLNN, self).__init__()
        self.liquid_layer = LiquidNeuronLayer(input_size, hidden_size, dt)
        self.output_layer = nn.Linear(hidden_size, output_size)

    def forward(self, x, hidden_state=None):
        """Run the liquid dynamics, then project every timestep's hidden state."""
        seq_states, last_state, state_history = self.liquid_layer(x, hidden_state)
        predictions = self.output_layer(seq_states)
        return predictions, last_state, state_history
def generate_sine_wave_data(seq_length=100, num_sequences=50):
    """Build a synthetic next-step sine prediction dataset.

    Each input sequence is a noisy sine wave with random frequency and phase;
    the target is the clean sine evaluated a fixed 0.1 time-offset ahead.

    Returns:
        (inputs, targets): two arrays of shape [num_sequences, seq_length, 1].
    """
    t = np.linspace(0, 4 * np.pi, seq_length)

    def _one_pair():
        # Draw order (freq, phase, noise) matches the original code so a
        # fixed global seed reproduces the same dataset.
        freq = 0.5 + 0.5 * np.random.rand()
        phase = 2 * np.pi * np.random.rand()
        noise = 0.1 * np.random.randn(seq_length)
        noisy = np.sin(freq * t + phase) + noise
        clean_ahead = np.sin(freq * (t + 0.1) + phase)
        return noisy, clean_ahead

    pairs = [_one_pair() for _ in range(num_sequences)]
    xs = np.array([p[0] for p in pairs]).reshape(num_sequences, seq_length, 1)
    ys = np.array([p[1] for p in pairs]).reshape(num_sequences, seq_length, 1)
    return xs, ys
def train_lnn_demo():
    """Train a SimpleLNN on synthetic sine data, plot an analysis dashboard, and return the model.

    Pipeline: generate train/test sets, run mini-batch Adam training with MSE
    loss, evaluate on the held-out set, then draw an 8-panel matplotlib figure
    (loss curve, predictions, time-constant histogram, hidden-state heatmap,
    error distribution, residuals, step-response scatter, summary box).

    Returns:
        The trained SimpleLNN instance (reused by the robustness demo).
    """
    # Demo hyperparameters.
    input_size = 1
    hidden_size = 32
    output_size = 1
    seq_length = 100
    batch_size = 16
    num_epochs = 200
    learning_rate = 0.01
    X_train, y_train = generate_sine_wave_data(seq_length, 100)
    X_test, y_test = generate_sine_wave_data(seq_length, 20)
    # Move to the [seq_len, batch, feature] layout the liquid layer expects.
    X_train = torch.FloatTensor(X_train).transpose(0, 1)
    y_train = torch.FloatTensor(y_train).transpose(0, 1)
    X_test = torch.FloatTensor(X_test).transpose(0, 1)
    y_test = torch.FloatTensor(y_test).transpose(0, 1)
    model = SimpleLNN(input_size, hidden_size, output_size)
    criterion = nn.MSELoss()
    optimizer = optim.Adam(model.parameters(), lr=learning_rate)
    losses = []
    for epoch in range(num_epochs):
        model.train()
        total_loss = 0
        num_batches = 0
        # Mini-batches are slices along the batch dimension (dim 1).
        for i in range(0, X_train.shape[1], batch_size):
            end_idx = min(i + batch_size, X_train.shape[1])
            batch_X = X_train[:, i:end_idx, :]
            batch_y = y_train[:, i:end_idx, :]
            optimizer.zero_grad()
            outputs, _, _ = model(batch_X)
            loss = criterion(outputs, batch_y)
            loss.backward()
            optimizer.step()
            total_loss += loss.item()
            num_batches += 1
        avg_loss = total_loss / num_batches
        losses.append(avg_loss)
        if epoch % 20 == 0:
            print(f'Epoch [{epoch}/{num_epochs}], Loss: {avg_loss:.6f}')
    model.eval()
    with torch.no_grad():
        test_outputs, _, all_hidden_test = model(X_test)
        test_loss = criterion(test_outputs, y_test)
        print(f'Test Loss: {test_loss:.6f}')
    # === Visualization dashboard ===
    fig = plt.figure(figsize=(18, 12))
    gs = fig.add_gridspec(3, 4, hspace=0.35, wspace=0.3)
    # 1. Training loss curve
    ax1 = fig.add_subplot(gs[0, :2])
    ax1.plot(losses, color='#2E86AB', linewidth=2)
    ax1.set_title('Training Loss (MSE)', fontweight='bold')
    ax1.set_xlabel('Epoch')
    ax1.set_ylabel('Loss')
    ax1.grid(True, linestyle='--', alpha=0.6)
    # 2. Prediction vs ground truth (several test samples)
    ax2 = fig.add_subplot(gs[0, 2:])
    sample_indices = [0, 1, 2]
    colors = ['#A23B72', '#2E86AB', '#F18F01']
    for idx, c in zip(sample_indices, colors):
        ax2.plot(y_test[:, idx, 0].numpy(), '--', color=c, alpha=0.8, label=f'True #{idx}')
        ax2.plot(test_outputs[:, idx, 0].numpy(), '-', color=c, linewidth=1.5, label=f'Pred #{idx}')
    ax2.set_title('Prediction vs Ground Truth (Multiple Samples)')
    ax2.set_xlabel('Time Step')
    ax2.legend(fontsize=8, ncol=2)
    # 3. Learned time-constant distribution
    ax3 = fig.add_subplot(gs[1, 0])
    time_constants = model.liquid_layer.time_constants.detach().numpy()
    sns.histplot(time_constants, bins=12, kde=True, ax=ax3, color='#4CAF50')
    ax3.set_title('Learned Time Constants Distribution', fontweight='bold')
    ax3.set_xlabel('τ (Time Constant)')
    ax3.set_ylabel('Density')
    # 4. Hidden-state heatmap (first test sample)
    ax4 = fig.add_subplot(gs[1, 1:3])
    hidden_seq = all_hidden_test[:, 0, :].T.detach().numpy()  # [hidden, seq_len]
    im = ax4.imshow(hidden_seq, aspect='auto', cmap='viridis', interpolation='none')
    ax4.set_title('Hidden State Dynamics (Neuron Activity Over Time)', fontweight='bold')
    ax4.set_xlabel('Time Step')
    ax4.set_ylabel('Neuron Index')
    plt.colorbar(im, ax=ax4, shrink=0.8)
    # 5. Prediction error distribution over all test points
    ax5 = fig.add_subplot(gs[1, 3])
    errors = (test_outputs - y_test).numpy().flatten()
    sns.histplot(errors, kde=True, ax=ax5, color='#E53935')
    ax5.set_title('Prediction Error Distribution', fontweight='bold')
    ax5.set_xlabel('Error (Pred - True)')
    # 6. Residuals over time (first sample)
    ax6 = fig.add_subplot(gs[2, :2])
    residuals = (test_outputs[:, 0, 0] - y_test[:, 0, 0]).numpy()
    ax6.plot(residuals, color='#6A1B9A', linewidth=1.2)
    ax6.axhline(0, color='black', linestyle='--', linewidth=0.8)
    ax6.set_title('Residuals Over Time (Sample #0)', fontweight='bold')
    ax6.set_xlabel('Time Step')
    ax6.set_ylabel('Residual')
    # 7. Time constant vs neuron response speed (approximated by the
    #    initial slope of the response to a step input)
    ax7 = fig.add_subplot(gs[2, 2])
    # Rough estimate: response speed right after the step turns on at t=5.
    step_input = torch.zeros(20, 1, 1)
    step_input[5:, :, :] = 1.0
    with torch.no_grad():
        _, _, hidden_step = model(step_input)
    initial_response = hidden_step[6, 0, :].numpy() - hidden_step[5, 0, :].numpy()  # Δh at step onset
    tau_vals = time_constants
    ax7.scatter(tau_vals, np.abs(initial_response), alpha=0.7, color='#00796B')
    ax7.set_xlabel('Time Constant τ')
    ax7.set_ylabel('|Initial Response Slope|')
    ax7.set_title('τ vs Initial Response Speed', fontweight='bold')
    ax7.grid(True, linestyle='--', alpha=0.5)
    # 8. Model summary text box
    ax8 = fig.add_subplot(gs[2, 3])
    ax8.axis('off')
    # NOTE(review): dt is hard-coded as 0.1 in this text; keep in sync with
    # the SimpleLNN default if it ever changes.
    info_text = (
        f"Model Summary:\n"
        f"• Hidden Size: {hidden_size}\n"
        f"• Time Step (dt): 0.1\n"
        f"• Final Test Loss: {test_loss:.5f}\n"
        f"• τ Range: [{time_constants.min():.3f}, {time_constants.max():.3f}]\n"
        f"• Mean τ: {time_constants.mean():.3f}"
    )
    ax8.text(0.1, 0.5, info_text, fontsize=10, verticalalignment='center',
             bbox=dict(boxstyle="round,pad=0.3", facecolor="#E3F2FD", edgecolor="#1976D2"))
    plt.suptitle('Liquid Neural Network (LNN) Training & Analysis Dashboard', fontsize=16, fontweight='bold')
    plt.show()
    return model  # the trained model is reused by the robustness demo
def demonstrate_robustness(model):
    """Feed the trained model sine inputs at several noise levels and plot each response.

    One subplot per noise level σ shows the clean sine, the noisy input, and
    the model's output, giving a visual read on noise robustness.
    """
    print("\n=== Robustness Demonstration (Using Trained Model) ===")
    t = np.linspace(0, 4*np.pi, 50)
    clean_signal = np.sin(t).reshape(-1, 1, 1)
    noise_levels = [0.0, 0.2, 0.5, 1.0]
    fig, axes = plt.subplots(2, 2, figsize=(14, 10))
    fig.suptitle('LNN Robustness to Input Noise (Trained Model)', fontsize=14, fontweight='bold')
    # axes.flat walks the 2x2 grid row-major, pairing each panel with a σ.
    for panel, sigma in zip(axes.flat, noise_levels):
        corrupted = clean_signal + sigma * np.random.randn(*clean_signal.shape)
        with torch.no_grad():
            prediction, _, _ = model(torch.FloatTensor(corrupted),
                                     torch.zeros(1, model.liquid_layer.hidden_size))
        panel.plot(t, clean_signal[:, 0, 0], 'g-', label='Clean Signal', linewidth=2.2)
        panel.plot(t, corrupted[:, 0, 0], 'r.', markersize=3, alpha=0.6, label=f'Noisy Input (σ={sigma})')
        panel.plot(t, prediction[:, 0, 0].numpy(), 'b-', label='LNN Output', linewidth=2)
        panel.set_title(f'Noise Level σ = {sigma}', fontweight='bold')
        panel.legend()
        panel.grid(True, linestyle='--', alpha=0.5)
    plt.tight_layout(rect=[0, 0.03, 1, 0.95])
    plt.show()
if __name__ == "__main__":
    print("🚀 Enhanced Liquid Neural Network (LNN) Demo")
    print("=" * 60)
    # Train the model and keep a handle to it.
    trained_model = train_lnn_demo()
    # Reuse the trained model for the noise-robustness demonstration.
    demonstrate_robustness(trained_model)

魔乐社区(Modelers.cn) 是一个中立、公益的人工智能社区,提供人工智能工具、模型、数据的托管、展示与应用协同服务,为人工智能开发及爱好者搭建开放的学习交流平台。社区通过理事会方式运作,由全产业链共同建设、共同运营、共同享有,推动国产AI生态繁荣发展。
更多推荐

所有评论(0)