• [Technical Tips] March 2024 AI Questions Roundup
    The questions from March are summarized below:
    【1】Can BOS be accessed through this link? cid:link_1
    【2】How should the Pangu large model be integrated into an application? cid:link_2
    【3】What are the applications of convolutional neural networks (CNNs) in image processing? cid:link_3
    【4】What are the main tasks of natural language processing (NLP)? cid:link_4
    【5】What is the difference between image recognition and object detection? cid:link_5
    【6】Which image classification algorithms currently perform best? cid:link_0
  • [Technical Tips] Deep-Learning-Based Recognition of Rose Varieties
    Dataset: prepare your own rose image dataset with as many varieties and as many images as possible; the tutorial below assumes such a dataset is already in place.

    Data preprocessing: convert the images into a format the model can consume and normalize the pixel values.

```python
import tensorflow as tf
from tensorflow.keras.preprocessing.image import ImageDataGenerator

# Image size and batch size
IMG_SIZE = (224, 224)
BATCH_SIZE = 32

# ImageDataGenerator instance for data augmentation and preprocessing
train_datagen = ImageDataGenerator(
    rescale=1./255,
    rotation_range=20,
    width_shift_range=0.2,
    height_shift_range=0.2,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True,
    fill_mode='nearest'
)

# Load the training set
train_data = train_datagen.flow_from_directory(
    'flowers',
    target_size=IMG_SIZE,
    batch_size=BATCH_SIZE,
    class_mode='categorical'
)
```

    Model construction: use a pretrained ResNet50 as the feature extractor and add a fully connected head for classification.

```python
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Dense, GlobalAveragePooling2D
from tensorflow.keras.applications.resnet50 import ResNet50

# Load ResNet50 without its classification head
base_model = ResNet50(weights='imagenet', include_top=False, input_shape=IMG_SIZE + (3,))

# Stack a fully connected head on top of ResNet50
x = base_model.output
x = GlobalAveragePooling2D()(x)
x = Dense(1024, activation='relu')(x)
predictions = Dense(train_data.num_classes, activation='softmax')(x)

# Assemble the full model
model = Model(inputs=base_model.input, outputs=predictions)

# Freeze all ResNet50 layers
for layer in base_model.layers:
    layer.trainable = False
```

    Model training and evaluation. Training:

```python
# Compile the model
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])

# Train the model
model.fit(train_data, epochs=10)
```

    After training, the model needs to be evaluated:

```python
# Load the test set
test_datagen = ImageDataGenerator(rescale=1./255)
test_data = test_datagen.flow_from_directory(
    'test',
    target_size=IMG_SIZE,
    batch_size=BATCH_SIZE,
    class_mode='categorical'
)

# Evaluate on the test set
test_loss, test_acc = model.evaluate(test_data)
print('Test accuracy:', test_acc)
```

    If needed, adjust the model and train again (note that `valid_data` is not created anywhere in this tutorial; see the sketch below):

```python
# Training parameters
EPOCHS = 50
STEPS_PER_EPOCH = len(train_data)
VALIDATION_STEPS = len(valid_data)

# Start training
history = model.fit(
    train_data,
    epochs=EPOCHS,
    steps_per_epoch=STEPS_PER_EPOCH,
    validation_data=valid_data,
    validation_steps=VALIDATION_STEPS
)
```
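    The last code block references `valid_data` without defining it. Below is a minimal sketch of how a validation generator could be prepared, and how the top ResNet50 layers might be unfrozen for fine-tuning; the 'valid' directory name, the unfreezing depth, and the learning rate are assumptions rather than part of the original tutorial.

```python
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.preprocessing.image import ImageDataGenerator

# Hypothetical validation directory, organized like 'flowers' and 'test'
valid_datagen = ImageDataGenerator(rescale=1./255)
valid_data = valid_datagen.flow_from_directory(
    'valid',                        # assumed path
    target_size=IMG_SIZE,
    batch_size=BATCH_SIZE,
    class_mode='categorical'
)

# Optional fine-tuning: unfreeze the last few ResNet50 layers and
# re-compile with a small learning rate before the second training run.
for layer in base_model.layers[-10:]:    # the unfreezing depth is a guess
    layer.trainable = True
model.compile(optimizer=Adam(learning_rate=1e-5),
              loss='categorical_crossentropy',
              metrics=['accuracy'])
```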
  • [Technical Tips] Quick Start: CNN-Based Artwork Defect Detection and Restoration
    Introduction: Convolutional neural networks have excellent feature extraction and pattern recognition capabilities. For artwork defect detection, a CNN can analyze and classify artwork images efficiently. With training, the network learns the characteristics of various defects, such as cracks, fading, and stains, enabling automated inspection of artwork images.

    Hands-on: a schematic model (the image dimensions, class count, and data arrays are placeholders to be filled in for your own dataset):

```python
import tensorflow as tf
from tensorflow.keras import layers, models

# Build the convolutional neural network
model = models.Sequential()
model.add(layers.Conv2D(32, (3, 3), activation='relu',
                        input_shape=(img_height, img_width, img_channels)))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(128, (3, 3), activation='relu'))

# Add the fully connected and output layers
model.add(layers.Flatten())
model.add(layers.Dense(128, activation='relu'))
model.add(layers.Dense(num_classes, activation='softmax'))

# Compile the model
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])

# Train the model
model.fit(train_images, train_labels, epochs=num_epochs,
          validation_data=(val_images, val_labels))
```

    Using the detected defect information, image processing and deep learning techniques can then restore the artwork image. For example, a generative adversarial network (GAN) can learn the distribution of artwork images and generate new images with the artwork's characteristics, which can be used to repair the defects.

    Below is a simple example showing how a CNN can be used for artwork defect detection. It is very basic; real applications need more data, more sophisticated models, and much more training.

```python
import tensorflow as tf
from tensorflow.keras import layers, models
from tensorflow.keras.preprocessing import image
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import matplotlib.pyplot as plt
import numpy as np

# Data preparation
train_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_directory(
    'path_to_dataset',  # replace with your dataset path
    target_size=(150, 150),
    batch_size=32,
    class_mode='binary'
)

# Build the convolutional neural network
model = models.Sequential()
model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(150, 150, 3)))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(128, (3, 3), activation='relu'))
model.add(layers.Flatten())
model.add(layers.Dense(128, activation='relu'))
model.add(layers.Dense(1, activation='sigmoid'))

model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])

# Train the model
history = model.fit(train_generator, epochs=10)

# Visualize the training process
acc = history.history['accuracy']
epochs = range(1, len(acc) + 1)
plt.plot(epochs, acc, 'bo', label='Training accuracy')
plt.title('Training accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend()
plt.show()

# Run defect detection
def predict_defect(image_path):
    img = image.load_img(image_path, target_size=(150, 150))
    img_array = image.img_to_array(img)
    img_array = np.expand_dims(img_array, axis=0)
    img_array /= 255.0
    prediction = model.predict(img_array)
    if prediction[0] < 0.5:
        print("The artwork has defects")
    else:
        print("The artwork is normal")

# Replace with the path of the test image
test_image_path = 'path_to_test_image.jpg'
predict_defect(test_image_path)
```

    Replace path_to_dataset with the path of your training dataset, and path_to_test_image.jpg with the path of the image you want to inspect.

    CNN-based artwork defect detection with restoration:
```python
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.layers import Conv2D, Input, UpSampling2D, concatenate
from tensorflow.keras.preprocessing.image import load_img, img_to_array, array_to_img
from tensorflow.keras.optimizers import Adam

# Data preparation
def load_data():
    # In a real application you need a large dataset of normal and defective images.
    # This simplified example loads just two illustrative images.
    img_normal = img_to_array(load_img('path_to_normal_image.jpg', target_size=(256, 256))) / 255.0
    img_defective = img_to_array(load_img('path_to_defective_image.jpg', target_size=(256, 256))) / 255.0
    return np.array([img_normal]), np.array([img_defective])

# Build the defect-detection model (outputs a per-pixel defect probability map)
def build_detection_model():
    model = Sequential()
    model.add(Conv2D(64, (3, 3), activation='relu', padding='same', input_shape=(256, 256, 3)))
    model.add(Conv2D(64, (3, 3), activation='relu', padding='same'))
    model.add(Conv2D(1, (3, 3), activation='sigmoid', padding='same'))
    return model

# Build the restoration model (image-to-image network)
def build_restoration_model():
    input_layer = Input(shape=(256, 256, 3))
    x = Conv2D(64, (3, 3), activation='relu', padding='same')(input_layer)
    x = Conv2D(64, (3, 3), activation='relu', padding='same')(x)
    encoded = Conv2D(3, (3, 3), activation='sigmoid', padding='same')(x)
    return Model(input_layer, encoded)

# Build the combined detection-and-restoration model.
# The functional API is used so the combined model exposes both outputs
# (the restored image and the detection map), which the two-loss compile below requires.
def build_combined_model(detection_model, restoration_model):
    input_layer = Input(shape=(256, 256, 3))
    restored = restoration_model(input_layer)
    detected = detection_model(restored)
    return Model(input_layer, [restored, detected])

# Train the models
def train_models(detection_model, restoration_model, combined_model, img_normal, img_defective):
    detection_model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
    restoration_model.compile(optimizer='adam', loss='mse')

    # Train the defect-detection model (the target must match the single-channel
    # output, hence the (batch, 256, 256, 1) label tensor)
    detection_model.fit(img_defective, np.ones((len(img_defective), 256, 256, 1)), epochs=10)

    # Train the restoration model
    restoration_model.fit(img_normal, img_defective, epochs=10)

    # Freeze the restoration model, then train the combined model with two losses
    restoration_model.trainable = False
    combined_model.compile(optimizer='adam', loss=['mse', 'binary_crossentropy'])
    combined_model.fit(img_normal,
                       [img_defective, np.ones((len(img_normal), 256, 256, 1))],
                       epochs=10)

# Test the model
def test_model(combined_model, img_normal):
    restored_img, _ = combined_model.predict(img_normal)
    plt.figure(figsize=(10, 5))
    plt.subplot(1, 2, 1)
    plt.title('Original Image')
    plt.imshow(array_to_img(img_normal[0]))
    plt.subplot(1, 2, 2)
    plt.title('Restored Image')
    plt.imshow(array_to_img(restored_img[0]))
    plt.show()

# Main program
img_normal, img_defective = load_data()
detection_model = build_detection_model()
restoration_model = build_restoration_model()
combined_model = build_combined_model(detection_model, restoration_model)
train_models(detection_model, restoration_model, combined_model, img_normal, img_defective)
test_model(combined_model, img_normal)
```

    A GAN-based approach to artwork defect detection and restoration: this code uses the CIFAR-10 dataset, adds random defects to the images, and uses a generative adversarial network to try to repair them.

```python
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.layers import Input, Conv2D, LeakyReLU, BatchNormalization, Flatten, Dense, Reshape, Conv2DTranspose
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.datasets import cifar10

# Load and preprocess the dataset
def load_and_preprocess_data():
    (x_train, _), (_, _) = cifar10.load_data()
    x_train = x_train / 255.0
    return x_train[:5000]

# Add random defects to the images
def add_random_defects(images):
    defect_mask = np.random.random(size=images.shape) < 0.05
    images[defect_mask] = np.random.random(size=defect_mask.sum())
    return images

# Build the generator model
def build_generator(latent_dim):
    model = Sequential()
    model.add(Dense(4 * 4 * 256, input_dim=latent_dim))
    model.add(Reshape((4, 4, 256)))
    model.add(Conv2DTranspose(128, kernel_size=4, strides=2, padding="same"))
    model.add(BatchNormalization())
    model.add(LeakyReLU(alpha=0.2))
    model.add(Conv2DTranspose(64, kernel_size=4, strides=2, padding="same"))
    model.add(BatchNormalization())
    model.add(LeakyReLU(alpha=0.2))
    model.add(Conv2DTranspose(3, kernel_size=4, strides=2, padding="same", activation="sigmoid"))
    return model

# Build the discriminator model
def build_discriminator(input_shape):
    model = Sequential()
    model.add(Conv2D(64, kernel_size=4, strides=2, padding="same", input_shape=input_shape))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Conv2D(128, kernel_size=4, strides=2, padding="same"))
    model.add(BatchNormalization())
    model.add(LeakyReLU(alpha=0.2))
    model.add(Flatten())
    model.add(Dense(1, activation="sigmoid"))
    return model

# Build the combined GAN model
def build_gan(generator, discriminator):
    discriminator.trainable = False
    model = Sequential()
    model.add(generator)
    model.add(discriminator)
    return model

# Compile the discriminator and the GAN
def compile_models(generator, discriminator, gan):
    # Compile the discriminator while it is trainable, then freeze it inside the GAN
    discriminator.trainable = True
    discriminator.compile(optimizer=Adam(learning_rate=0.0002, beta_1=0.5),
                          loss="binary_crossentropy", metrics=["accuracy"])
    discriminator.trainable = False
    gan.compile(optimizer=Adam(learning_rate=0.0002, beta_1=0.5), loss="binary_crossentropy")

# Train the models
def train_models(generator, discriminator, gan, data, latent_dim, epochs=10000, batch_size=64):
    batch_count = data.shape[0] // batch_size
    for epoch in range(epochs):
        for _ in range(batch_count):
            # Train the discriminator on real and generated images
            noise = np.random.normal(0, 1, size=(batch_size, latent_dim))
            generated_images = generator.predict(noise)
            real_images = data[np.random.randint(0, data.shape[0], size=batch_size)]
            labels_real = np.ones((batch_size, 1))
            labels_fake = np.zeros((batch_size, 1))
            d_loss_real = discriminator.train_on_batch(real_images, labels_real)
            d_loss_fake = discriminator.train_on_batch(generated_images, labels_fake)
            d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)

            # Train the generator through the frozen discriminator
            noise = np.random.normal(0, 1, size=(batch_size, latent_dim))
            labels_gan = np.ones((batch_size, 1))
            g_loss = gan.train_on_batch(noise, labels_gan)

        # Print the losses
        print(f"Epoch {epoch}/{epochs} [D loss: {d_loss[0]} | D accuracy: {100 * d_loss[1]}] [G loss: {g_loss}]")

        # Save and display generated images every 1000 epochs
        if epoch % 1000 == 0:
            save_generated_images(generator, epoch)

# Save the generated images
def save_generated_images(generator, epoch, examples=10, dim=(1, 10), figsize=(10, 1)):
    noise = np.random.normal(0, 1, size=(examples, 100))
    generated_images = generator.predict(noise)
    generated_images = generated_images.reshape(examples, 32, 32, 3)
    plt.figure(figsize=figsize)
    for i in range(generated_images.shape[0]):
        plt.subplot(dim[0], dim[1], i + 1)
        plt.imshow(generated_images[i], interpolation="nearest")
        plt.axis("off")
    plt.tight_layout()
    plt.savefig(f"generated_image_epoch_{epoch}.png")

# Main program
latent_dim = 100
input_shape = (32, 32, 3)

# Load and preprocess the data
data = load_and_preprocess_data()

# Add random defects to the images
data_with_defects = add_random_defects(np.copy(data))

# Build the generator, discriminator, and GAN
generator = build_generator(latent_dim)
discriminator = build_discriminator(input_shape)
gan = build_gan(generator, discriminator)

# Compile the models
compile_models(generator, discriminator, gan)

# Train the models
train_models(generator, discriminator, gan, data_with_defects, latent_dim)
```

    Summary: deep learning brings a new perspective and new possibilities to artwork defect detection and restoration in the context of cultural heritage preservation. Techniques such as generative adversarial networks (GANs) enable more accurate defect detection and automated restoration pipelines, which not only improves restoration efficiency but also opens a new path for the digitization and data-driven preservation of cultural heritage.
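    As a small addendum to the CNN example above (not part of the original post): the per-pixel sigmoid map produced by `build_detection_model()` could be thresholded and visualized roughly as sketched below. The 0.5 threshold and the image path are assumptions.

```python
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras.preprocessing.image import load_img, img_to_array

# Load one image the same way load_data() does (the path is hypothetical)
img = img_to_array(load_img('path_to_defective_image.jpg', target_size=(256, 256))) / 255.0

# The detection model outputs a (256, 256, 1) map of per-pixel defect scores
mask = detection_model.predict(np.expand_dims(img, axis=0))[0, :, :, 0]
binary_mask = mask > 0.5  # the threshold is an arbitrary choice

plt.figure(figsize=(8, 4))
plt.subplot(1, 2, 1)
plt.title('Input')
plt.imshow(img)
plt.axis('off')
plt.subplot(1, 2, 2)
plt.title('Defect mask')
plt.imshow(binary_mask, cmap='gray')
plt.axis('off')
plt.show()
```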
  • [Technical Tips] MNIST Handwritten Digit Classification Experiment
    ## 1. Objectives
    Set up a machine learning environment; build a model and get the provided MNIST classification code running; create an algorithm and improve its accuracy.

    ## 2. Content and Steps
    Environment setup, data preprocessing, model construction and training, and model evaluation. The data preprocessing and model evaluation code is already provided, so only the environment setup and the model construction and training need to be implemented. For the model I chose a CNN with the Adam optimizer and tuned its parameters to improve accuracy; the tuning process is described in Section 5, Results and Summary.

    ## 3. Environment
    Huawei Cloud ModelArts, on a 64-bit PC.

    ## 4. Procedure and Analysis
    Environment setup: create an OBS bucket and configure it as described in the PDF, then create an MNIST folder and upload the code and data files. Create the algorithm and the training job following the PDF; note that the training-input parameter shown in the PDF is slightly off: it should point to the directory containing the dataset, not to the dataset file itself. Once this is done, the training job can be started. The rest of this section walks through the code and the tuning process.

    Data preprocessing: the imports set up TensorFlow and Keras for the subsequent network training. The dataset-loading part has to be modified from the original code, otherwise it raises an error: moxing is used to copy the dataset into the working directory, and the data and labels are then extracted from it. The preprocessing then uses Keras to expand the image dimensions, scales the pixel values, and converts the labels to one-hot encoding.

```python
import os
import numpy as np
import tensorflow as tf
import argparse
from tensorflow.keras import backend as K
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Conv2D
from tensorflow.keras.layers import Dropout, MaxPooling2D, Flatten
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.utils import to_categorical
import moxing

# Argument parsing, used to read the MNIST dataset in ModelArts
parser = argparse.ArgumentParser()
parser.add_argument('--data_url', required=True, default=None, help='test')
arg = parser.parse_args()

# Copy the dataset from the OBS bucket (src_url) to the local working directory (dst_url)
moxing.file.copy_parallel(src_url=os.path.join(arg.data_url, 'mnist.npz'), dst_url='mnist.npz')

seed = 7
np.random.seed(seed)

# Load the MNIST dataset from the local copy
dataset_path = 'mnist.npz'
f = np.load(dataset_path)
X_train, y_train = f['x_train'], f['y_train']
X_test, y_test = f['x_test'], f['y_test']

# Data preprocessing
X_train = K.expand_dims(X_train, -1)
X_test = K.expand_dims(X_test, -1)
X_train = X_train / 255
X_test = X_test / 255

# Convert the labels to one-hot encoding
y_train = tf.convert_to_tensor(to_categorical(y_train))
y_test = tf.convert_to_tensor(to_categorical(y_test))
num_classes = y_test.shape[1]
```

    Model construction and training: create a Sequential model, i.e. a stack of layers applied in order. Convolutional and pooling layers extract image features: the first convolutional layer has 32 filters of size (3, 3) with ReLU activation, followed by a max-pooling layer that reduces the spatial dimensions; a second convolution with 64 filters and another max-pooling layer follow. A Flatten layer then turns the multi-dimensional feature maps into a vector for the fully connected part. The fully connected part starts with a 128-unit dense layer with ReLU activation, followed by a Dropout layer with a rate of 0.5 to reduce overfitting, and ends with a 10-unit softmax output layer for the multi-class prediction. Finally, model.compile() sets the loss function, the optimizer (Adam), and the accuracy metric.

```python
# Define the convolutional neural network with Keras
def build_model(lr):
    # Create a Sequential model (note: the lr argument is not used here;
    # the learning rate is set directly in the compile call)
    model = Sequential()
    # Convolutional and pooling layers
    model.add(Conv2D(filters=32, kernel_size=(3, 3), input_shape=(28, 28, 1), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(filters=64, kernel_size=(3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    # Flatten
    model.add(Flatten())
    # Fully connected layer
    model.add(Dense(128, activation='relu'))
    model.add(Dropout(0.5))
    # Output layer
    model.add(Dense(10, activation='softmax'))
    # Print the model summary
    model.summary()
    # Compile the model with loss function, optimizer, and metric
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model
```

    Model evaluation: this part is unchanged; it evaluates the trained CNN on the test set. The initial accuracy is 0.9523, which brings us to the tuning stage.

```python
# Evaluate the model
scores = model.evaluate(X_test, y_test, verbose=0)
loss, accuracy = model.evaluate(X_test, y_test, verbose=1)
print('loss:%.4f accuracy:%.4f' % (loss, accuracy))
print("Baseline Error: %.2f%%" % (100 - scores[1] * 100))
```

    ## 5. Results and Summary
    The main changes are in the model construction and training code: different parameters are tried to obtain higher accuracy while guarding against overfitting. The starting accuracy is 0.9523.
    #### Learning-rate tuning
    To find a promising range, first try a relatively large learning rate of 0.01: accuracy rises to 96.88%. Gradually lowering the learning rate keeps improving accuracy until 0.003, where it drops, so a learning rate of about 0.004 is roughly optimal.

```python
    # Inside build_model(): Adam optimizer with an explicit learning rate;
    # 0.01, 0.007, 0.005, 0.003 and 0.004 were tried in turn
    # model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    # model.compile(loss='categorical_crossentropy', optimizer=Adam(learning_rate=0.01), metrics=['accuracy'])
    # model.compile(loss='categorical_crossentropy', optimizer=Adam(learning_rate=0.007), metrics=['accuracy'])
    # model.compile(loss='categorical_crossentropy', optimizer=Adam(learning_rate=0.005), metrics=['accuracy'])
    # model.compile(loss='categorical_crossentropy', optimizer=Adam(learning_rate=0.003), metrics=['accuracy'])
    model.compile(loss='categorical_crossentropy', optimizer=Adam(learning_rate=0.004), metrics=['accuracy'])
    return model
```

    #### batch_size tuning
    Raising batch_size from 200 to 300 nudges accuracy from 0.9776 to 0.9791, a slight improvement, so we move on to the other parameters first.

```python
# batch_size tuning, trying 200 and 300
# model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=100, batch_size=200, verbose=2, steps_per_epoch=1)
model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=100, batch_size=300, verbose=2, steps_per_epoch=1)
```

    #### epochs tuning
    Increasing the number of epochs keeps improving accuracy, but to avoid overfitting we stop at 300, where accuracy essentially peaks at 0.9876. This is the result so far.

```python
# epochs tuning, trying 100, 200, and 300
# model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=100, batch_size=300, verbose=2, steps_per_epoch=1)
# model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=200, batch_size=300, verbose=2, steps_per_epoch=1)
model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=300, batch_size=300, verbose=2, steps_per_epoch=1)
```

    #### steps_per_epoch tuning
    While researching, I found that steps_per_epoch also affects the result, so following a tutorial I changed it to None. Accuracy finally broke the 99% barrier at 0.9915, and the tuning ends here.

```python
# model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=300, batch_size=300, verbose=2, steps_per_epoch=1)
model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=300, batch_size=300, verbose=2, steps_per_epoch=None)
```

    ## 6. Appendix
    The full source code follows; the commented-out lines are the parameter settings tried along the way, and the uncommented lines are the final version.
```python
import os
import numpy as np
import tensorflow as tf
import argparse
from tensorflow.keras import backend as K
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Conv2D
from tensorflow.keras.layers import Dropout, MaxPooling2D, Flatten
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.utils import to_categorical
import moxing

# Argument parsing, used to read the MNIST dataset in ModelArts
parser = argparse.ArgumentParser()
parser.add_argument('--data_url', required=True, default=None, help='test')
arg = parser.parse_args()

# Copy the dataset from the OBS bucket (src_url) to the local working directory (dst_url)
moxing.file.copy_parallel(src_url=os.path.join(arg.data_url, 'mnist.npz'), dst_url='mnist.npz')

seed = 7
np.random.seed(seed)

# Load the MNIST dataset from the local copy
dataset_path = 'mnist.npz'
f = np.load(dataset_path)
X_train, y_train = f['x_train'], f['y_train']
X_test, y_test = f['x_test'], f['y_test']

# Data preprocessing
X_train = K.expand_dims(X_train, -1)
X_test = K.expand_dims(X_test, -1)
X_train = X_train / 255
X_test = X_test / 255

# Convert the labels to one-hot encoding
y_train = tf.convert_to_tensor(to_categorical(y_train))
y_test = tf.convert_to_tensor(to_categorical(y_test))
num_classes = y_test.shape[1]

# Define the convolutional neural network with Keras
def build_model(lr):
    # Create a Sequential model
    model = Sequential()
    # Convolutional and pooling layers
    model.add(Conv2D(filters=32, kernel_size=(3, 3), input_shape=(28, 28, 1), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(filters=64, kernel_size=(3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    # Flatten
    model.add(Flatten())
    # Fully connected layer
    model.add(Dense(128, activation='relu'))
    model.add(Dropout(0.5))
    # Output layer
    model.add(Dense(10, activation='softmax'))
    # Print the model summary
    model.summary()
    # Compile the model with loss function, optimizer, and metric
    # model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    # model.compile(loss='categorical_crossentropy', optimizer=Adam(learning_rate=0.01), metrics=['accuracy'])
    # model.compile(loss='categorical_crossentropy', optimizer=Adam(learning_rate=0.007), metrics=['accuracy'])
    # model.compile(loss='categorical_crossentropy', optimizer=Adam(learning_rate=0.005), metrics=['accuracy'])
    # model.compile(loss='categorical_crossentropy', optimizer=Adam(learning_rate=0.003), metrics=['accuracy'])
    model.compile(loss='categorical_crossentropy', optimizer=Adam(learning_rate=0.004), metrics=['accuracy'])
    return model

# Build the model (the lr argument is unused; the learning rate is set in compile above)
model = build_model(lr=1)

# Train the model
# model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=100, batch_size=200, verbose=2, steps_per_epoch=1)
# model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=100, batch_size=300, verbose=2, steps_per_epoch=1)
# model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=200, batch_size=300, verbose=2, steps_per_epoch=1)
# model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=300, batch_size=300, verbose=2, steps_per_epoch=1)
model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=300, batch_size=300, verbose=2, steps_per_epoch=None)

# Evaluate the model
scores = model.evaluate(X_test, y_test, verbose=0)
loss, accuracy = model.evaluate(X_test, y_test, verbose=1)
print('loss:%.4f accuracy:%.4f' % (loss, accuracy))
print("Baseline Error: %.2f%%" % (100 - scores[1] * 100))
```
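    The tuning notes above repeatedly mention the risk of overfitting as the number of epochs grows. One hedged option, not used in the original experiment, is an EarlyStopping callback that halts training once validation accuracy stops improving; the patience value below is an assumption.

```python
from tensorflow.keras.callbacks import EarlyStopping

# Stop once validation accuracy has not improved for 10 consecutive epochs
# and roll back to the best weights seen so far (patience is an assumed value)
early_stop = EarlyStopping(monitor='val_accuracy', patience=10, restore_best_weights=True)

model.fit(X_train, y_train,
          validation_data=(X_test, y_test),
          epochs=300, batch_size=300, verbose=2,
          callbacks=[early_stop])
```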
  • [Course Study] K-means Iris Clustering Experiment
    1. Objectives and Requirements
    Experiment objectives:
    Learn PyTorch basics: implement a simple convolutional neural network to learn how to build deep learning models with PyTorch.
    Understand CNN structure: learn the basic components of a CNN, such as convolutional, pooling, and fully connected layers, and their roles in image classification.
    Practice the deep learning training loop: by writing the training and test loops, understand the training process, including forward propagation, backpropagation, and loss computation.
    Master PyTorch tools: learn to use the tools and modules PyTorch provides, such as the optimizer and the learning rate scheduler.
    Analyze results: test the model and analyze the test results to evaluate its performance on handwritten digit recognition.
    Experiment requirements: implement a convolutional neural network (CNN) in PyTorch and train and test it on the MNIST dataset to recognize handwritten digits.

    2. Principles
    1. Convolutional neural network (CNN): a class of deep learning models designed for grid-like data such as images, consisting of convolutional, pooling, and fully connected layers. Convolutional layers extract image features effectively, while pooling layers downsample to reduce computation, helping the CNN learn both local patterns and the overall structure of an image.
    2. MNIST dataset: handwritten digit images, each a 28x28 grayscale image with a corresponding digit label; a common benchmark for image classification algorithms.
    3. Activation function: introduces non-linearity so the network can learn complex mappings; in this experiment the Rectified Linear Unit (ReLU) is the main activation, used between the convolutional and fully connected layers.
    4. Loss function: measures the difference between the model output and the true labels.
    5. Optimizer: updates the model parameters to minimize the loss.
    6. Learning rate scheduler: adjusts the learning rate dynamically to help the model converge.
    7. Training loop: forward propagation, backpropagation, and parameter updates; over many iterations the model learns to predict the digit labels accurately.
    8. Test loop: after training, the model is evaluated on the test set; metrics such as loss and accuracy indicate its generalization ability.

    3. Environment
    PyTorch, Torchvision, PyCharm 2022.3.2

    4. Results and Analysis
    Test set: Average Loss: 0.0473, Accuracy: 9851/10000 (99%)

    5. Appendix
    Code before modifying batch_size and learning_rate:

```python
from __future__ import print_function
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from torch.optim.lr_scheduler import StepLR

# Define the neural network model
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        # Convolutional layers
        self.conv1 = nn.Conv2d(1, 32, 3, 1)
        self.conv2 = nn.Conv2d(32, 64, 3, 1)
        # Dropout layers
        self.dropout1 = nn.Dropout(0.25)
        self.dropout2 = nn.Dropout(0.5)
        # Fully connected layers
        self.fc1 = nn.Linear(9216, 128)
        self.fc2 = nn.Linear(128, 10)

    def forward(self, x):
        # Convolution and activation
        x = self.conv1(x)
        x = F.relu(x)
        x = self.conv2(x)
        x = F.relu(x)
        # Max pooling
        x = F.max_pool2d(x, 2)
        x = self.dropout1(x)
        # Flatten for the fully connected layers
        x = torch.flatten(x, 1)
        # Fully connected layers with activation and dropout
        x = self.fc1(x)
        x = F.relu(x)
        x = self.dropout2(x)
        x = self.fc2(x)
        # Log-softmax output
        output = F.log_softmax(x, dim=1)
        return output

# Training function
def train(args, model, device, train_loader, optimizer, epoch):
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        output = model(data)
        # Negative log-likelihood loss
        loss = F.nll_loss(output, target)
        loss.backward()
        optimizer.step()
        if batch_idx % args.log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.item()))
            if args.dry_run:
                break

# Testing function
def test(model, device, test_loader):
    model.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
```
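    The principles above mention a learning rate scheduler, and the listing imports StepLR without showing how it is driven. Below is a minimal sketch of the usual wiring, assuming the Net, train, and test definitions from the listing together with suitably constructed args and MNIST data loaders; the optimizer choice, learning rate, decay schedule, and epoch count are assumptions rather than the report's actual settings.

```python
import torch
import torch.optim as optim
from torch.optim.lr_scheduler import StepLR

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = Net().to(device)

optimizer = optim.Adam(model.parameters(), lr=1e-3)    # assumed optimizer and learning rate
scheduler = StepLR(optimizer, step_size=1, gamma=0.7)  # assumed decay schedule

for epoch in range(1, 11):                             # assumed number of epochs
    train(args, model, device, train_loader, optimizer, epoch)
    test(model, device, test_loader)
    scheduler.step()                                   # decay the learning rate once per epoch
```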