• No output is produced and the program just hangs; judging from the error it seems to be stuck at the "loop to generate the poem" step


    @WzS · 2024-06-17 15:30 · acceptance rate 9.1% · 2 views
    Tags: python · machine learning · deep learning

    import math
    import re
    import numpy as np
    import tensorflow as tf
    from collections import Counter

    # Path to the data file
    DATA_PATH = './poetry.txt'
    # Maximum length of a single poem line
    MAX_LEN = 64
    # Disallowed characters; poems containing any of these are skipped
    DISALLOWED_WORDS = ['(', ')', '(', ')', '__', '《', '》', '【', '】', '[', ']']

    # One poem (one line) per list element
    poetry = []

    # Read poetry.txt line by line
    with open(DATA_PATH, 'r', encoding='utf-8') as f:
        lines = f.readlines()

    # Process each line
    for line in lines:
        # Split title and content with a regular expression
        fields = re.split(r"[::]", line)
        # Skip malformed lines
        if len(fields) != 2:
            continue
        # Keep only the poem content (the title is not needed later)
        content = fields[1]
        # Skip poems that are too long
        if len(content) > MAX_LEN - 2:
            continue
        # Skip poems containing disallowed characters
        if any(word in content for word in DISALLOWED_WORDS):
            continue
        poetry.append(content.replace('\n', ''))  # remember to strip the newline

    # Minimum word frequency
    MIN_WORD_FREQUENCY = 8
    # Count frequencies; Counter counts directly per single character
    counter = Counter()
    for line in poetry:
        counter.update(line)
    # Drop low-frequency tokens
    tokens = [token for token, count in counter.items() if count >= MIN_WORD_FREQUENCY]
    # Prepend the special tokens: padding, unknown, start, end
    tokens = ["[PAD]", "[NONE]", "[START]", "[END]"] + tokens

    # Mapping: token -> id
    word_idx = {}
    # Mapping: id -> token
    idx_word = {}
    for idx, word in enumerate(tokens):
        word_idx[word] = idx
        idx_word[idx] = word

    # Tokenizer
    class Tokenizer:
        """Tokenizer"""

        def __init__(self, tokens):
            # Vocabulary size
            self.dict_size = len(tokens)
            # Build the mappings
            self.token_id = {}  # mapping: token -> id
            self.id_token = {}  # mapping: id -> token
            for idx, word in enumerate(tokens):
                self.token_id[word] = idx
                self.id_token[idx] = word
            # Ids of the special tokens, for convenient use elsewhere
            self.start_id = self.token_id["[START]"]
            self.end_id = self.token_id["[END]"]
            self.none_id = self.token_id["[NONE]"]
            self.pad_id = self.token_id["[PAD]"]

        def id_to_token(self, token_id):
            """id -> token"""
            return self.id_token.get(token_id)

        def token_to_id(self, token):
            """token -> id"""
            return self.token_id.get(token, self.none_id)

        def encode(self, tokens):
            """token list -> [START] id + id list + [END] id"""
            token_ids = [self.start_id, ]  # start marker
            # Convert each token to its id
            for token in tokens:
                token_ids.append(self.token_to_id(token))
            token_ids.append(self.end_id)  # end marker
            return token_ids

        def decode(self, token_ids):
            """id list -> token list (start/end markers removed)"""
            # Start/end markers
            flag_tokens = {"[START]", "[END]"}
            tokens = []
            for idx in token_ids:
                token = self.id_to_token(idx)
                # Skip start/end markers
                if token not in flag_tokens:
                    tokens.append(token)
            return tokens

    tokenizer = Tokenizer(tokens)

    # Dataset generator
    class PoetryDataSet:
        """Classical-poetry dataset generator"""

        def __init__(self, data, tokenizer, batch_size):
            # Dataset
            self.data = data
            self.total_size = len(self.data)
            # Tokenizer, used for token -> id conversion
            self.tokenizer = tokenizer
            # Batch size
            self.batch_size = batch_size
            # Steps per epoch
            self.steps = int(math.floor(len(self.data) / self.batch_size))
            # Maximum length
            self.max_length = max(map(len, data)) + 2  # +2 for the [START] and [END] markers
        # ... other methods unchanged ...

        def pad_line(self, line, padding=None):
            """Pad a single line"""
            if padding is None:
                padding = self.tokenizer.pad_id
            # Pad up to max_length
            padding_length = self.max_length - len(line)
            if padding_length > 0:
                return line + [padding] * padding_length
            else:
                return line[:self.max_length]

        def __len__(self):
            return self.steps

        def __iter__(self):
            # Shuffle the data
            np.random.shuffle(self.data)
            # Iterate one epoch, yielding one batch at a time
            for start in range(0, self.total_size, self.batch_size):
                end = min(start + self.batch_size, self.total_size)
                data = self.data[start:end]
                max_length = max(map(len, data))
                batch_data = []
                for str_line in data:
                    # Encode each poem line and pad it
                    encode_line = self.tokenizer.encode(str_line)
                    pad_encode_line = self.pad_line(encode_line, max_length + 2)  # +2 because tokenizer.encode adds START and END
                    batch_data.append(pad_encode_line)
                batch_data = np.array(batch_data)
                # yield features, labels
                yield batch_data[:, :-1], batch_data[:, 1:]

        def generator(self):
            while True:
                yield from self.__iter__()

    BATCH_SIZE = 32
    dataset = PoetryDataSet(poetry, tokenizer, BATCH_SIZE)
    print(tokenizer.dict_size)

    # Build and train the model
    model = tf.keras.Sequential([
        # Embedding layer
        tf.keras.layers.Embedding(input_dim=tokenizer.dict_size, output_dim=150),
        # First LSTM layer
        tf.keras.layers.LSTM(150, dropout=0.5, return_sequences=True),
        # Second LSTM layer
        tf.keras.layers.LSTM(150, dropout=0.5, return_sequences=True),
        # TimeDistributed applies the Dense (softmax) layer to every time step
        tf.keras.layers.TimeDistributed(tf.keras.layers.Dense(tokenizer.dict_size, activation='softmax')),
    ])

    model.compile(
        optimizer=tf.keras.optimizers.Adam(),
        loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
    )
    model.summary()

    # model.fit(
    #     dataset.generator(),
    #     steps_per_epoch=dataset.steps,
    #     epochs=1
    # )

    model.save("./rnn_model.h5")

    # Prediction
    def predict_and_update(model, tokenizer, s, token_ids, poetry, punctuation_ids):
        # 5.1) Predict; keep only the output for the last token of the first (and only) sample
        output = model(np.array([token_ids], dtype=np.int32))
        _probas = output.numpy()[0, -1, 3:]
        del output
        # 5.2) Renormalize the predicted probabilities
        p_args = _probas.argsort()[::-1][:100]
        p = _probas[p_args]
        p = p / sum(p)
        # 5.3) Randomly pick a token according to the probabilities
        target_index = np.random.choice(len(p), p=p)
        target = p_args[target_index] + 3
        # 5.4) Save the result
        token_ids.append(target)
        if target > 3:
            poetry.append(tokenizer.id_to_token(target))
        if target in punctuation_ids:
            return True  # Indicates end of a line
        return False  # Continue generating text

    import tensorflow as tf

    # Load the model
    model_path = "D:/Python学习专用/pythonProject/rnn_model.h5"
    model = tf.keras.models.load_model(model_path)

    # Define the input string and the initial token_ids
    s = "your_input_text"
    token_ids = [tokenizer.start_id]  # replace with the actual start-token id
    poetry = []

    # Token ids treated as punctuation
    punctuation_ids = [
        tokenizer.token_to_id("[PAD]"),
        tokenizer.token_to_id("[NONE]"),
        tokenizer.token_to_id("[START]"),
        tokenizer.token_to_id("[END]")
    ]

    # Loop to generate the poem
    while True:
        if predict_and_update(model, tokenizer, s, token_ids, poetry, punctuation_ids):
            break

    # Print the generated poem
    print("生成的诗歌:")
    print(" ".join(poetry))

    D:\Python\python.exe D:\Python学习专用\pythonProject\test2.py
    2024-06-17 15:28:07.897730: I tensorflow/core/util/port.cc:113] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`.
    2024-06-17 15:28:08.906320: I tensorflow/core/util/port.cc:113] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`.
    3434
    2024-06-17 15:28:11.543051: I tensorflow/core/platform/cpu_feature_guard.cc:210] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations. To enable the following instructions: AVX2 AVX_VNNI FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags.
    Model: "sequential"
    ┌─────────────────────────────────┬────────────────────────┬───────────────┐
    │ Layer (type)                    │ Output Shape           │ Param #       │
    ├─────────────────────────────────┼────────────────────────┼───────────────┤
    │ embedding (Embedding)           │ ?                      │ 0 (unbuilt)   │
    ├─────────────────────────────────┼────────────────────────┼───────────────┤
    │ lstm (LSTM)                     │ ?                      │ 0 (unbuilt)   │
    ├─────────────────────────────────┼────────────────────────┼───────────────┤
    │ lstm_1 (LSTM)                   │ ?                      │ 0 (unbuilt)   │
    ├─────────────────────────────────┼────────────────────────┼───────────────┤
    │ time_distributed                │ ?                      │ 0 (unbuilt)   │
    │ (TimeDistributed)               │                        │               │
    └─────────────────────────────────┴────────────────────────┴───────────────┘
    Total params: 0 (0.00 B)
    Trainable params: 0 (0.00 B)
    Non-trainable params: 0 (0.00 B)
    WARNING:absl:You are saving your model as an HDF5 file via `model.save()` or `keras.saving.save_model(model)`. This file format is considered legacy. We recommend using instead the native Keras format, e.g. `model.save('my_model.keras')` or `keras.saving.save_model(model, 'my_model.keras')`.
    WARNING:absl:Compiled the loaded model, but the compiled metrics have yet to be built. `model.compile_metrics` will be empty until you train or evaluate the model.
    Traceback (most recent call last):
      File "D:\Python学习专用\pythonProject\test2.py", line 268, in <module>
        if predict_and_update(model, tokenizer, s, token_ids, poetry, punctuation_ids):
      File "D:\Python学习专用\pythonProject\test2.py", line 222, in predict_and_update
        output = model(np.array([token_ids], dtype=np.int32))
      File "D:\Python\lib\site-packages\keras\src\utils\traceback_utils.py", line 118, in error_handler
        return fn(*args, **kwargs)
      File "D:\Python\lib\site-packages\keras\src\layers\layer.py", line 816, in __call__
        outputs = super().__call__(*args, **kwargs)
      File "D:\Python\lib\site-packages\keras\src\utils\traceback_utils.py", line 118, in error_handler
        return fn(*args, **kwargs)
      File "D:\Python\lib\site-packages\keras\src\ops\operation.py", line 42, in __call__
        return call_fn(*args, **kwargs)
      File "D:\Python\lib\site-packages\keras\src\utils\traceback_utils.py", line 157, in error_handler
        return fn(*args, **kwargs)
      File "D:\Python\lib\site-packages\keras\src\models\sequential.py", line 203, in call
        return self._functional.call(inputs, training=training, mask=mask)
      File "D:\Python\lib\site-packages\keras\src\models\functional.py", line 188, in call
        outputs = self._run_through_graph(
      File "D:\Python\lib\site-packages\keras\src\ops\function.py", line 153, in _run_through_graph
        outputs = operation_fn(node.operation)(*args, **kwargs)
      File "D:\Python\lib\site-packages\keras\src\models\functional.py", line 572, in call
        return operation(*args, **kwargs)
      File "D:\Python\lib\site-packages\keras\src\utils\traceback_utils.py", line 118, in error_handler
        return fn(*args, **kwargs)
      File "D:\Python\lib\site-packages\keras\src\layers\layer.py", line 816, in __call__
        outputs = super().__call__(*args, **kwargs)
      File "D:\Python\lib\site-packages\keras\src\utils\traceback_utils.py", line 118, in error_handler
        return fn(*args, **kwargs)
      File "D:\Python\lib\site-packages\keras\src\ops\operation.py", line 42, in __call__
        return call_fn(*args, **kwargs)
      File "D:\Python\lib\site-packages\keras\src\utils\traceback_utils.py", line 157, in error_handler
        return fn(*args, **kwargs)
      File "D:\Python\lib\site-packages\keras\src\layers\rnn\time_distributed.py", line 108, in call
        [step_function(i) for i in range(inputs.shape[0])]
      File "D:\Python\lib\site-packages\keras\src\layers\rnn\time_distributed.py", line 108, in <listcomp>
        [step_function(i) for i in range(inputs.shape[0])]
      File "D:\Python\lib\site-packages\keras\src\layers\rnn\time_distributed.py", line 102, in step_function
        return self.layer.call(inputs[i], **kwargs)
      File "D:\Python\lib\site-packages\tensorflow\python\util\traceback_utils.py", line 150, in error_handler
        return fn(*args, **kwargs)
      File "D:\Python\lib\site-packages\tensorflow\python\util\dispatch.py", line 1260, in op_dispatch_handler
        return dispatch_target(*args, **kwargs)
      File "D:\Python\lib\site-packages\tensorflow\python\ops\tensor_getitem_override.py", line 230, in _slice_helper
        array_ops_stack.stack(begin),
      File "D:\Python\lib\site-packages\tensorflow\python\util\traceback_utils.py", line 150, in error_handler
        return fn(*args, **kwargs)
      File "D:\Python\lib\site-packages\tensorflow\python\util\dispatch.py", line 1260, in op_dispatch_handler
        return dispatch_target(*args, **kwargs)
      File "D:\Python\lib\site-packages\tensorflow\python\ops\array_ops_stack.py", line 74, in stack
        return ops.convert_to_tensor(values, name=name)
      File "D:\Python\lib\site-packages\tensorflow\python\profiler\trace.py", line 183, in wrapped
        return func(*args, **kwargs)
      File "D:\Python\lib\site-packages\tensorflow\python\framework\ops.py", line 713, in convert_to_tensor
        return tensor_conversion_registry.convert(
      File "D:\Python\lib\site-packages\tensorflow\python\framework\tensor_conversion_registry.py", line 234, in convert
        ret = conversion_func(value, dtype=dtype, name=name, as_ref=as_ref)
      File "D:\Python\lib\site-packages\tensorflow\python\framework\constant_tensor_conversion.py", line 29, in _constant_tensor_conversion_function
        return constant_op.constant(v, dtype=dtype, name=name)
      File "D:\Python\lib\site-packages\tensorflow\python\ops\weak_tensor_ops.py", line 142, in wrapper
        return op(*args, **kwargs)
      File "D:\Python\lib\site-packages\tensorflow\python\framework\constant_op.py", line 276, in constant
        return _constant_impl(value, dtype, shape, name, verify_shape=False,
      File "D:\Python\lib\site-packages\tensorflow\python\framework\constant_op.py", line 285, in _constant_impl
        if ctx.executing_eagerly():
    KeyboardInterrupt
    Process finished with exit code -1073741510 (0xC000013A: interrupted by Ctrl+C)

    3 answers

    • 专家-皆知 2024-06-17 15:31

      Answer drafted with the free WeChat mini-program 皆我百晓生 and edited by 专家-皆知. If this solves your problem, please accept the answer; if you still have questions, reply in the comments.

      From the information provided, the main problem is that the run stalls during model loading and prediction. This usually happens when the model's structure is loaded without its parameters ever being properly built, so prediction runs into shape mismatches or other runtime errors. To fix it, make sure the loaded model is built for the shape of your input data.

      In the code you posted, the model definition never specifies an input shape, which can leave the loaded model hitting a shape mismatch when it tries to predict. The prediction step inside predict_and_update also needs to feed the model data whose shape matches what the model expects.

      Below is a modified snippet. The key points are to initialize the model's weights after loading by compiling it and calling build, and to make sure the input data has the correct shape at prediction time.

      Specify the input shape explicitly in the model definition

      # Build and train the model
      model = tf.keras.Sequential([
          # Embedding layer
          tf.keras.layers.Embedding(input_dim=tokenizer.dict_size, output_dim=150, input_length=MAX_LEN+2),  # state the input length explicitly
          # First LSTM layer
          tf.keras.layers.LSTM(150, dropout=0.5, return_sequences=True),
          # Second LSTM layer
          tf.keras.layers.LSTM(150, dropout=0.5, return_sequences=True),
          # TimeDistributed applies the Dense (softmax) layer to every time step
          tf.keras.layers.TimeDistributed(tf.keras.layers.Dense(tokenizer.dict_size, activation='softmax')),
      ])
      
      model.compile(
          optimizer=tf.keras.optimizers.Adam(),
          loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
      )
      model.build(input_shape=(None, MAX_LEN+2))  # state the input shape explicitly
      model.summary()
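
      A side note, as a minimal sketch rather than part of the fix above: running the model once on a dummy batch also forces Keras to create the weights, which is what the "0 (unbuilt)" entries in your model summary point at. It assumes `model`, `np` and MAX_LEN from the code above are in scope.

      # Minimal sketch: build the weights by calling the model on a dummy batch.
      dummy_batch = np.zeros((1, MAX_LEN + 2), dtype=np.int32)  # a single all-[PAD] sequence (id 0)
      _ = model(dummy_batch)  # triggers weight creation for every layer
      model.summary()         # parameter counts should no longer read "0 (unbuilt)"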
      

      Check that the model is built and compiled before predicting

      # Load the model
      model_path = "D:/Python学习专用/pythonProject/rnn_model.h5"
      model = tf.keras.models.load_model(model_path)
      
      # Make sure the model is built and compiled
      if not model.built:
          model.build(input_shape=(None, MAX_LEN+2))
      if not model._is_compiled:
          model.compile(
              optimizer=tf.keras.optimizers.Adam(),
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
          )
      
      # Confirm that the model's weights are initialized
      model.summary()
      

      Make sure the input shape is correct for predict_and_update

      # Loop to generate the poem
      token_ids = [tokenizer.start_id]  # replace with the actual start-token id
      poetry = []
      
      # Token ids treated as punctuation
      punctuation_ids = [
          tokenizer.token_to_id("[PAD]"),
          tokenizer.token_to_id("[NONE]"),
          tokenizer.token_to_id("[START]"),
          tokenizer.token_to_id("[END]")
      ]
      
      # Make sure the input shape matches what the model expects
      input_data = np.array([token_ids], dtype=np.int32)
      input_data = np.pad(input_data, ((0, 0), (0, MAX_LEN+2-len(input_data[0]))), mode='constant', constant_values=tokenizer.pad_id)
      
      while True:
          output = model(input_data)  # the input now has the expected shape
          ...
      

      These changes should resolve the shape mismatch when predicting with the loaded model and keep the whole pipeline running. If problems remain, for example an undertrained model or faulty preprocessing, you may need to adjust the training step or the data-preparation logic further; a sketch of the training step follows below.
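
      Since model.fit is commented out in your script, the saved rnn_model.h5 is written before any training, so it contains an unbuilt and untrained network. A minimal sketch of training before saving, assuming the `dataset`, `model` and PoetryDataSet defined earlier in your code:

      # Minimal sketch: train before saving, using the question's own generator.
      # Assumes `dataset = PoetryDataSet(poetry, tokenizer, BATCH_SIZE)` exists.
      model.fit(
          dataset.generator(),            # endless generator of (features, labels) batches
          steps_per_epoch=dataset.steps,  # batches per epoch
          epochs=1                        # increase for better results
      )
      model.save("./rnn_model.h5")        # now the file contains trained weights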

    • 阿里嘎多学长 2024-06-17 15:31

      The following was generated jointly by ChatGPT and 阿里嘎多学长; if it helps, please accept the answer:


      Judging from the code and the error log you provided, there are several possible reasons why the program hangs and finally raises KeyboardInterrupt:

      1. Infinite loop: the main generation loop is a while True: loop that only exits when predict_and_update returns True; if that condition is never met, the program keeps running until it is interrupted from outside.

      2. Model prediction problem: inside predict_and_update, the call model(np.array([token_ids], dtype=np.int32)) can stall or fail if token_ids is empty or the input shape does not match what the model expects.

      3. Program logic problem: the check if predict_and_update(model, tokenizer, s, token_ids, poetry, punctuation_ids): is the loop's only exit; if the function never returns True (for example, the model never predicts one of the punctuation_ids), the loop has no way to terminate.

      4. External interrupt: a KeyboardInterrupt usually means the user pressed Ctrl+C; here it most likely means the program ran for a long time without producing output and was stopped manually.

      To address these issues, you can try the following steps:

      • Check the loop's exit condition: make sure the generation loop has an explicit exit condition, for example stopping after a certain number of tokens or lines has been generated.

      • Add logging: print or log inside the loop so you can monitor the program's progress and spot where it gets stuck.

      • Check the model input: make sure the data passed to the model is valid; token_ids must not be empty, and its dtype and shape must match the model's input requirements.

      • Exception handling: add a try...except around the loop so possible exceptions are caught and reported instead of crashing the program outright.

      • Tidy up the program logic: review predict_and_update and the loop that calls it, and make sure there is a clear, reachable termination condition.

      Here is a simplified example showing how to add an exit condition and some logging:

      def predict_and_update(model, tokenizer, s, token_ids, poetry, punctuation_ids, max_iterations=100):
          for i in range(max_iterations):
              # Run the prediction here ...
              # (put the body of your original predict_and_update here)
              print(f"step {i}: {len(poetry)} tokens generated so far")  # simple progress logging
              if should_exit_condition():  # replace with your real exit condition
                  return True  # tell the caller that generation is finished
          print("Reached maximum iterations without exiting.")
          return False  # adjust the return value to your own logic
      
      # Call the function from the main loop
      try:
          while True:
              if predict_and_update(model, tokenizer, s, token_ids, poetry, punctuation_ids):
                  break
      except KeyboardInterrupt:
          print("Program interrupted by user.")
      

      Note that this is only an example; you will need to adapt it to your actual program logic. Hopefully these suggestions help you solve the problem.
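
      Applied to the generation loop in your script, a minimal sketch of a bounded loop could look like this. It assumes the `model`, `tokenizer`, `s`, `punctuation_ids` and `predict_and_update` from your code are already defined.

      # Minimal sketch: bound the generation loop so it cannot run forever.
      token_ids = [tokenizer.start_id]
      poetry = []
      MAX_TOKENS = 64  # hard upper bound on the number of generated tokens

      for step in range(MAX_TOKENS):
          done = predict_and_update(model, tokenizer, s, token_ids, poetry, punctuation_ids)
          print(f"step {step}: {''.join(poetry)}")  # progress logging
          if done:
              break
      else:
          print("Hit MAX_TOKENS without the model emitting an end/punctuation token.")

      print("Generated poem:", "".join(poetry))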

    • Kwan的解忧杂货铺 2024-06-17 15:36

      Good afternoon 🌅🌅🌅
      This answer is based on ChatGPT-3.5

      There are two issues in your code that need to be addressed:

      1. The loop for line in lines:: inside this loop there is no explicit check that all the data has actually been traversed. A conditional check can verify that every line was read:

        if not all(i < len(lines) for i in range(len(lines))):
            raise ValueError("Did not read all the lines.")
        
      2. If the text file is too large to load into memory all at once, it needs to be processed in parallel; a streaming sketch is shown below.
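
      A minimal sketch of a streaming (line-by-line) read, which is simpler than true parallel processing and keeps memory flat for large files. It reuses the filtering rules from your script and assumes `re`, MAX_LEN and DISALLOWED_WORDS are defined as in the question.

      # Minimal sketch: stream poetry.txt line by line instead of readlines().
      # This is streaming, not parallel processing.
      def iter_poems(path):
          with open(path, 'r', encoding='utf-8') as f:
              for line in f:  # file objects iterate lazily, one line at a time
                  fields = re.split(r"[::]", line)
                  if len(fields) != 2:
                      continue
                  content = fields[1].strip()
                  if len(content) > MAX_LEN - 2:
                      continue
                  if any(w in content for w in DISALLOWED_WORDS):
                      continue
                  yield content

      poetry = list(iter_poems('./poetry.txt'))  # or consume lazily in a loop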

      Here is the code modified with these issues in mind:

      import math
      import re
      import numpy as np
      import tensorflow as tf
      from collections import Counter
      
      # Path to the data file
      DATA_PATH = './poetry.txt'
      # Maximum length of a single poem line
      MAX_LEN = 64
      # Disallowed characters; poems containing any of these are skipped
      DISALLOWED_WORDS = ['(', ')', '(', ')', '__', '<', '>', '[', ']']
      
      # One poem (one line) per list element
      poetry = []
      
      # Read poetry.txt line by line
      with open(DATA_PATH, 'r', encoding='utf-8') as f:
          lines = f.readlines()
      
      # Keep all lines as a list
      all_lines = [line.strip() for line in lines]
      
      # Count occurrences of each entry with collections.Counter
      word_counts = Counter(all_lines)
      
      # Filter out low-frequency entries using the minimum frequency
      tokens = [token for token, count in word_counts.most_common(10)]
      
      # Create the dataset generator
      class PoetryDataSet:
          """
          Classical-poetry dataset generator
          """
      
          def __init__(self, data, tokenizer, batch_size):
              # Dataset
              self.data = data
              # Amount of data to split
              self.max_len = max(map(len, data)) + 2  # +2 for the [START] and [END] markers
              self.batch_size = batch_size
              # Initialize the vocabulary and id dictionaries
              self.tokenizer_dict = {}
              self.word_id_dict = {}
      
              # Total number of words
              total_words = sum([len(token) for token in all_lines])
      
              # Build each word and its corresponding id
              for word in all_lines:
                  self.tokenizer_dict[word] = list(range(total_words))
                  self.word_id_dict[word] = word
      
              # Convert the input words to ids
              for token in all_lines:
                  if token in self.tokenizer_dict:
                      word_id = self.tokenizer_dict[token]
                      if self.word_id_dict.get(word_id, 0) == 0:
                          self.word_id_dict[word_id] = 1
      
          def pad_line(self, line, padding=None):
              """
              Pad a single line
              """
              if padding is None:
                  padding = self.tokenizer.pad_id
      
              # Pad up to max_length
              padding_length = self.max_length - len(line)
              if padding_length > 0:
                  return line + [padding] * padding_length
              else:
                  return line[:self.max_length]
      
          def next_batch(self):
              """
              Sort the input lines and fetch the next batch
              """
              while True:
                  # Get the list of lines to process
                  for i, line in enumerate(all_lines, start=1):
                      yield line
      
                  # Put the lines back in order
                  all_lines.sort(key=lambda x: x[0])
                  if len(all_lines) % self.batch_size == 0:
                      break
      
      
      # Create the poetry dataset and generate data
      dataset = PoetryDataSet(all_lines, tokenizer, 16)
      print(dataset.next_batch())
      

      This version of the program should now read the poetry data, turn it into a dataset, and process it accordingly.


  • Original post: https://ask.csdn.net/questions/8119798