AnlaAnla committed 2 years ago
commit 374e436aff
7 changed files with 491 additions and 87 deletions
  1. Image2YoyoImage.py (+2 -2)
  2. Milvus_Test.py (+68 -78)
  3. Milvus_Test_effnet.py (+267 -0)
  4. MyEfficientNet.py (+10 -2)
  5. MyModel2.py (+67 -0)
  6. test04.py (+34 -5)
  7. 测试日志.log (+43 -0)

+ 2 - 2
Image2YoyoImage.py

@@ -10,8 +10,8 @@ vec_num = 0
 yolo_model = torch.hub.load(r"C:\Users\Administrator\.cache\torch\hub\ultralytics_yolov5_master", 'custom',
                             path="yolov5s.pt", source='local')
 
-dataset_path = [r"D:\Code\ML\images\Mywork3\card_database\prizm\21-22\*\*"]
-yolo_dataset_dir = r"D:\Code\ML\images\Mywork3\card_database_yolo"
+dataset_path = [r"D:\Code\ML\images\test02\test(mosaic,pz)\*\*\*\*"]
+yolo_dataset_dir = r"D:\Code\ML\images\test02\test(mosaic,pz)_yolo"
 
 
 def get_save_dir(save_dir, source_path):
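For context, the updated dataset_path is a four-level wildcard under the new test directory. A minimal sketch of how such a pattern is expanded with the standard glob module (the layout comment is an assumption about set/year/card folders, not taken from the repo):

import glob

# assumed layout: <root>\<set>\<year>\<card folder>\<image file>
dataset_path = [r"D:\Code\ML\images\test02\test(mosaic,pz)\*\*\*\*"]

files = []
for pattern in dataset_path:
    files.extend(glob.glob(pattern))  # each * matches exactly one path level
print("matched", len(files), "files")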

+ 68 - 78
Milvus_Test.py

@@ -6,8 +6,8 @@ import PIL.Image as Image
 import numpy as np
 from pymilvus import connections, FieldSchema, CollectionSchema, DataType, Collection, utility
 
-from MyModel import MyModel
-from MyEfficientNet import MyEfficient
+from MyModel2 import MyModel
+
 import torch
 from transformers import ViTFeatureExtractor, ViTModel
 from towhee.types.image_utils import to_image_color
@@ -16,15 +16,16 @@ connections.connect(host='127.0.0.1', port='19530')
 dataset_path = ["D:\Code\ML\images\Mywork3\card_database_yolo/*/*/*/*"]
 
 img_id = 0
+yolo_num = 0
 vec_num = 0
-myModel = MyModel(r"D:\Code\ML\model\card_cls\res_card_out764_freeze4.pth", out_features=764)
+myModel = MyModel(r"D:\Code\ML\model\card_cls\res_card_out764_freeze5.pth", out_features=764)
 # myModel = MyModel(r"C:\Users\Administrator\.cache\torch\hub\checkpoints\resnet50-0676ba61.pth", out_features=1000)
 
 # myModel = MyEfficient('')
 
 
-yolo_model = torch.hub.load(r"C:\Users\Administrator\.cache\torch\hub\ultralytics_yolov5_master", 'custom',
-                            path="yolov5s.pt", source='local')
+# yolo_model = torch.hub.load(r"C:\Users\Administrator\.cache\torch\hub\ultralytics_yolov5_master", 'custom',
+#                             path="yolov5s.pt", source='local')
 
 
 # yolo_model = torch.hub.load("ultralytics/yolov5", "yolov5s")
@@ -80,36 +81,40 @@ def read_imgID(results):
     return imgIDs
 
 
-def yolo_detect(img):
-    results = yolo_model(img)
-
-    pred = results.pred[0][:, :4].cpu().numpy()
-    boxes = pred.astype(np.int32)
-
-    max_img = get_object(img, boxes)
-    return max_img
-
-
-def get_object(img, boxes):
-    if isinstance(img, str):
-        img = Image.open(img)
-
-    if len(boxes) == 0:
-        return img
-
-    max_area = 0
-
-    # pick the largest bounding box
-    x1, y1, x2, y2 = 0, 0, 0, 0
-    for box in boxes:
-        temp_x1, temp_y1, temp_x2, temp_y2 = box
-        area = (temp_x2 - temp_x1) * (temp_y2 - temp_y1)
-        if area > max_area:
-            max_area = area
-            x1, y1, x2, y2 = temp_x1, temp_y1, temp_x2, temp_y2
-
-    max_img = img.crop((x1, y1, x2, y2))
-    return max_img
+# def yolo_detect(img):
+#     results = yolo_model(img)
+#
+#     pred = results.pred[0][:, :4].cpu().numpy()
+#     boxes = pred.astype(np.int32)
+#
+#     max_img = get_object(img, boxes)
+#
+#     global yolo_num
+#     yolo_num += 1
+#     print("yolo_num: ", yolo_num)
+#     return max_img
+#
+#
+# def get_object(img, boxes):
+#     if isinstance(img, str):
+#         img = Image.open(img)
+#
+#     if len(boxes) == 0:
+#         return img
+#
+#     max_area = 0
+#
+#     # pick the largest bounding box
+#     x1, y1, x2, y2 = 0, 0, 0, 0
+#     for box in boxes:
+#         temp_x1, temp_y1, temp_x2, temp_y2 = box
+#         area = (temp_x2 - temp_x1) * (temp_y2 - temp_y1)
+#         if area > max_area:
+#             max_area = area
+#             x1, y1, x2, y2 = temp_x1, temp_y1, temp_x2, temp_y2
+#
+#     max_img = img.crop((x1, y1, x2, y2))
+#     return max_img
 
 
 # Create the Milvus collection
@@ -166,12 +171,23 @@ def query_by_imgID(collection, img_id, limit=1):
     return res
 
 
-def from_path_get_series(path):
+# Return the card number, year, and series (in that order)
+def from_path_get_info(path):
+    card_info = []
     for i in range(3):
         path = os.path.split(path)[0]
-    series = os.path.split(path)[-1]
+        card_info.append(os.path.split(path)[-1])
+    card_info[0] = card_info[0].split('#')[-1]
+    return card_info
 
-    return series
+
+def from_query_path_get_info(path):
+    card_info = []
+    for i in range(3):
+        path = os.path.split(path)[0]
+        card_info.append(os.path.split(path)[-1])
+    card_info[0] = card_info[0].split(' ')[0]
+    return card_info
 
 
 if __name__ == '__main__':
@@ -186,12 +202,12 @@ if __name__ == '__main__':
     collection = is_creat_collection(have_coll=have_coll, collection_name="reverse_image_search_myModel")
 
     # test image paths
-    img_path = ["D:/Code/ML/images/test02/test2/*/*/*/*"]
+    img_path = ["D:/Code/ML/images/test02/test(mosaic,pz)/*/*/*/*"]
 
     data = (towhee.glob['path'](*img_path)
             # image_decode['path', 'img']().
-            .runas_op['path', "object"](yolo_detect)
-            .runas_op['object', 'vec'](func=img2vec)
+            # .runas_op['path', "object"](yolo_detect)
+            .runas_op['path', 'vec'](func=img2vec)
             .tensor_normalize['vec', 'vec']()
             # image_embedding.timm['img', 'vec'](model_name='resnet50').
             .ann_search.milvus['vec', 'result'](collection=collection, limit=3)
@@ -206,7 +222,6 @@ if __name__ == '__main__':
     #
     # print(res[0])
 
-
     top3_num = 0
     top1_num = 0
     test_img_num = len(list(data))
@@ -215,35 +230,29 @@ if __name__ == '__main__':
     for i in range(test_img_num):
         top3_flag = False
 
-        # get the image's ground-truth series
-        source_card_series = from_path_get_series(data[i].path)
-        # get the image's ground-truth number
-        source_num = os.path.split(os.path.split(data[i].path)[0])[-1].split('#')[-1]
+        # get the image's ground-truth number, year, and series
+        source_code, source_year, source_series = from_path_get_info(data[i].path)
 
         # each test image returns the three most similar image IDs; check them one by one
         for j in range(3):
             res = query_by_imgID(collection, data[i].result_imgID[j])
 
-            # get the predicted image's series
-            result_card_series = from_path_get_series(res[0]['path'])
-            # get the predicted image's number
-            result_num = os.path.split(os.path.split(res[0]['path'])[0])[-1].split(' ')[0].split('#')[-1]
+            # get the predicted image's number, year, and series
+            result_code, result_year, result_series = from_query_path_get_info(res[0]['path'])
 
             # check whether the top-1 result is correct
-            if j == 0 and source_num == result_num and source_card_series == result_card_series:
+            if j == 0 and source_code == result_code and source_year == result_year and source_series == result_series:
                 top1_num += 1
+                print(top1_num)
+            elif j == 0:
+                print('top_1 incorrect')
 
             # if any of the top-3 results is correct, count it for top-3
-            if source_num == result_num and source_card_series == result_card_series:
+            if source_code == result_code and source_year == result_year and source_series == result_series:
                 top3_flag = True
 
-            # logging
-            if j == 0 and source_num == result_num and source_card_series == result_card_series:
-                print(top1_num)
-            elif j == 0:
-                print('top_1 incorrect')
-            print("series: {}, num: {} === result - series: {}, num: {}".format(
-                source_card_series, source_num, result_card_series, result_num
+            print("series: {}, year: {},code: {} === result - series: {}, year: {}, code: {}".format(
+                source_series, source_year, source_code, result_series, result_year, result_code,
             ))
 
         if top3_flag:
@@ -258,22 +267,3 @@ if __name__ == '__main__':
     print("top3 准确率:{} % \n top1 准确率: {} %".
           format(top3_accuracy, top1_accuracy))
 
-'''
- Test images: 168
- Custom resnet50_freeze_out421 + yolo + normalize
-top-3 accuracy: 96.42857142857143 %
- top-1 accuracy: 95.23809523809523 %
-
-
- Test images: 773, database images: 5848
- Custom resnet50_freeze_out421 + yolo + normalize
-top-3 accuracy: 96.63648124191462 %
- top-1 accuracy: 95.60155239327295 %
-
-
- Test images: 773, database images: 5848
- Custom resnet50_out764_freeze + yolo + normalize
-top-3 accuracy: 96.76584734799482 %
- top-1 accuracy: 96.50711513583441 %
-'''

+ 267 - 0
Milvus_Test_effnet.py

@@ -0,0 +1,267 @@
+import towhee
+import cv2
+from towhee._types.image import Image
+import os
+import PIL.Image as Image
+import numpy as np
+from pymilvus import connections, FieldSchema, CollectionSchema, DataType, Collection, utility
+
+from MyEfficientNet import MyModel
+import torch
+
+
+connections.connect(host='127.0.0.1', port='19530')
+dataset_path = [r"D:\Code\ML\images\Mywork3\card_database_yolo/*/*/*/*"]
+
+img_id = 0
+yolo_num = 0
+vec_num = 0
+myModel = MyModel(r"D:\Code\ML\model\card_cls\effcient_card_out854_freeze2.pth", out_features=854)
+
+# yolo_model = torch.hub.load(r"C:\Users\Administrator\.cache\torch\hub\ultralytics_yolov5_master", 'custom',
+#                             path="yolov5s.pt", source='local')
+
+
+# yolo_model = torch.hub.load("ultralytics/yolov5", "yolov5s")
+
+
+# generate a sequential image ID
+def get_id(param):
+    global img_id
+    img_id += 1
+    return img_id
+
+
+# def eff_enbedding(img):
+#     global vec_num
+#     vec_num += 1
+#     print('vec: ', vec_num)
+#     return myModel.predict(img)
+
+# generate the embedding vector
+def img2vec(img):
+    global vec_num
+    vec_num += 1
+    print('vec: ', vec_num)
+    return myModel.predict(img)
+
+
+# build the card info string
+path_num = 0
+
+
+def get_info(path):
+    path = os.path.split(path)[0]
+
+    path, num_and_player = os.path.split(path)
+    num = num_and_player.split(' ')[0]
+    player = ' '.join(os.path.split(num_and_player)[-1].split(' ')[1:])
+    path, year = os.path.split(path)
+    series = os.path.split(path)[1]
+    rtn = "{} {} {} #{}".format(series, year, player, num)
+
+    global path_num
+    path_num += 1
+    print(path_num, " loading " + rtn)
+    return rtn
+
+
+def read_imgID(results):
+    imgIDs = []
+    for re in results:
+        # print the matched result's info
+        print('---------', re)
+        imgIDs.append(re.id)
+    return imgIDs
+
+
+# def yolo_detect(img):
+#     results = yolo_model(img)
+#
+#     pred = results.pred[0][:, :4].cpu().numpy()
+#     boxes = pred.astype(np.int32)
+#
+#     max_img = get_object(img, boxes)
+#
+#     global yolo_num
+#     yolo_num += 1
+#     print("yolo_num: ", yolo_num)
+#     return max_img
+#
+#
+# def get_object(img, boxes):
+#     if isinstance(img, str):
+#         img = Image.open(img)
+#
+#     if len(boxes) == 0:
+#         return img
+#
+#     max_area = 0
+#
+#     # pick the largest bounding box
+#     x1, y1, x2, y2 = 0, 0, 0, 0
+#     for box in boxes:
+#         temp_x1, temp_y1, temp_x2, temp_y2 = box
+#         area = (temp_x2 - temp_x1) * (temp_y2 - temp_y1)
+#         if area > max_area:
+#             max_area = area
+#             x1, y1, x2, y2 = temp_x1, temp_y1, temp_x2, temp_y2
+#
+#     max_img = img.crop((x1, y1, x2, y2))
+#     return max_img
+
+
+# Create the Milvus collection
+def create_milvus_collection(collection_name, dim):
+    if utility.has_collection(collection_name):
+        utility.drop_collection(collection_name)
+
+    fields = [
+        FieldSchema(name='img_id', dtype=DataType.INT64, is_primary=True),
+        FieldSchema(name='path', dtype=DataType.VARCHAR, max_length=300),
+        FieldSchema(name="info", dtype=DataType.VARCHAR, max_length=300),
+        FieldSchema(name='embedding', dtype=DataType.FLOAT_VECTOR, description='image embedding vectors', dim=dim)
+    ]
+    schema = CollectionSchema(fields=fields, description='reverse image search')
+    collection = Collection(name=collection_name, schema=schema)
+
+    index_params = {
+        'metric_type': 'L2',
+        'index_type': "IVF_FLAT",
+        'params': {"nlist": dim}
+    }
+    collection.create_index(field_name="embedding", index_params=index_params)
+    return collection
+
+
+# Load the existing collection, or create and populate a new one
+def is_creat_collection(have_coll, collection_name):
+    if have_coll:
+        # connect to the existing collection
+        collection = Collection(name=collection_name)
+    else:
+        # create a new collection and insert the dataset
+        collection = create_milvus_collection(collection_name, 2560)
+        dc = (
+            towhee.glob['path'](*dataset_path)
+            .runas_op['path', 'img_id'](func=get_id)
+            .runas_op['path', 'info'](func=get_info)
+            # .image_decode['path', 'img']()
+            # .runas_op['path', "object"](yolo_detect)
+            .runas_op['path', 'vec'](func=img2vec)
+            .tensor_normalize['vec', 'vec']()
+            # .image_embedding.timm['img', 'vec'](model_name='resnet50')
+            .ann_insert.milvus[('img_id', 'path', 'info', 'vec'), 'mr'](collection=collection)
+        )
+
+    print('Total number of inserted data is {}.'.format(collection.num_entities))
+    return collection
+
+
+# Query by image ID
+def query_by_imgID(collection, img_id, limit=1):
+    expr = 'img_id == ' + str(img_id)
+    res = collection.query(expr, output_fields=["path", "info"], offset=0, limit=limit, timeout=2)
+    return res
+
+
+# Return the card number, year, and series (in that order)
+def from_path_get_info(path):
+    card_info = []
+    for i in range(3):
+        path = os.path.split(path)[0]
+        card_info.append(os.path.split(path)[-1])
+    card_info[0] = card_info[0].split('#')[-1]
+    return card_info
+
+
+def from_query_path_get_info(path):
+    card_info = []
+    for i in range(3):
+        path = os.path.split(path)[0]
+        card_info.append(os.path.split(path)[-1])
+    card_info[0] = card_info[0].split(' ')[0]
+    return card_info
+
+
+if __name__ == '__main__':
+    print('start')
+
+    # whether the collection already exists
+    have_coll = True
+
+    # default model
+    # collection = is_creat_collection(have_coll=have_coll, collection_name="reverse_image_search")
+    # custom model
+    collection = is_creat_collection(have_coll=have_coll, collection_name="reverse_image_search_myModel")
+
+    # test image paths
+    img_path = ["D:/Code/ML/images/test02/test(mosaic,pz)/*/*/*/*"]
+
+    data = (towhee.glob['path'](*img_path)
+            # image_decode['path', 'img']().
+            # .runas_op['path', "object"](yolo_detect)
+            .runas_op['path', 'vec'](func=img2vec)
+            .tensor_normalize['vec', 'vec']()
+            # image_embedding.timm['img', 'vec'](model_name='resnet50').
+            .ann_search.milvus['vec', 'result'](collection=collection, limit=3)
+            .runas_op['result', 'result_imgID'](func=read_imgID)
+            .select['path', 'result_imgID', 'vec']()
+            )
+
+    print(data)
+
+    collection.load()
+    # res = query_by_imgID(collection, data[0].result_imgID[0])
+    #
+    # print(res[0])
+
+    top3_num = 0
+    top1_num = 0
+    test_img_num = len(list(data))
+
+    # evaluate every test image
+    for i in range(test_img_num):
+        top3_flag = False
+
+        # get the image's ground-truth number, year, and series
+        source_code, source_year, source_series = from_path_get_info(data[i].path)
+
+        # each test image returns the three most similar image IDs; check them one by one
+        for j in range(3):
+            res = query_by_imgID(collection, data[i].result_imgID[j])
+
+            # get the predicted image's number, year, and series
+            result_code, result_year, result_series = from_query_path_get_info(res[0]['path'])
+
+            # check whether the top-1 result is correct
+            if j == 0 and source_code == result_code and source_year == result_year and source_series == result_series:
+                top1_num += 1
+                print(top1_num)
+            elif j == 0:
+                print('top_1 incorrect')
+
+            # if any of the top-3 results is correct, count it for top-3
+            if source_code == result_code and source_year == result_year and source_series == result_series:
+                top3_flag = True
+
+            print("series: {}, year: {},code: {} === result - series: {}, year: {}, code: {}".format(
+                source_series, source_year, source_code, result_series, result_year, result_code,
+            ))
+
+        if top3_flag:
+            top3_num += 1
+
+        print("====================================")
+
+    print("测试图片共: ", test_img_num)
+    top1_accuracy = (top1_num / test_img_num) * 100
+    top3_accuracy = (top3_num / test_img_num) * 100
+
+    print("top3 准确率:{} % \n top1 准确率: {} %".
+          format(top3_accuracy, top1_accuracy))
+
+'''
+
+  
+'''
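The evaluation loop above interleaves Milvus queries with the accuracy bookkeeping. Distilled on its own, the top-1/top-3 counting works as below; the tuples are placeholder data, not real search results:

# each entry: (ground-truth (code, year, series), the three predicted tuples from ann_search)
results = [
    (("123", "21-22", "prizm"), [("123", "21-22", "prizm"), ("45", "20-21", "mosaic"), ("7", "21-22", "prizm")]),
    (("45", "20-21", "mosaic"), [("9", "21-22", "prizm"), ("45", "20-21", "mosaic"), ("45", "19-20", "mosaic")]),
]

# a hit requires code, year and series to match simultaneously, which tuple equality checks at once
top1_num = sum(1 for truth, preds in results if preds[0] == truth)
top3_num = sum(1 for truth, preds in results if truth in preds)

n = len(results)
print("top-3 accuracy: {} %".format(top3_num / n * 100))
print("top-1 accuracy: {} %".format(top1_num / n * 100))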

+ 10 - 2
MyEfficientNet.py

@@ -1,4 +1,5 @@
 import torch
+import torch.nn as nn
 import torchvision.models as models
 import torchvision.transforms as transforms
 import cv2
@@ -7,7 +8,7 @@ import numpy as np
 import timm
 
 
-class MyEfficient:
+class MyModel:
     def __init__(self, model_dict_path, out_features=2560):
         self.out_features = out_features
         self.norm_mean = [0.485, 0.456, 0.406]
@@ -15,7 +16,12 @@ class MyEfficient:
 
         self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 
-        self.model = models.efficientnet_b7(pretrained=True)
+        self.model = models.efficientnet_b7()
+        self.model.classifier = nn.Sequential(
+            nn.Linear(in_features=2560, out_features=out_features, bias=False)
+        )
+
+        self.model.load_state_dict(torch.load(model_dict_path, map_location=self.device))
         # self.model.fc = torch.nn.Linear(in_features=2048, out_features=self.out_features)
         # self.model.load_state_dict(torch.load(model_dict_path))
 
@@ -51,6 +57,8 @@ class MyEfficient:
     def predict(self, img):
         if type(img) == type('path'):
             img = Image.open(img).convert('RGB')
+        else:
+            img = img.convert('RGB')
 
         transform = self.inference_transform()
 

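A minimal usage sketch of the reworked MyEfficientNet.MyModel. It assumes the checkpoint was saved from an efficientnet_b7 whose classifier had already been replaced by the same single Linear(2560, out_features) layer, so load_state_dict matches; the query image path is a placeholder:

from MyEfficientNet import MyModel

# checkpoint path as used in Milvus_Test_effnet.py; out_features must match the saved classifier
model = MyModel(r"D:\Code\ML\model\card_cls\effcient_card_out854_freeze2.pth", out_features=854)

# predict() accepts an image path or a PIL image and returns the embedding that
# Milvus_Test_effnet.py then normalizes and feeds to ann_search
vec = model.predict(r"D:\Code\ML\images\some_card.jpg")  # hypothetical image path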
+ 67 - 0
MyModel2.py

@@ -0,0 +1,67 @@
+import torch
+import torchvision.models as models
+import torchvision.transforms as transforms
+import cv2
+from PIL import Image
+import numpy as np
+import timm
+
+
+class MyModel:
+    def __init__(self, model_dict_path, out_features=2048):
+        self.out_features = out_features
+        self.norm_mean = [0.485, 0.456, 0.406]
+        self.norm_std = [0.229, 0.224, 0.225]
+
+        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
+        self.model = models.resnet50(pretrained=False)
+        self.model.fc = torch.nn.Linear(in_features=2048, out_features=self.out_features)
+
+        self.model.load_state_dict(torch.load(model_dict_path, map_location=self.device))
+        # self.model = timm.create_model('resnet50', num_classes=2048, pretrained=True)
+        self.model.eval()
+
+        # custom model: strip the classifier so the backbone acts as a feature extractor
+        # print(list(self.model.children()))
+        features = list(self.model.children())[:-1]  # drop the final fc layer, keep up to the global average pooling
+        self.model = torch.nn.Sequential(*features).to(self.device)
+
+        # self.model.to(self.device)
+
+    def inference_transform(self):
+        inference_transform = transforms.Compose([
+            transforms.Resize((224, 224)),
+            transforms.ToTensor(),
+            transforms.Normalize(self.norm_mean, self.norm_std),
+        ])
+        return inference_transform
+
+    def img_transform(self, img_rgb, transform=None):
+        # convert the image into the tensor format the model expects
+        if transform is None:
+            raise ValueError("找不到transform!必须有transform对img进行处理")
+
+        img_t = transform(img_rgb)
+        return img_t
+
+    def get_model(self):
+        return self.model
+
+    # accepts an image path or an already-loaded PIL image
+    def predict(self, img):
+        if isinstance(img, str):
+            img = Image.open(img).convert('RGB')
+        else:
+            img = img.convert('RGB')
+
+        transform = self.inference_transform()
+
+        img_tensor = transform(img)
+        img_tensor.unsqueeze_(0)
+        img_tensor = img_tensor.to(self.device)
+        # print(img.shape)
+
+        with torch.no_grad():
+            outputs = self.model(img_tensor)
+        return outputs.reshape(2048).cpu().numpy()
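A minimal usage sketch of MyModel2. The checkpoint path is the one used in Milvus_Test.py; the query image path is a placeholder, and the explicit L2 normalization stands in for the towhee tensor_normalize step in the pipeline:

import numpy as np
from MyModel2 import MyModel

model = MyModel(r"D:\Code\ML\model\card_cls\res_card_out764_freeze5.pth", out_features=764)

vec = model.predict(r"D:\Code\ML\images\some_card.jpg")  # pooled resnet50 features, shape (2048,)
vec = vec / np.linalg.norm(vec)                          # L2-normalize before inserting/searching
print(vec.shape)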

+ 34 - 5
test04.py

@@ -1,7 +1,36 @@
-import glob
-import os
+import winsound
 
-path = r"D:\Code\ML\images\Mywork3\card_database_yolo\mosaic\20-21"
+# note frequencies in Hz
+notes = {
+    "C": 262,  # do
+    "D": 294,  # re
+    "E": 330,  # mi
+    "F": 349,  # fa
+    "G": 392,  # sol
+    "A": 440,  # la
+    "B": 494   # si
+}
 
-for name in os.listdir(path):
-    os.rename(os.path.join(path, name), os.path.join(path, name.split('#')[-1]))
+# the melody as (note, duration in ms) pairs
+music = [
+    ("E", 500),  # mi
+    ("E", 500),  # mi
+    ("F", 500),  # fa
+    ("G", 500),  # sol
+    ("G", 500),  # sol
+    ("F", 500),  # fa
+    ("E", 500),  # mi
+    ("D", 500),  # re
+    ("C", 500),  # do
+    ("C", 500),  # do
+    ("D", 500),  # re
+    ("E", 500),  # mi
+    ("E", 500),  # mi
+    ("D", 500),  # re
+    ("D", 500)   # re
+]
+
+# play the melody
+for note, duration in music:
+    frequency = notes[note]
+    winsound.Beep(frequency, duration)
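Note that winsound is Windows-only. A small, hedged sketch of guarding the playback (reusing the notes and music tables defined above) so the script degrades gracefully elsewhere:

import sys

if sys.platform == "win32":
    import winsound
    for note, duration in music:
        winsound.Beep(notes[note], duration)
else:
    # no console Beep API outside Windows; just print the melody
    for note, duration in music:
        print(note, duration)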

+ 43 - 0
测试日志.log

@@ -0,0 +1,43 @@
+ Test images: 1670, database images: 5355
+ Custom resnet50_out764_freeze4 + yolo + normalize
+top-3 accuracy: 88.02395209580838 %
+ top-1 accuracy: 87.06586826347305 %
+
+ Test images: 1670, database images: 5355
+ Custom resnet50_out764_freeze5 + yolo + normalize
+top-3 accuracy: 87.12574850299401 %
+ top-1 accuracy: 86.16766467065868 %
+
+ Test images: 1670, database images: 5355
+ Custom resnet50_out854_freeze6 + yolo + normalize
+top-3 accuracy: 87.12574850299401 %
+ top-1 accuracy: 86.52694610778443 %
+
+
+ Test images: 4796
+ Custom resnet50_out764_freeze5 + yolo + normalize
+top-3 accuracy: 88.76146788990825 %
+ top-1 accuracy: 85.09174311926606 %
+
+ Test images: 4796, database images: 5355
+ Custom resnet50_out854_freeze6 + yolo + normalize
+top-3 accuracy: 91.4095079232694 %
+ top-1 accuracy: 87.90658882402002 %
+
+
+ ====================
+
+ Test images: 1670, database images: 5355
+ Custom effcient_card_out764_freeze1 + yolo + normalize
+top-3 accuracy: 83.11377245508982 %
+ top-1 accuracy: 81.1377245508982 %
+
+ Test images: 1670, database images: 5355
+ Custom effcient_card_out854_freeze2 + yolo + normalize
+top-3 accuracy: 83.89221556886227 %
+ top-1 accuracy: 82.63473053892216 %
+
+ Test images: 4796, database images: 5355
+ Custom effcient_card_out854_freeze2 + yolo + normalize
+top-3 accuracy: 67.63969974979149 %
+ top-1 accuracy: 59.236864053377815 %