{"id":367,"date":"2023-02-23T00:07:15","date_gmt":"2023-02-22T16:07:15","guid":{"rendered":"https:\/\/shangwendada.co\/?p=367"},"modified":"2023-02-28T21:56:55","modified_gmt":"2023-02-28T13:56:55","slug":"anomly-transformer","status":"publish","type":"post","link":"https:\/\/blog.shangwendada.top\/index.php\/2023\/02\/23\/anomly-transformer\/","title":{"rendered":"Anomly-transformer"},"content":{"rendered":"<h1>Anomaly-transformer\u6280\u672f\u6587\u6863<\/h1>\n<h2>\u4ecb\u7ecd<\/h2>\n<p>Anomaly Transformer\u662f\u4e00\u79cd\u57fa\u4e8eTransformer\u7684\u795e\u7ecf\u7f51\u7edc\u6a21\u578b\uff0c\u7528\u4e8e\u68c0\u6d4b\u5f02\u5e38\u70b9\u3002\u4e0e\u4f20\u7edf\u7684\u5f02\u5e38\u68c0\u6d4b\u65b9\u6cd5\u76f8\u6bd4\uff0cAnomaly Transformer\u4e0d\u9700\u8981\u4e8b\u5148\u5bf9\u6570\u636e\u8fdb\u884c\u7279\u5f81\u5de5\u7a0b\uff0c\u56e0\u4e3a\u5b83\u53ef\u4ee5\u81ea\u52a8\u5b66\u4e60\u6570\u636e\u4e2d\u7684\u7279\u5f81\u3002\u8be5\u6a21\u578b\u7684\u8bbe\u8ba1\u601d\u60f3\u662f\u5728Transformer\u6a21\u578b\u7684\u57fa\u7840\u4e0a\u589e\u52a0\u4e00\u4e2a\u7528\u4e8e\u5f02\u5e38\u68c0\u6d4b\u7684\u5934\u90e8\uff08head\uff09\u3002<\/p>\n<p>\u5728Transformer\u6a21\u578b\u4e2d\uff0c\u8f93\u5165\u6570\u636e\u9996\u5148\u88ab\u5206\u6210\u591a\u4e2atoken\uff0c\u7136\u540e\u901a\u8fc7\u591a\u5c42\u81ea\u6ce8\u610f\u529b\u673a\u5236\uff08self-attention\uff09\u548c\u5168\u8fde\u63a5\u795e\u7ecf\u7f51\u7edc\uff08feed-forward network\uff09\u8fdb\u884c\u5904\u7406\u3002\u5728Anomaly Transformer\u4e2d\uff0c\u5f02\u5e38\u68c0\u6d4b\u5934\u90e8\u7684\u4f5c\u7528\u662f\u5c06\u6a21\u578b\u7684\u8f93\u51fa\u8f6c\u6362\u6210\u4e00\u4e2a\u5f02\u5e38\u5f97\u5206\uff08anomaly score\uff09\uff0c\u8be5\u5f97\u5206\u53cd\u6620\u4e86\u8f93\u5165\u6570\u636e\u4e0e\u6b63\u5e38\u6570\u636e\u4e4b\u95f4\u7684\u5dee\u5f02\u7a0b\u5ea6\u3002<\/p>\n<p>Anomaly 
Transformer\u7684\u8bad\u7ec3\u8fc7\u7a0b\u53ef\u4ee5\u5206\u4e3a\u4e24\u4e2a\u9636\u6bb5\u3002\u9996\u5148\uff0c\u5728\u6b63\u5e38\u6570\u636e\u4e0a\u8bad\u7ec3\u6a21\u578b\uff0c\u4ee5\u5b66\u4e60\u6570\u636e\u7684\u6b63\u5e38\u5206\u5e03\u3002\u5728\u8fd9\u4e2a\u9636\u6bb5\u4e2d\uff0c\u5f02\u5e38\u68c0\u6d4b\u5934\u90e8\u7684\u8f93\u51fa\u5c06\u88ab\u5ffd\u7565\u3002\u63a5\u7740\uff0c\u5728\u5f02\u5e38\u6570\u636e\u4e0a\u8fdb\u884c\u5fae\u8c03\uff0c\u4f7f\u5f97\u6a21\u578b\u53ef\u4ee5\u8bc6\u522b\u51fa\u5f02\u5e38\u70b9\u3002\u5728\u8fd9\u4e2a\u9636\u6bb5\u4e2d\uff0c\u5f02\u5e38\u68c0\u6d4b\u5934\u90e8\u7684\u8f93\u51fa\u5c06\u88ab\u7528\u4e8e\u8ba1\u7b97\u5f02\u5e38\u5f97\u5206\u3002<\/p>\n<p>Anomaly Transformer\u5728\u5f02\u5e38\u68c0\u6d4b\u65b9\u9762\u5177\u6709\u4e00\u4e9b\u4f18\u52bf\u3002\u9996\u5148\uff0c\u5b83\u53ef\u4ee5\u81ea\u52a8\u5b66\u4e60\u6570\u636e\u4e2d\u7684\u7279\u5f81\uff0c\u56e0\u6b64\u4e0d\u9700\u8981\u8fdb\u884c\u7e41\u7410\u7684\u7279\u5f81\u5de5\u7a0b\u3002\u5176\u6b21\uff0c\u5b83\u53ef\u4ee5\u5904\u7406\u4e0d\u540c\u7c7b\u578b\u7684\u6570\u636e\uff0c\u5305\u62ec\u65f6\u95f4\u5e8f\u5217\u3001\u6587\u672c\u548c\u56fe\u50cf\u6570\u636e\u7b49\u3002\u6700\u540e\uff0c\u7531\u4e8eAnomaly Transformer\u662f\u4e00\u4e2a\u7aef\u5230\u7aef\u7684\u6a21\u578b\uff0c\u56e0\u6b64\u53ef\u4ee5\u5feb\u901f\u5904\u7406\u5927\u89c4\u6a21\u6570\u636e\u96c6\u3002<\/p>\n<p>\u9700\u8981\u6ce8\u610f\u7684\u662f\uff0cAnomaly 
Transformer\u5e76\u4e0d\u662f\u4e07\u80fd\u7684\uff0c\u5b83\u4e5f\u5b58\u5728\u4e00\u4e9b\u5c40\u9650\u6027\u3002\u4f8b\u5982\uff0c\u5f53\u6570\u636e\u96c6\u4e2d\u7684\u5f02\u5e38\u70b9\u6bd4\u6b63\u5e38\u70b9\u66f4\u591a\u65f6\uff0c\u6a21\u578b\u53ef\u80fd\u4f1a\u96be\u4ee5\u8bc6\u522b\u5f02\u5e38\u70b9\u3002\u6b64\u5916\uff0c\u7531\u4e8e\u5f02\u5e38\u70b9\u901a\u5e38\u662f\u5c11\u6570\uff0c\u56e0\u6b64\u5728\u8bad\u7ec3\u8fc7\u7a0b\u4e2d\u9700\u8981\u7279\u522b\u5173\u6ce8\u5f02\u5e38\u70b9\u7684\u6743\u91cd\u5206\u914d\u3002<\/p>\n<h2>\u5b9e\u73b0\u65b9\u6cd5<\/h2>\n<p>Anomaly Transformer\u7684\u5b9e\u73b0\u65b9\u6cd5\u901a\u5e38\u53ef\u4ee5\u5206\u4e3a\u4ee5\u4e0b\u51e0\u4e2a\u6b65\u9aa4\uff1a<\/p>\n<h3>\u6570\u636e\u9884\u5904\u7406<\/h3>\n<p>\u9996\u5148\u9700\u8981\u5bf9\u6570\u636e\u8fdb\u884c\u9884\u5904\u7406\uff0c\u5c06\u539f\u59cb\u6570\u636e\u8f6c\u6362\u4e3a\u9002\u5408\u8f93\u5165\u5230Anomaly Transformer\u6a21\u578b\u7684\u5f62\u5f0f\u3002\u5177\u4f53\u800c\u8a00\uff0c\u9700\u8981\u5c06\u6570\u636e\u5206\u6210\u591a\u4e2atoken\uff0c\u5e76\u5c06\u6bcf\u4e2atoken\u8868\u793a\u4e3a\u5411\u91cf\u3002<\/p>\n<h3>\u6a21\u578b\u8bbe\u8ba1<\/h3>\n<p>Anomaly 
Transformer\u7684\u6a21\u578b\u8bbe\u8ba1\u57fa\u4e8eTransformer\u6a21\u578b\uff0c\u4f46\u662f\u5728\u5176\u57fa\u7840\u4e0a\u589e\u52a0\u4e86\u4e00\u4e2a\u7528\u4e8e\u5f02\u5e38\u68c0\u6d4b\u7684\u5934\u90e8\u3002\u8be5\u5934\u90e8\u901a\u5e38\u7531\u4e00\u4e9b\u5168\u8fde\u63a5\u5c42\u548c\u4e00\u4e2a\u8f93\u51fa\u5c42\u7ec4\u6210\uff0c\u7528\u4e8e\u5c06Transformer\u7684\u8f93\u51fa\u8f6c\u6362\u4e3a\u4e00\u4e2a\u5f02\u5e38\u5f97\u5206\u3002\u5728\u8bad\u7ec3\u8fc7\u7a0b\u4e2d\uff0c\u9700\u8981\u6ce8\u610f\u8c03\u6574\u5f02\u5e38\u68c0\u6d4b\u5934\u90e8\u7684\u53c2\u6570\uff0c\u4ee5\u4f7f\u5f97\u6a21\u578b\u53ef\u4ee5\u8bc6\u522b\u51fa\u5f02\u5e38\u70b9\u3002<\/p>\n<h3>\u6a21\u578b\u8bad\u7ec3<\/h3>\n<p>\u5728\u8bad\u7ec3\u8fc7\u7a0b\u4e2d\uff0c\u9700\u8981\u4f7f\u7528\u6b63\u5e38\u6570\u636e\u6765\u8bad\u7ec3\u6a21\u578b\uff0c\u4ee5\u5b66\u4e60\u6570\u636e\u7684\u6b63\u5e38\u5206\u5e03\u3002\u8bad\u7ec3\u8fc7\u7a0b\u901a\u5e38\u4f7f\u7528\u53cd\u5411\u4f20\u64ad\u7b97\u6cd5\u548c\u4f18\u5316\u5668\u6765\u66f4\u65b0\u6a21\u578b\u7684\u53c2\u6570\u3002\u4e3a\u4e86\u786e\u4fdd\u6a21\u578b\u80fd\u591f\u8bc6\u522b\u51fa\u5f02\u5e38\u70b9\uff0c\u8fd8\u9700\u8981\u5728\u8bad\u7ec3\u96c6\u4e2d\u6dfb\u52a0\u4e00\u5b9a\u6bd4\u4f8b\u7684\u5f02\u5e38\u70b9\uff0c\u5e76\u5c06\u5176\u6807\u8bb0\u4e3a\u5f02\u5e38\u6570\u636e\u3002<\/p>\n<h3>\u6a21\u578b\u8bc4\u4f30<\/h3>\n<p>\u5728\u8bad\u7ec3\u5b8c\u6210\u540e\uff0c\u9700\u8981\u5bf9\u6a21\u578b\u8fdb\u884c\u8bc4\u4f30\uff0c\u4ee5\u786e\u5b9a\u5176\u5728\u5f02\u5e38\u68c0\u6d4b\u65b9\u9762\u7684\u6027\u80fd\u3002\u901a\u5e38\u53ef\u4ee5\u4f7f\u7528\u51c6\u786e\u7387\u3001\u53ec\u56de\u7387\u548cF1\u503c\u7b49\u6307\u6807\u6765\u8bc4\u4f30\u6a21\u578b\u7684\u6027\u80fd\u3002\u6b64\u5916\uff0c\u8fd8\u9700\u8981\u5bf9\u6a21\u578b\u8fdb\u884c\u8c03\u4f18\uff0c\u4ee5\u8fdb\u4e00\u6b65\u63d0\u9ad8\u5176\u6027\u80fd\u3002<\/p>\n<h3>\u90e8\u7f72\u6a21\u578b<\/h3>\n<p>\u6700\u540e\uff0c\u53ef\u4ee5\u5c06\u8bad\u7ec3\u597d\u7684\u6a21\u5
78b\u90e8\u7f72\u5230\u751f\u4ea7\u73af\u5883\u4e2d\uff0c\u4ee5\u8fdb\u884c\u5b9e\u65f6\u7684\u5f02\u5e38\u68c0\u6d4b\u3002\u5728\u90e8\u7f72\u8fc7\u7a0b\u4e2d\uff0c\u9700\u8981\u8003\u8651\u6a21\u578b\u7684\u6548\u7387\u548c\u53ef\u9760\u6027\uff0c\u5e76\u4fdd\u8bc1\u5176\u53ef\u4ee5\u5904\u7406\u4e0d\u540c\u7c7b\u578b\u7684\u6570\u636e\u3002\u6b64\u5916\uff0c\u8fd8\u9700\u8981\u5b9a\u671f\u5bf9\u6a21\u578b\u8fdb\u884c\u66f4\u65b0\u548c\u7ef4\u62a4\uff0c\u4ee5\u4fdd\u8bc1\u5176\u6301\u7eed\u7684\u6027\u80fd\u3002<\/p>\n<h2>\u6d89\u53ca\u77e5\u8bc6\u70b9<\/h2>\n<p>Anomaly Transformer \u662f\u4e00\u79cd\u57fa\u4e8e Transformer \u7684\u795e\u7ecf\u7f51\u7edc\u6a21\u578b\uff0c\u7528\u4e8e\u68c0\u6d4b\u5f02\u5e38\u70b9\u3002\u5b83\u4e3b\u8981\u6d89\u53ca\u4ee5\u4e0b\u77e5\u8bc6\u70b9\uff1a<\/p>\n<ol>\n<li>Transformer \u6a21\u578b\uff1aAnomaly Transformer \u662f\u5728 Transformer \u6a21\u578b\u7684\u57fa\u7840\u4e0a\u8fdb\u884c\u6269\u5c55\u7684\u3002\u56e0\u6b64\uff0c\u7406\u89e3 Transformer \u6a21\u578b\u7684\u539f\u7406\u548c\u673a\u5236\u5bf9\u4e8e\u7406\u89e3 Anomaly Transformer \u7684\u5b9e\u73b0\u975e\u5e38\u91cd\u8981\u3002Transformer \u6a21\u578b\u5305\u62ec\u81ea\u6ce8\u610f\u529b\u673a\u5236\u548c\u524d\u9988\u795e\u7ecf\u7f51\u7edc\u7b49\u7ec4\u4ef6\uff0c\u53ef\u4ee5\u5b9e\u73b0\u8f93\u5165\u5e8f\u5217\u7684\u7f16\u7801\u548c\u89e3\u7801\u3002<\/li>\n<li>\u81ea\u6ce8\u610f\u529b\u673a\u5236\uff1a\u81ea\u6ce8\u610f\u529b\u673a\u5236\u662f Transformer \u6a21\u578b\u7684\u6838\u5fc3\u7ec4\u4ef6\u4e4b\u4e00\uff0c\u7528\u4e8e\u8ba1\u7b97\u8f93\u5165\u5e8f\u5217\u4e2d\u6bcf\u4e2a token \u4e0e\u5176\u4ed6 token \u4e4b\u95f4\u7684\u76f8\u5173\u6027\u3002Anomaly Transformer \u5728\u81ea\u6ce8\u610f\u529b\u673a\u5236\u7684\u57fa\u7840\u4e0a\u589e\u52a0\u4e86\u4e00\u4e2a\u5f02\u5e38\u68c0\u6d4b\u5934\u90e8\uff0c\u7528\u4e8e\u5c06\u6a21\u578b\u7684\u8f93\u51fa\u8f6c\u6362\u6210\u4e00\u4e2a\u5f02\u5e38\u5f97\u5206\u3002<\/li>\n<li>\u5f02\u5e38\u68c0\u6d4b\uff1aAnomaly 
Transformer \u7684\u4e3b\u8981\u76ee\u7684\u662f\u68c0\u6d4b\u5f02\u5e38\u70b9\u3002\u56e0\u6b64\uff0c\u9700\u8981\u7406\u89e3\u5e38\u89c1\u7684\u5f02\u5e38\u68c0\u6d4b\u65b9\u6cd5\uff0c\u4f8b\u5982\u57fa\u4e8e\u7edf\u8ba1\u5b66\u7684\u65b9\u6cd5\u548c\u57fa\u4e8e\u673a\u5668\u5b66\u4e60\u7684\u65b9\u6cd5\u7b49\u3002\u540c\u65f6\uff0c\u8fd8\u9700\u8981\u4e86\u89e3\u5f02\u5e38\u70b9\u7684\u5b9a\u4e49\u548c\u7279\u5f81\u3002<\/li>\n<li>\u795e\u7ecf\u7f51\u7edc\u8bad\u7ec3\u548c\u4f18\u5316\uff1aAnomaly Transformer \u662f\u4e00\u4e2a\u57fa\u4e8e\u795e\u7ecf\u7f51\u7edc\u7684\u6a21\u578b\uff0c\u9700\u8981\u4f7f\u7528\u53cd\u5411\u4f20\u64ad\u7b97\u6cd5\u548c\u4f18\u5316\u5668\u6765\u66f4\u65b0\u6a21\u578b\u7684\u53c2\u6570\u3002\u56e0\u6b64\uff0c\u9700\u8981\u4e86\u89e3\u795e\u7ecf\u7f51\u7edc\u7684\u8bad\u7ec3\u548c\u4f18\u5316\u8fc7\u7a0b\uff0c\u5305\u62ec\u635f\u5931\u51fd\u6570\u7684\u8bbe\u8ba1\u3001\u5b66\u4e60\u7387\u7684\u8c03\u6574\u548c\u68af\u5ea6\u4e0b\u964d\u7b49\u3002<\/li>\n<li>\u6570\u636e\u9884\u5904\u7406\uff1a\u5728\u8bad\u7ec3 Anomaly Transformer \u6a21\u578b\u4e4b\u524d\uff0c\u9700\u8981\u5bf9\u6570\u636e\u8fdb\u884c\u9884\u5904\u7406\uff0c\u5c06\u539f\u59cb\u6570\u636e\u8f6c\u6362\u4e3a\u9002\u5408\u8f93\u5165\u5230\u6a21\u578b\u7684\u5f62\u5f0f\u3002\u8fd9\u901a\u5e38\u6d89\u53ca\u5230\u5c06\u6570\u636e\u5206\u6210\u591a\u4e2a token\uff0c\u5e76\u5c06\u6bcf\u4e2a token \u8868\u793a\u4e3a\u5411\u91cf\u3002<\/li>\n<li>\u6a21\u578b\u8bc4\u4f30\u548c\u8c03\u4f18\uff1aAnomaly Transformer \u7684\u6027\u80fd\u9700\u8981\u901a\u8fc7\u8bc4\u4f30\u6307\u6807\u6765\u8861\u91cf\uff0c\u4f8b\u5982\u51c6\u786e\u7387\u3001\u53ec\u56de\u7387\u548c F1 
\u503c\u7b49\u3002\u540c\u65f6\uff0c\u8fd8\u9700\u8981\u5bf9\u6a21\u578b\u8fdb\u884c\u8c03\u4f18\uff0c\u4ee5\u8fdb\u4e00\u6b65\u63d0\u9ad8\u5176\u6027\u80fd\u3002\u8fd9\u6d89\u53ca\u5230\u9009\u62e9\u5408\u9002\u7684\u8bad\u7ec3\u96c6\u548c\u6d4b\u8bd5\u96c6\u3001\u8c03\u6574\u6a21\u578b\u53c2\u6570\u3001\u91c7\u7528\u6b63\u5219\u5316\u6280\u672f\u7b49\u3002<\/li>\n<li>\u6a21\u578b\u90e8\u7f72\u548c\u7ef4\u62a4\uff1a\u6700\u540e\uff0cAnomaly Transformer \u6a21\u578b\u9700\u8981\u90e8\u7f72\u5230\u751f\u4ea7\u73af\u5883\u4e2d\u8fdb\u884c\u5b9e\u65f6\u7684\u5f02\u5e38\u68c0\u6d4b\u3002\u5728\u90e8\u7f72\u8fc7\u7a0b\u4e2d\uff0c\u9700\u8981\u8003\u8651\u6a21\u578b\u7684\u6548\u7387\u548c\u53ef\u9760\u6027\uff0c\u5e76\u4fdd\u8bc1\u5176\u53ef\u4ee5\u5904\u7406\u4e0d\u540c\u7c7b\u578b\u7684\u6570\u636e\u3002\u6b64\u5916\uff0c\u8fd8\u9700\u8981\u5b9a\u671f\u5bf9\u6a21\u578b\u8fdb\u884c\u66f4\u65b0\u548c\u7ef4\u62a4\uff0c\u4ee5\u4fdd\u8bc1\u5176\u6301\u7eed\u7684\u6027\u80fd\u3002<\/li>\n<\/ol>\n<h2>Transformer \u8f93\u5165\u6a21\u5757<\/h2>\n<h3>\u4ee3\u7801<\/h3>\n<pre><code class=\"language-python\"># -*- coding: utf-8 -*-\nimport copy\nimport torch\nimport torch.nn as nn\nimport math\nfrom torch.autograd import Variable\nimport torch.nn.functional as F\nclass Embeddings(nn.Module):\n    def __init__(self,d_model,vocab):\n        #d_model\u662f\u8bcd\u5d4c\u5165\u7684\u7ef4\u5ea6,vacab\u662f\u8bcd\u8868\u5927\u5c0f\n        super().__init__()\n        self.lut=nn.Embedding(vocab,d_model)\n        self.d_model=d_model\n    def forward(self,x):\n        return self.lut(x)*math.sqrt(self.d_model)\nd_model=512#\u7ef4\u5ea6\u5927\u5c0f\nvocab=1000#\u8bcd\u8868\u5927\u5c0f\nx=Variable(torch.LongTensor([[100,2,421,999],[491,998,1,221]]))\n#\u6570\u5b57\u76f8\u5f53\u4e8e\u8bcd\u8868\u4e2d\u8bcd\u8bed\u7684\u4e0b\u6807,\u8bcd\u8868\u5927\u5c0f\u4e3a1000\uff0c\u6240\u4ee5tensor\u6570\u5b57\u8303\u56f4\u4e3a[0,999]\nemb=Embeddings(d_model, 
vocab)\nembr=emb(x)\n#\u7b49\u6548\u4e8eemb.forward(x)\nprint(embr)\nprint(embr.shape)\n<\/code><\/pre>\n<p>\u8fd9\u662f\u4e00\u4e2a\u7528 PyTorch \u5b9e\u73b0\u7684\u8bcd\u5d4c\u5165\uff08embedding\uff09\u5c42\uff0c\u5b83\u5c06\u4e00\u4e2a\u6574\u6570\u5f20\u91cf\uff08tensor\uff09\u7f16\u7801\u4e3a\u5bc6\u96c6\u7684\u5b9e\u6570\u5411\u91cf\uff08vector\uff09\u3002<\/p>\n<p>\u5177\u4f53\u6765\u8bf4\uff0c\u4ee3\u7801\u5b9a\u4e49\u4e86\u4e00\u4e2a\u540d\u4e3a Embeddings \u7684\u7c7b\uff0c\u5b83\u7ee7\u627f\u81ea PyTorch \u7684 nn.Module \u7c7b\u3002Embeddings \u7c7b\u7684\u521d\u59cb\u5316\u65b9\u6cd5 <strong>init<\/strong>() \u63a5\u53d7\u4e24\u4e2a\u53c2\u6570\uff1ad_model \u548c vocab\u3002\u5176\u4e2d\uff0cd_model \u8868\u793a\u8bcd\u5d4c\u5165\u5411\u91cf\u7684\u7ef4\u5ea6\u5927\u5c0f\uff0cvocab \u8868\u793a\u8bcd\u8868\u5927\u5c0f\u3002<\/p>\n<p>\u5728\u521d\u59cb\u5316\u65b9\u6cd5\u4e2d\uff0c\u7c7b\u5b9a\u4e49\u4e86\u4e00\u4e2a nn.Embedding \u5bf9\u8c61\uff0c\u5b83\u662f PyTorch \u4e2d\u7684\u5185\u7f6e\u8bcd\u5d4c\u5165\u5c42\u3002\u8be5\u5c42\u7684\u8f93\u5165\u662f\u4e00\u4e2a\u6574\u6570\uff0c\u8f93\u51fa\u662f\u4e00\u4e2a\u5f62\u72b6\u4e3a [batch_size, seq_length, d_model] \u7684\u5b9e\u6570\u5f20\u91cf\u3002\u5728\u672c\u4f8b\u4e2d\uff0cbatch_size \u4e3a 2\uff0cseq_length \u4e3a 4\uff0c\u56e0\u6b64 Embeddings \u5c42\u7684\u8f93\u51fa\u5f62\u72b6\u4e3a [2, 4, 512]\u3002\u5728 forward() \u65b9\u6cd5\u4e2d\uff0cEmbeddings \u5c42\u9996\u5148\u8c03\u7528 nn.Embedding \u5bf9\u8c61\uff0c\u5c06\u6574\u6570\u5f20\u91cf x \u8f6c\u6362\u4e3a\u5b9e\u6570\u5f20\u91cf\uff0c\u7136\u540e\u4e58\u4ee5 math.sqrt(self.d_model)\u3002\u8fd9\u662f\u4e3a\u4e86\u7f29\u653e\u8bcd\u5d4c\u5165\u5411\u91cf\uff0c\u4ee5\u4f7f\u5176\u5927\u5c0f\u9002\u5408\u4e8e Transformer \u6a21\u578b\u3002<\/p>\n<p>\u5728\u8fd9\u4e2a\u4ee3\u7801\u7247\u6bb5\u7684\u6700\u540e\uff0c\u7a0b\u5e8f\u901a\u8fc7\u4e00\u4e2a\u8f93\u5165\u6574\u6570\u5f20\u91cf x \u8c03\u7528 Embeddings \u7c7b\u7684 forward() 
\u65b9\u6cd5\u3002x \u7684\u5f62\u72b6\u662f [2, 4]\uff0c\u8868\u793a\u4e00\u4e2a\u5927\u5c0f\u4e3a 2 \u7684 batch\uff0c\u6bcf\u4e2a batch \u5305\u542b 4 \u4e2a\u6574\u6570\u3002\u5728\u8c03\u7528 forward() \u65b9\u6cd5\u4e4b\u540e\uff0c\u7a0b\u5e8f\u8f93\u51fa\u4e86\u8bcd\u5d4c\u5165\u5c42\u7684\u8f93\u51fa embr\uff0c\u4ee5\u53ca\u5176\u5f62\u72b6 [2, 4, 512]\u3002\u5176\u4e2d\uff0c512 \u8868\u793a\u8bcd\u5d4c\u5165\u5411\u91cf\u7684\u7ef4\u5ea6\u5927\u5c0f\u3002<\/p>\n<h3>batch(\u6279\u6b21)<\/h3>\n<p>\u5728\u673a\u5668\u5b66\u4e60\u4e2d\uff0cbatch\uff08\u6279\u6b21\uff09\u662f\u6307\u4e00\u7ec4\u540c\u65f6\u88ab\u8f93\u5165\u5230\u795e\u7ecf\u7f51\u7edc\u8fdb\u884c\u8bad\u7ec3\u6216\u63a8\u7406\u7684\u6570\u636e\u6837\u672c\u3002\u901a\u5e38\uff0c\u6bcf\u4e2a\u6837\u672c\u90fd\u662f\u4e00\u4e2a\u5f20\u91cf\uff08tensor\uff09\uff0c\u800c\u4e00\u4e2a batch \u5219\u662f\u4e00\u4e2a\u5f20\u91cf\u5217\u8868\uff0c\u5176\u4e2d\u6bcf\u4e2a\u5f20\u91cf\u7684\u7b2c\u4e00\u4e2a\u7ef4\u5ea6\u90fd\u662f\u76f8\u540c\u7684\uff0c\u8868\u793a\u8be5 batch \u7684\u5927\u5c0f\u3002<\/p>\n<p>\u4ee5\u56fe\u50cf\u5206\u7c7b\u4efb\u52a1\u4e3a\u4f8b\uff0c\u5047\u8bbe\u6709\u4e00\u4e2a\u5305\u542b 10,000 \u5f20\u56fe\u50cf\u7684\u6570\u636e\u96c6\uff0c\u6bcf\u5f20\u56fe\u50cf\u7684\u5927\u5c0f\u4e3a 224&#215;224 \u50cf\u7d20\uff0c\u5e76\u4e14\u6709 100 \u4e2a\u4e0d\u540c\u7684\u7c7b\u522b\u3002\u5982\u679c\u6211\u4eec\u5c06\u6240\u6709\u56fe\u50cf\u540c\u65f6\u8f93\u5165\u5230\u795e\u7ecf\u7f51\u7edc\u8fdb\u884c\u8bad\u7ec3\uff0c\u4f1a\u5360\u7528\u5f88\u591a\u5185\u5b58\uff0c\u8bad\u7ec3\u65f6\u95f4\u4e5f\u4f1a\u5f88\u957f\u3002\u76f8\u53cd\uff0c\u6211\u4eec\u53ef\u4ee5\u5c06\u6570\u636e\u96c6\u5206\u6210\u82e5\u5e72\u4e2a batch\uff0c\u6bcf\u4e2a batch \u5305\u542b 32 \u6216 64 \u5f20\u56fe\u50cf\uff0c\u7136\u540e\u4f9d\u6b21\u5c06\u8fd9\u4e9b batch 
\u8f93\u5165\u5230\u7f51\u7edc\u4e2d\u8fdb\u884c\u8bad\u7ec3\u3002\u8fd9\u6837\u505a\u7684\u597d\u5904\u662f\u53ef\u4ee5\u8282\u7701\u5185\u5b58\uff0c\u52a0\u901f\u8bad\u7ec3\uff0c\u540c\u65f6\u4e5f\u6709\u52a9\u4e8e\u6a21\u578b\u6cdb\u5316\uff0c\u907f\u514d\u8fc7\u5ea6\u62df\u5408\u3002<\/p>\n<p>\u5728\u5b9e\u9645\u5e94\u7528\u4e2d\uff0cbatch \u7684\u5927\u5c0f\u901a\u5e38\u662f\u4e00\u4e2a\u8d85\u53c2\u6570\uff0c\u9700\u8981\u901a\u8fc7\u5b9e\u9a8c\u6765\u8fdb\u884c\u8c03\u6574\u3002\u8f83\u5c0f\u7684 batch \u5927\u5c0f\u4f1a\u4f7f\u68af\u5ea6\u66f4\u65b0\u66f4\u52a0\u9891\u7e41\uff0c\u4f46\u4e5f\u53ef\u80fd\u4f1a\u589e\u52a0\u566a\u58f0\u548c\u4e0d\u7a33\u5b9a\u6027\uff1b\u800c\u8f83\u5927\u7684 batch \u5927\u5c0f\u5219\u53ef\u80fd\u4f1a\u5bfc\u81f4\u5185\u5b58\u4e0d\u8db3\u3001\u8bad\u7ec3\u65f6\u95f4\u8fc7\u957f\u7b49\u95ee\u9898\u3002\u56e0\u6b64\uff0c\u9700\u8981\u5728\u5b9e\u8df5\u4e2d\u8fdb\u884c\u6d4b\u8bd5\u548c\u4f18\u5316\uff0c\u9009\u62e9\u4e00\u4e2a\u5408\u9002\u7684 batch 
\u5927\u5c0f\u4ee5\u83b7\u5f97\u6700\u4f73\u7684\u8bad\u7ec3\u6548\u679c\u3002<\/p>\n<h3>\u5f20\u91cf<\/h3>\n<p>\u5728\u6df1\u5ea6\u5b66\u4e60\u4e2d\uff0c\u5f20\u91cf\uff08tensor\uff09\u662f\u4e00\u79cd\u591a\u7ef4\u6570\u7ec4\uff0c\u662f\u795e\u7ecf\u7f51\u7edc\u4e2d\u6700\u57fa\u672c\u7684\u6570\u636e\u7ed3\u6784\u4e4b\u4e00\u3002\u5f20\u91cf\u53ef\u4ee5\u770b\u4f5c\u662f\u5411\u91cf\u3001\u77e9\u9635\u548c\u5176\u4ed6\u66f4\u9ad8\u7ef4\u5ea6\u6570\u7ec4\u7684\u63a8\u5e7f\uff0c\u53ef\u4ee5\u5b58\u50a8\u548c\u8868\u793a\u5404\u79cd\u7c7b\u578b\u7684\u6570\u636e\uff0c\u4f8b\u5982\u56fe\u50cf\u3001\u97f3\u9891\u3001\u6587\u672c\u7b49\u3002<\/p>\n<p>\u5728\u795e\u7ecf\u7f51\u7edc\u4e2d\uff0c\u8f93\u5165\u6570\u636e\u548c\u6a21\u578b\u53c2\u6570\u90fd\u662f\u4ee5\u5f20\u91cf\u7684\u5f62\u5f0f\u4f20\u9012\u548c\u5b58\u50a8\u7684\u3002\u5177\u4f53\u6765\u8bf4\uff0c\u8f93\u5165\u6570\u636e\u7ecf\u8fc7\u5f20\u91cf\u7684\u5f62\u5f0f\u8fdb\u884c\u8868\u793a\u548c\u5904\u7406\uff0c\u7ecf\u8fc7\u4e00\u7cfb\u5217\u7684\u7ebf\u6027\u53d8\u6362\u3001\u975e\u7ebf\u6027\u6fc0\u6d3b\u548c\u6c60\u5316\u7b49\u64cd\u4f5c\uff0c\u6700\u7ec8\u8f93\u51fa\u4e00\u4e2a\u6216\u591a\u4e2a\u5f20\u91cf\u4f5c\u4e3a\u9884\u6d4b\u7ed3\u679c\u6216\u7279\u5f81\u8868\u793a\u3002\u800c\u6a21\u578b\u53c2\u6570\u5219\u662f\u901a\u8fc7\u53cd\u5411\u4f20\u64ad\u7b97\u6cd5\u66f4\u65b0\uff0c\u4ee5\u6700\u5c0f\u5316\u635f\u5931\u51fd\u6570\uff0c\u5e76\u63d0\u9ad8\u6a21\u578b\u7684\u51c6\u786e\u6027\u548c\u6cdb\u5316\u80fd\u529b\u3002<\/p>\n<p>\u5f20\u91cf\u7684\u91cd\u8981\u6027\u8fd8\u4f53\u73b0\u5728\u5b83\u7684\u9ad8\u6548\u6027\u4e0a\uff0c\u5f20\u91cf\u8fd0\u7b97\u53ef\u4ee5\u901a\u8fc7GPU\u7b49\u786c\u4ef6\u52a0\u901f\uff0c\u52a0\u5feb\u795e\u7ecf\u7f51\u7edc\u7684\u8bad\u7ec3\u548c\u63a8\u7406\u901f\u5ea6\uff0c\u5b9e\u73b0\u5bf9\u5927\u89c4\u6a21\u6570\u636e\u548c\u6a21\u578b\u7684\u9ad8\u6548\u5904\u7406\u3002<\/p>\n<h3>\u7ef4\u5ea6<\/h3>\n<p>\u5728\u6df1\u5ea6\u5b66\u4e60\u4e2d\uff0c\u7ef4\u5ea6\uff08d
imension\uff09\u901a\u5e38\u6307\u7684\u662f\u5f20\u91cf\u7684\u79e9\uff08rank\uff09\uff0c\u5373\u5f20\u91cf\u4e2d\u5305\u542b\u5143\u7d20\u7684\u8f74\u6570\u3002\u4f8b\u5982\uff0c\u6807\u91cf\uff08scalar\uff09\u7684\u79e9\u4e3a0\uff0c\u5411\u91cf\uff08vector\uff09\u7684\u79e9\u4e3a1\uff0c\u77e9\u9635\uff08matrix\uff09\u7684\u79e9\u4e3a2\uff0c\u4ee5\u6b64\u7c7b\u63a8\u3002<\/p>\n<p>\u6bcf\u4e2a\u8f74\u7684\u957f\u5ea6\u8868\u793a\u8be5\u8f74\u65b9\u5411\u4e0a\u5143\u7d20\u7684\u6570\u91cf\uff0c\u4f8b\u5982\u4e00\u4e2a\u5f62\u72b6\u4e3a (3, 4, 5) \u7684\u5f20\u91cf\uff0c\u5176\u4e2d\u7b2c\u4e00\u4e2a\u8f74\u7684\u957f\u5ea6\u4e3a3\uff0c\u7b2c\u4e8c\u4e2a\u8f74\u7684\u957f\u5ea6\u4e3a4\uff0c\u7b2c\u4e09\u4e2a\u8f74\u7684\u957f\u5ea6\u4e3a5\u3002\u5bf9\u4e8e\u4e09\u7ef4\u5f20\u91cf\uff0c\u53ef\u4ee5\u5c06\u5176\u89c6\u4e3a\u4e00\u4e2a\u75313\u4e2a\u4e8c\u7ef4\u77e9\u9635\u7ec4\u6210\u7684\u7acb\u4f53\u56fe\u50cf\uff0c\u5176\u4e2d\u6bcf\u4e2a\u77e9\u9635\u4ee3\u8868\u4e00\u4e2a\u6c34\u5e73\u5207\u7247\u3002<\/p>\n<p>\u5728\u6df1\u5ea6\u5b66\u4e60\u4e2d\uff0c\u7ef4\u5ea6\u7684\u91cd\u8981\u6027\u5728\u4e8e\u5b83\u4e0e\u795e\u7ecf\u7f51\u7edc\u7684\u7ed3\u6784\u548c\u8ba1\u7b97\u6709\u7740\u5bc6\u5207\u7684\u5173\u7cfb\u3002\u4f8b\u5982\uff0c\u5728\u5377\u79ef\u795e\u7ecf\u7f51\u7edc\u4e2d\uff0c\u901a\u8fc7\u6539\u53d8\u5377\u79ef\u5c42\u7684\u8f93\u5165\u548c\u8f93\u51fa\u5f20\u91cf\u7684\u7ef4\u5ea6\uff0c\u53ef\u4ee5\u5b9e\u73b0\u4e0d\u540c\u5f62\u5f0f\u7684\u5377\u79ef\u64cd\u4f5c\uff0c\u5982\u540c\u5377\u79ef\u6838\u5927\u5c0f\u3001\u6b65\u957f\u3001\u586b\u5145\u65b9\u5f0f\u7b49\u3002\u6b64\u5916\uff0c\u7ef4\u5ea6\u8fd8\u6d89\u53ca\u5230\u5f20\u91cf\u7684\u5f62\u72b6\u53d8\u6362\u3001\u6269\u5c55\u3001\u62fc\u63a5\u7b49\u5e38\u89c1\u64cd\u4f5c\uff0c\u8fd9\u4e9b\u64cd\u4f5c\u662f\u6df1\u5ea6\u5b66\u4e60\u4e2d\u5fc5\u4e0d\u53ef\u5c11\u7684\u6280\u80fd\u3002<\/p>\n<h3>Embedding(\u8bcd\u5d4c\u5165)<\/h3>\n<p>\u5728\u81ea\u7136\u8bed\u8a00\u5904\u7406\u4e2d\uff0cEmbedding
\uff08\u8bcd\u5d4c\u5165\uff09\u662f\u5c06\u8bcd\u8bed\u6620\u5c04\u5230\u4f4e\u7ef4\u7a20\u5bc6\u5411\u91cf\u7a7a\u95f4\u4e2d\u7684\u8fc7\u7a0b\u3002\u5b83\u662f\u4e00\u79cd\u5e38\u7528\u7684\u5c06\u79bb\u6563\u7b26\u53f7\u8f6c\u6362\u4e3a\u8fde\u7eed\u5411\u91cf\u8868\u793a\u7684\u65b9\u6cd5\uff0c\u662f\u81ea\u7136\u8bed\u8a00\u5904\u7406\u4e2d\u7684\u4e00\u9879\u91cd\u8981\u6280\u672f\u3002<\/p>\n<p>\u5728\u6df1\u5ea6\u5b66\u4e60\u4e2d\uff0cEmbedding\u901a\u5e38\u901a\u8fc7\u4e00\u4e2a\u77e9\u9635\u6765\u5b9e\u73b0\uff0c\u8be5\u77e9\u9635\u7684\u884c\u6570\u7b49\u4e8e\u8bcd\u8868\u4e2d\u5355\u8bcd\u7684\u6570\u91cf\uff0c\u5217\u6570\u7b49\u4e8e\u5d4c\u5165\u7684\u7ef4\u5ea6\u3002\u6bcf\u4e2a\u5355\u8bcd\u88ab\u8868\u793a\u4e3a\u8be5\u77e9\u9635\u4e2d\u7684\u4e00\u884c\uff0c\u4e5f\u5c31\u662f\u4e00\u4e2a\u4f4e\u7ef4\u7684\u7a20\u5bc6\u5411\u91cf\uff0c\u79f0\u4e3a\u8be5\u5355\u8bcd\u7684Embedding\u5411\u91cf\u3002Embedding\u5411\u91cf\u7684\u957f\u5ea6\u901a\u5e38\u5728\u51e0\u5341\u5230\u51e0\u767e\u4e4b\u95f4\uff0c\u4e0d\u540c\u7684\u5e94\u7528\u573a\u666f\u548c\u4efb\u52a1\u9700\u8981\u4e0d\u540c\u7684\u957f\u5ea6\u3002<\/p>\n<p>\u4f7f\u7528Embedding\u53ef\u4ee5\u5c06\u79bb\u6563\u7684\u5355\u8bcd\u6216\u5b57\u7b26\u8f6c\u6362\u4e3a\u8fde\u7eed\u7684\u5411\u91cf\uff0c\u4f7f\u5f97\u795e\u7ecf\u7f51\u7edc\u53ef\u4ee5\u66f4\u597d\u5730\u5904\u7406\u6587\u672c\u6570\u636e\u3002\u4f8b\u5982\uff0c\u5728\u81ea\u7136\u8bed\u8a00\u5904\u7406\u4e2d\uff0c\u53ef\u4ee5\u5c06\u6bcf\u4e2a\u5355\u8bcd\u6620\u5c04\u4e3a\u4e00\u4e2aEmbedding\u5411\u91cf\uff0c\u7136\u540e\u4f7f\u7528\u8fd9\u4e9b\u5411\u91cf\u4f5c\u4e3a\u8f93\u5165\uff0c\u8fdb\u884c\u540e\u7eed\u7684\u6587\u672c\u5206\u7c7b\u3001\u60c5\u611f\u5206\u6790\u3001\u673a\u5668\u7ffb\u8bd1\u7b49\u4efb\u52a1\u3002<\/p>\n<h3>forward\u65b9\u6cd5<\/h3>\n<p>\u5728\u795e\u7ecf\u7f51\u7edc\u4e2d\uff0cforward()\u65b9\u6cd5\u662f\u4e00\u4e2a\u91cd\u8981\u7684\u65b9\u6cd5\uff0c\u7528\u4e8e\u5b9a\u4e49\u6a21\u578b\u7684\u524d\u5411\
u4f20\u64ad\u8fc7\u7a0b\u3002\u5f53\u7ed9\u5b9a\u8f93\u5165\u6570\u636e\u65f6\uff0c\u795e\u7ecf\u7f51\u7edc\u5c06\u6309\u7167forward()\u65b9\u6cd5\u4e2d\u7684\u5b9a\u4e49\uff0c\u5bf9\u8f93\u5165\u8fdb\u884c\u4e00\u7cfb\u5217\u7684\u8ba1\u7b97\u548c\u8f6c\u6362\uff0c\u5e76\u8f93\u51fa\u6700\u7ec8\u7684\u7ed3\u679c\u3002<\/p>\n<p>\u5728PyTorch\u4e2d\uff0c\u5b9a\u4e49\u4e00\u4e2a\u795e\u7ecf\u7f51\u7edc\u65f6\u9700\u8981\u7ee7\u627fnn.Module\u7c7b\uff0c\u5e76\u91cd\u5199\u5176\u4e2d\u7684forward()\u65b9\u6cd5\u3002\u5728\u91cd\u5199forward()\u65b9\u6cd5\u65f6\uff0c\u9700\u8981\u4f7f\u7528PyTorch\u63d0\u4f9b\u7684\u5404\u79cd\u5f20\u91cf\u64cd\u4f5c\uff08\u5982\u5377\u79ef\u3001\u6c60\u5316\u3001\u5168\u8fde\u63a5\u7b49\uff09\uff0c\u6765\u5b9e\u73b0\u6a21\u578b\u7684\u5177\u4f53\u529f\u80fd\u3002<\/p>\n<p>\u5728\u6a21\u578b\u8bad\u7ec3\u8fc7\u7a0b\u4e2d\uff0c\u6bcf\u6b21\u8f93\u5165\u6570\u636e\u65f6\uff0cforward()\u65b9\u6cd5\u5c06\u88ab\u81ea\u52a8\u8c03\u7528\uff0c\u5e76\u8fd4\u56de\u6a21\u578b\u7684\u8f93\u51fa\u7ed3\u679c\u3002\u7136\u540e\u901a\u8fc7\u8ba1\u7b97\u8f93\u51fa\u7ed3\u679c\u4e0e\u5b9e\u9645\u6807\u7b7e\u4e4b\u95f4\u7684\u5dee\u5f02\uff0c\u518d\u4f7f\u7528\u53cd\u5411\u4f20\u64ad\u7b97\u6cd5\u6765\u66f4\u65b0\u6a21\u578b\u7684\u53c2\u6570\uff0c\u4f7f\u5f97\u6a21\u578b\u80fd\u591f\u4e0d\u65ad\u5730\u9010\u6e10\u4f18\u5316\uff0c\u6700\u7ec8\u8fbe\u5230\u9884\u671f\u7684\u6548\u679c\u3002<\/p>\n<h3>\u5176\u8fd0\u884c\u7ed3\u679c\u4e3a:<\/h3>\n<pre><code>tensor([[[ -1.2578,   2.7126, -34.2691,  ..., -10.0247, -24.6125, -18.9279],\n         [ 10.4483, -26.1474,   9.3407,  ..., -56.0667,  26.7694,  -8.8562],\n         [-25.8409, -10.9467, -15.8338,  ..., -43.4646, -46.5105, -39.5861],\n         [  4.4863, -10.8304,  18.8405,  ...,  20.7088, -13.4723,  11.8239]],\n\n        [[-19.8429,  -2.2979, -49.3931,  ..., -11.1465, -41.0410,  12.8204],\n         [ 42.3238,   3.9417,  -8.6706,  ...,  27.7693,  17.9512,  -8.0164],\n         [ 10.4532,  28.1825,  48.5035, 
 ...,  16.2428, -10.8276,  39.2088],\n         [ 13.9543,  -7.9238,  11.3745,  ...,   5.7166,  23.8743,   4.5359]]],\n       grad_fn=&lt;MulBackward0&gt;)\ntorch.Size([2, 4, 512])\n<\/code><\/pre>\n<h2>mian.py\u4ee3\u7801\u8be6\u7ec6\u89e3\u8bfb<\/h2>\n<pre><code class=\"language-python\">import os\nimport argparse\n\nfrom torch.backends import cudnn\nfrom utils.utils import *\n\nfrom solver import Solver\n\ndef str2bool(v):\n    return v.lower() in (&#039;true&#039;)\n\ndef main(config):\n    cudnn.benchmark = True\n    if (not os.path.exists(config.model_save_path)):\n        mkdir(config.model_save_path)\n    solver = Solver(vars(config))\n\n    if config.mode == &#039;train&#039;:\n        solver.train()\n    elif config.mode == &#039;test&#039;:\n        solver.test()\n\n    return solver\n\nif __name__ == &#039;__main__&#039;:\n    parser = argparse.ArgumentParser()\n\n    parser.add_argument(&#039;--lr&#039;, type=float, default=1e-4)\n    parser.add_argument(&#039;--num_epochs&#039;, type=int, default=1) #\u8bad\u7ec3\u8f6e\u6570\n    parser.add_argument(&#039;--k&#039;, type=int, default=2)\n    parser.add_argument(&#039;--win_size&#039;, type=int, default=10) #\u7a97\u53e3\u5927\u5c0f\n    parser.add_argument(&#039;--input_c&#039;, type=int, default=25)  #\u8f93\u5165\u7ef4\u5ea6\n    parser.add_argument(&#039;--output_c&#039;, type=int, default=25)  #\u8f93\u51fa\u7ef4\u5ea6\n    parser.add_argument(&#039;--batch_size&#039;, type=int, default=16) #\u6279\u6b21\u5927\u5c0f\n    parser.add_argument(&#039;--pretrained_model&#039;, type=str, default=None)\n    parser.add_argument(&#039;--dataset&#039;, type=str, default=&#039;SMAP&#039;)\n    parser.add_argument(&#039;--mode&#039;, type=str, default=&#039;test&#039;, choices=[&#039;train&#039;, &#039;test&#039;])\n    parser.add_argument(&#039;--data_path&#039;, type=str, default=&#039;.\/Scripts\/SMAP&#039;)\n    parser.add_argument(&#039;--model_save_path&#039;, type=str, 
default=&#039;Scripts\/SMAP\/checkpoints&#039;)\n    parser.add_argument(&#039;--anormly_ratio&#039;, type=float, default=.8)\n\n    config = parser.parse_args()\n\n    args = vars(config)\n    print(&#039;------------ Options -------------&#039;)\n    for k, v in sorted(args.items()):\n        print(&#039;%s: %s&#039; % (str(k), str(v)))\n    print(&#039;-------------- End ----------------&#039;)\n    main(config)<\/code><\/pre>\n<h2>Solver.py\u4ee3\u7801\u8be6\u7ec6\u89e3\u8bfb<\/h2>\n<h3>my_kl_loss(p, q)<\/h3>\n<pre><code class=\"language-py\">def my_kl_loss(p, q):\n    res = p * (torch.log(p + 0.0001) - torch.log(q + 0.0001))\n    return torch.mean(torch.sum(res, dim=-1), dim=1)<\/code><\/pre>\n<p>\u8fd9\u662f\u4e00\u4e2a\u5b9a\u4e49KL\u6563\u5ea6\uff08Kullback-Leibler divergence\uff09\u635f\u5931\u51fd\u6570\u7684\u51fd\u6570\u3002KL\u6563\u5ea6\u662f\u4e00\u79cd\u8861\u91cf\u4e24\u4e2a\u6982\u7387\u5206\u5e03\u4e4b\u95f4\u5dee\u5f02\u7684\u65b9\u6cd5\uff0c\u901a\u5e38\u7528\u4e8e\u5ea6\u91cf\u6a21\u578b\u751f\u6210\u7684\u6982\u7387\u5206\u5e03\u4e0e\u771f\u5b9e\u6982\u7387\u5206\u5e03\u4e4b\u95f4\u7684\u8ddd\u79bb\u3002<\/p>\n<p>\u8f93\u5165\u53c2\u6570p\u548cq\u5206\u522b\u8868\u793a\u4e24\u4e2a\u6982\u7387\u5206\u5e03\uff0c\u5b83\u4eec\u7684\u5f62\u72b6\u4e3a(batch_size, 
num_classes)\uff0c\u5176\u4e2dbatch_size\u8868\u793a\u6279\u91cf\u5927\u5c0f\uff0cnum_classes\u8868\u793a\u7c7b\u522b\u6570\u3002\u51fd\u6570\u9996\u5148\u8ba1\u7b97\u4e24\u4e2a\u5206\u5e03\u4e4b\u95f4\u7684KL\u6563\u5ea6\uff0c\u7136\u540e\u53d6\u6240\u6709\u6837\u672c\u7684KL\u6563\u5ea6\u7684\u5e73\u5747\u503c\u4f5c\u4e3a\u635f\u5931\u51fd\u6570\u7684\u503c\u3002<\/p>\n<p>\u5177\u4f53\u6765\u8bf4\uff0c\u51fd\u6570\u4e2d\u7684res\u53d8\u91cf\u8868\u793aKL\u6563\u5ea6\u7684\u5206\u5b50\u90e8\u5206\uff0c\u5373p\u548cq\u4e4b\u95f4\u7684\u5dee\u5f02\u3002\u7531\u4e8e\u8ba1\u7b97\u4e2d\u4f1a\u6d89\u53ca\u5230\u5bf9\u6570\u8fd0\u7b97\uff0c\u56e0\u6b64\u5728\u51fd\u6570\u4e2d\u4f7f\u7528torch.log()\u51fd\u6570\u6765\u8ba1\u7b97\u5bf9\u6570\u3002\u5728\u8ba1\u7b97\u8fc7\u7a0b\u4e2d\uff0c\u4e3a\u4e86\u907f\u514d\u5bf9\u6570\u7684\u7ed3\u679c\u51fa\u73b0\u8d1f\u65e0\u7a77\u7684\u60c5\u51b5\uff0c\u901a\u5e38\u4f1a\u5bf9p\u548cq\u90fd\u52a0\u4e0a\u4e00\u4e2a\u5f88\u5c0f\u7684\u5e38\u6570\uff0c\u8fd9\u91cc\u7684\u5e38\u6570\u662f0.0001\u3002\u7136\u540e\uff0c\u4f7f\u7528torch.sum()\u51fd\u6570\u8ba1\u7b97res\u4e2d\u6240\u6709\u5143\u7d20\u7684\u548c\uff0c\u5f97\u5230\u6bcf\u4e2a\u6837\u672c\u7684KL\u6563\u5ea6\u503c\u3002\u6700\u540e\uff0c\u4f7f\u7528torch.mean()\u51fd\u6570\u8ba1\u7b97\u6240\u6709\u6837\u672c\u7684KL\u6563\u5ea6\u7684\u5e73\u5747\u503c\uff0c\u5e76\u8fd4\u56de\u8be5\u503c\u4f5c\u4e3a\u635f\u5931\u51fd\u6570\u7684\u8f93\u51fa\u3002<\/p>\n<p>\u9700\u8981\u6ce8\u610f\u7684\u662f\uff0c\u7531\u4e8eKL\u6563\u5ea6\u7684\u8ba1\u7b97\u65b9\u5f0f\u662f\u4e0d\u5bf9\u79f0\u7684\uff0c\u5373KL(p||q) != 
KL(q||p)\uff0c\u56e0\u6b64\u5728\u4f7f\u7528\u8be5\u635f\u5931\u51fd\u6570\u65f6\u9700\u8981\u660e\u786ep\u548cq\u7684\u987a\u5e8f\u3002\u901a\u5e38\u60c5\u51b5\u4e0b\uff0c\u6211\u4eec\u4f1a\u5c06\u771f\u5b9e\u6982\u7387\u5206\u5e03p\u4f5c\u4e3a\u7b2c\u4e00\u4e2a\u53c2\u6570\uff0c\u5c06\u751f\u6210\u6982\u7387\u5206\u5e03q\u4f5c\u4e3a\u7b2c\u4e8c\u4e2a\u53c2\u6570\uff0c\u4ee5\u8ba1\u7b97KL(p||q)\u3002<\/p>\n<h3>adjust_learning<em>rate(optimizer, epoch, lr<\/em>)<\/h3>\n<pre><code class=\"language-py\">def adjust_learning_rate(optimizer, epoch, lr_):\n    lr_adjust = {epoch: lr_ * (0.5 ** ((epoch - 1) \/\/ 1))}\n    if epoch in lr_adjust.keys():\n        lr = lr_adjust[epoch]\n        for param_group in optimizer.param_groups:\n            param_group[&#039;lr&#039;] = lr\n        print(&#039;Updating learning rate to {}&#039;.format(lr))<\/code><\/pre>\n<p>\u8fd9\u662f\u4e00\u4e2a\u7528\u4e8e\u52a8\u6001\u8c03\u6574\u5b66\u4e60\u7387\u7684\u51fd\u6570\u3002\u5728\u795e\u7ecf\u7f51\u7edc\u8bad\u7ec3\u4e2d\uff0c\u5b66\u4e60\u7387\u662f\u63a7\u5236\u6a21\u578b\u53c2\u6570\u66f4\u65b0\u6b65\u957f\u7684\u91cd\u8981\u8d85\u53c2\u6570\uff0c\u8c03\u6574\u5b66\u4e60\u7387\u53ef\u4ee5\u5e2e\u52a9\u6a21\u578b\u66f4\u597d\u5730\u6536\u655b\u5e76\u907f\u514d\u8bad\u7ec3\u8fc7\u7a0b\u4e2d\u7684\u9707\u8361\u3002<\/p>\n<p>\u8be5\u51fd\u6570\u63a5\u6536\u4e09\u4e2a\u53c2\u6570\uff1aoptimizer\u8868\u793a\u4f18\u5316\u5668\u5bf9\u8c61\uff0cepoch\u8868\u793a\u5f53\u524d\u8bad\u7ec3\u8f6e\u6570\uff0clr_\u8868\u793a\u521d\u59cb\u5b66\u4e60\u7387\u3002\u5176\u4e2d\uff0coptimizer\u7528\u4e8e\u66f4\u65b0\u6a21\u578b\u53c2\u6570\uff0cepoch\u7528\u4e8e\u63a7\u5236\u5b66\u4e60\u7387\u66f4\u65b0\u7684\u65f6\u95f4\uff0clr_\u7528\u4e8e\u8bbe\u7f6e\u521d\u59cb\u5b66\u4e60\u7387\u3002<\/p>\n<p>\u51fd\u6570\u4e2d\uff0c\u6211\u4eec\u9996\u5148\u5b9a\u4e49\u4e86\u4e00\u4e2a\u5b57\u5178lr_adjust\uff0c\u7528\u4e8e\u5b58\u50a8\u6bcf\u4e2aepoch\u5bf9\u5e94\u7684\u5b66\u4e60\u7387\u5927\u5c0f\u3002\u5b57\
u5178\u7684\u952e\u4e3aepoch\uff0c\u503c\u4e3a\u5b66\u4e60\u7387\u5927\u5c0f\u3002\u8fd9\u91cc\u91c7\u7528\u4e86\u4e00\u79cd\u52a8\u6001\u8c03\u6574\u5b66\u4e60\u7387\u7684\u65b9\u6cd5\uff0c\u5373\u6bcf\u96941\u4e2aepoch\u5c31\u5c06\u5b66\u4e60\u7387\u4e58\u4ee50.5\u3002\u8fd9\u6837\uff0c\u6bcf\u96941\u4e2aepoch\uff0c\u5b66\u4e60\u7387\u5c31\u4f1a\u51cf\u534a\u3002\u4f8b\u5982\uff0c\u5728\u7b2c1\u4e2aepoch\u7ed3\u675f\u540e\uff0c\u5b66\u4e60\u7387\u5c06\u53d8\u4e3a\u521d\u59cb\u5b66\u4e60\u7387\u7684\u4e00\u534a\uff1b\u5728\u7b2c2\u4e2aepoch\u7ed3\u675f\u540e\uff0c\u5b66\u4e60\u7387\u5c06\u53d8\u4e3a\u521d\u59cb\u5b66\u4e60\u7387\u7684\u56db\u5206\u4e4b\u4e00\uff1b\u5728\u7b2c3\u4e2aepoch\u7ed3\u675f\u540e\uff0c\u5b66\u4e60\u7387\u5c06\u53d8\u4e3a\u521d\u59cb\u5b66\u4e60\u7387\u7684\u516b\u5206\u4e4b\u4e00\uff0c\u4ee5\u6b64\u7c7b\u63a8\u3002<\/p>\n<p>\u63a5\u4e0b\u6765\uff0c\u51fd\u6570\u68c0\u67e5\u5f53\u524depoch\u662f\u5426\u9700\u8981\u8c03\u6574\u5b66\u4e60\u7387\u3002\u5982\u679c\u5f53\u524depoch\u5728lr_adjust\u5b57\u5178\u4e2d\uff0c\u8bf4\u660e\u9700\u8981\u8c03\u6574\u5b66\u4e60\u7387\u3002\u6b64\u65f6\uff0c\u6211\u4eec\u4ece\u5b57\u5178\u4e2d\u83b7\u53d6\u5f53\u524depoch\u5bf9\u5e94\u7684\u5b66\u4e60\u7387\uff0c\u5e76\u4f7f\u7528optimizer.param_groups\u5c06\u4f18\u5316\u5668\u4e2d\u6240\u6709\u53c2\u6570\u7ec4\u7684\u5b66\u4e60\u7387\u66f4\u65b0\u4e3a\u65b0\u7684\u5b66\u4e60\u7387\u3002\u6700\u540e\uff0c\u51fd\u6570\u8f93\u51fa\u8c03\u6574\u540e\u7684\u5b66\u4e60\u7387\u3002<\/p>\n<h3>vali(self, vali_loader)<\/h3>\n<pre><code class=\"language-python\">def vali(self, vali_loader):\n        self.model.eval()\n\n        loss_1 = []\n        loss_2 = []\n        for i, (input_data, _) in enumerate(vali_loader):\n            input = input_data.float().to(self.device)\n            output, series, prior, _ = self.model(input)\n            series_loss = 0.0\n            prior_loss = 0.0\n            for u in range(len(prior)):\n                series_loss += 
(torch.mean(my_kl_loss(series[u], (\n                        prior[u] \/ torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,\n                                                                                               self.win_size)).detach())) + torch.mean(\n                    my_kl_loss(\n                        (prior[u] \/ torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,\n                                                                                                self.win_size)).detach(),\n                        series[u])))\n                prior_loss += (torch.mean(\n                    my_kl_loss((prior[u] \/ torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,\n                                                                                                       self.win_size)),\n                               series[u].detach())) + torch.mean(\n                    my_kl_loss(series[u].detach(),\n                               (prior[u] \/ torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,\n                                                                                                       self.win_size)))))\n            series_loss = series_loss \/ len(prior)\n            prior_loss = prior_loss \/ len(prior)\n\n            rec_loss = self.criterion(output, input)\n            loss_1.append((rec_loss - self.k * series_loss).item())\n            loss_2.append((rec_loss + self.k * prior_loss).item())\n\n        return np.average(loss_1), 
np.average(loss_2)<\/code><\/pre>\n<p>\u8fd9\u662f\u4e00\u4e2aPyTorch\u4e2d\u7684\u51fd\u6570\uff0c\u7528\u4e8e\u5728\u9a8c\u8bc1\u96c6\u4e0a\u8fdb\u884c\u6a21\u578b\u8bc4\u4f30\u3002\u8be5\u51fd\u6570\u63a5\u53d7\u4e00\u4e2a\u9a8c\u8bc1\u96c6\u6570\u636e\u52a0\u8f7d\u5668<code>vali_loader<\/code>\u4f5c\u4e3a\u8f93\u5165\uff0c\u5e76\u8fd4\u56de\u4e24\u4e2a\u5e73\u5747\u635f\u5931\u503c<code>loss_1<\/code>\u548c<code>loss_2<\/code>\u3002<\/p>\n<p>\u5728\u51fd\u6570\u5185\u90e8\uff0c\u6a21\u578b\u88ab\u8bbe\u7f6e\u4e3a\u8bc4\u4f30\u6a21\u5f0f\uff0c\u5373<code>self.model.eval()<\/code>\u3002\u7136\u540e\u904d\u5386\u9a8c\u8bc1\u96c6\u4e2d\u7684\u6bcf\u4e2a\u6570\u636e\u6837\u672c\uff0c\u5c06\u8f93\u5165\u6570\u636e\u8f6c\u6362\u4e3a\u6d6e\u70b9\u5f20\u91cf\u5e76\u4f20\u8f93\u5230\u8bbe\u5907\uff08GPU\u6216CPU\uff09\u3002\u7136\u540e\uff0c\u901a\u8fc7\u5c06\u8f93\u5165\u4f20\u9012\u5230<code>self.model<\/code>\u83b7\u5f97\u6a21\u578b\u7684\u8f93\u51fa\u3001series\u3001prior\u548c\u4e00\u4e2a\u672a\u4f7f\u7528\u7684\u53d8\u91cf\u3002\u8fd9\u4e9b\u8f93\u51fa\u5c06\u88ab\u7528\u4e8e\u8ba1\u7b97\u635f\u5931\u3002<\/p>\n<p>\u5728\u4e0b\u9762\u7684\u5faa\u73af\u4e2d\uff0c\u904d\u5386\u5148\u524d\u5f97\u5230\u7684prior\uff0c\u5e76\u8ba1\u7b97series_loss\u548cprior_loss\uff0c\u8fd9\u4e9b\u90fd\u662f\u901a\u8fc7\u8c03\u7528<code>my_kl_loss<\/code>\u51fd\u6570\u6765\u8ba1\u7b97\u7684\u3002\u5176\u4e2d\uff0c<code>my_kl_loss<\/code>\u8ba1\u7b97\u8f93\u5165\u5206\u5e03\uff08p\uff09\u548c\u76ee\u6807\u5206\u5e03\uff08q\uff09\u4e4b\u95f4\u7684KL\u6563\u5ea6\u3002<\/p>\n<p>\u5728\u4e0b\u9762\u7684\u4ee3\u7801\u4e2d\uff0c\u6709\u4e00\u4e2a\u7cfb\u6570k\uff0c\u7528\u4e8e\u8c03\u6574\u7cfb\u5217\u635f\u5931\u548c\u5148\u524d\u635f\u5931\u4e4b\u95f4\u7684\u5e73\u8861\u3002\u901a\u8fc7\u8ba1\u7b97\u91cd\u6784\u635f\u5931\u4e0ek\u00d7series_loss\u4e4b\u95f4\u7684\u5dee\u5f02\uff0c\u53ef\u4ee5\u83b7\u5f97<code>loss_1<\/code>\u3002\u901a\u8fc7\u8ba1\u7b97\u91cd\u6784\u635f\u5931\u4e0ek\u00d7pr
ior_loss\u4e4b\u95f4\u7684\u603b\u548c\uff0c\u53ef\u4ee5\u83b7\u5f97<code>loss_2<\/code>\u3002\u8fd9\u4e9b\u635f\u5931\u503c\u5c06\u88ab\u5b58\u50a8\u5728<code>loss_1<\/code>\u548c<code>loss_2<\/code>\u4e2d\uff0c\u5e76\u6700\u7ec8\u8fd4\u56de\u5b83\u4eec\u7684\u5e73\u5747\u503c\u3002<\/p>\n<h3>train(self)<\/h3>\n<pre><code class=\"language-python\">def train(self):\n\n        print(&quot;======================TRAIN MODE======================&quot;)\n\n        time_now = time.time()\n        path = self.model_save_path\n        if not os.path.exists(path):\n            os.makedirs(path)\n        early_stopping = EarlyStopping(patience=3, verbose=True, dataset_name=self.dataset)\n        train_steps = len(self.train_loader)\n\n        for epoch in range(self.num_epochs):\n            iter_count = 0\n            loss1_list = []\n\n            epoch_time = time.time()\n            self.model.train()\n            for i, (input_data, labels) in enumerate(self.train_loader):\n\n                self.optimizer.zero_grad()\n                iter_count += 1\n                input = input_data.float().to(self.device)\n\n                output, series, prior, _ = self.model(input)\n\n                # calculate Association discrepancy\n                series_loss = 0.0\n                prior_loss = 0.0\n                for u in range(len(prior)):\n                    series_loss += (torch.mean(my_kl_loss(series[u], (\n                            prior[u] \/ torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,\n                                                                                                   self.win_size)).detach())) + torch.mean(\n                        my_kl_loss((prior[u] \/ torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,\n                                                                                                           self.win_size)).detach(),\n                                   series[u])))\n                    
prior_loss += (torch.mean(my_kl_loss(\n                        (prior[u] \/ torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,\n                                                                                                self.win_size)),\n                        series[u].detach())) + torch.mean(\n                        my_kl_loss(series[u].detach(), (\n                                prior[u] \/ torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,\n                                                                                                       self.win_size)))))\n                series_loss = series_loss \/ len(prior)\n                prior_loss = prior_loss \/ len(prior)\n\n                rec_loss = self.criterion(output, input)\n\n                loss1_list.append((rec_loss - self.k * series_loss).item())\n                loss1 = rec_loss - self.k * series_loss\n                loss2 = rec_loss + self.k * prior_loss\n\n                if (i + 1) % 100 == 0:\n                    speed = (time.time() - time_now) \/ iter_count\n                    left_time = speed * ((self.num_epochs - epoch) * train_steps - i)\n                    print(&#039;\\tspeed: {:.4f}s\/iter; left time: {:.4f}s&#039;.format(speed, left_time))\n                    iter_count = 0\n                    time_now = time.time()\n\n                # Minimax strategy\n                loss1.backward(retain_graph=True)\n                loss2.backward()\n                self.optimizer.step()\n\n            print(&quot;Epoch: {} cost time: {}&quot;.format(epoch + 1, time.time() - epoch_time))\n            train_loss = np.average(loss1_list)\n\n            vali_loss1, vali_loss2 = self.vali(self.test_loader)\n\n            print(\n                &quot;Epoch: {0}, Steps: {1} | Train Loss: {2:.7f} Vali Loss: {3:.7f} &quot;.format(\n                    epoch + 1, train_steps, train_loss, vali_loss1))\n            early_stopping(vali_loss1, vali_loss2, 
self.model, path)\n            if early_stopping.early_stop:\n                print(&quot;Early stopping&quot;)\n                break\n            adjust_learning_rate(self.optimizer, epoch + 1, self.lr)\n<\/code><\/pre>\n<p>\u8fd9\u662f\u4e00\u4e2a\u8bad\u7ec3\u6df1\u5ea6\u5b66\u4e60\u6a21\u578b\u7684\u4ee3\u7801\u3002\u4ee5\u4e0b\u662f\u5bf9\u4e3b\u8981\u4ee3\u7801\u5757\u7684\u89e3\u91ca\uff1a<\/p>\n<ul>\n<li><code>train<\/code>: \u6b64\u65b9\u6cd5\u662f\u8bad\u7ec3\u6a21\u578b\u7684\u4e3b\u8981\u51fd\u6570\u3002\u5b83\u5c06\u6570\u636e\u52a0\u8f7d\u5668\u4f20\u9012\u7ed9\u6a21\u578b\u5e76\u8fed\u4ee3\u6307\u5b9a\u6570\u91cf\u7684\u65f6\u671f\u3002\u5728\u6bcf\u4e2a\u65f6\u671f\u4e2d\uff0c\u5b83\u901a\u8fc7\u8c03\u7528 <code>vali<\/code> \u65b9\u6cd5\u8ba1\u7b97\u9a8c\u8bc1\u96c6\u4e0a\u7684\u635f\u5931\u3002\u8fd8\u6709\u4e00\u4e2a\u540d\u4e3a <code>EarlyStopping<\/code> \u7684\u56de\u8c03\uff0c\u5b83\u4f1a\u5728\u9a8c\u8bc1\u96c6\u4e0a\u7684\u635f\u5931\u4e0d\u518d\u6539\u5584\u65f6\u505c\u6b62\u8bad\u7ec3\u3002<\/li>\n<li><code>self.optimizer.zero_grad()<\/code>: \u5728\u6bcf\u4e2a\u6279\u6b21\u7684\u5f00\u59cb\u65f6\uff0c\u5c06\u4f18\u5316\u5668\u7684\u68af\u5ea6\u8bbe\u7f6e\u4e3a\u96f6\uff0c\u4ee5\u9632\u6b62\u68af\u5ea6\u5728\u591a\u4e2a\u6279\u6b21\u4e4b\u95f4\u7d2f\u79ef\u3002<\/li>\n<li><code>self.model.train()<\/code>: \u5728\u6bcf\u4e2a\u6279\u6b21\u7684\u5f00\u59cb\u65f6\uff0c\u5c06\u6a21\u578b\u8bbe\u7f6e\u4e3a\u8bad\u7ec3\u6a21\u5f0f\uff0c\u4ee5\u786e\u4fdd\u5728\u8bad\u7ec3\u65f6\u4f7f\u7528\u6279\u91cf\u89c4\u8303\u5316\u7b49\u6280\u672f\u3002<\/li>\n<li><code>self.optimizer.step()<\/code>: \u5728\u8ba1\u7b97\u4e86\u68af\u5ea6\u4e4b\u540e\uff0c\u8c03\u7528\u6b64\u65b9\u6cd5\u6765\u6267\u884c\u4f18\u5316\u6b65\u9aa4\uff0c\u5373\u4f7f\u7528\u68af\u5ea6\u66f4\u65b0\u6a21\u578b\u53c2\u6570\u3002<\/li>\n<li><code>loss1.backward(retain_graph=True)<\/code>: 
\u8ba1\u7b97\u524d\u5411\u4f20\u9012\u540e\u7684\u53cd\u5411\u4f20\u9012\u68af\u5ea6\uff0c\u5373\u8ba1\u7b97\u76f8\u5bf9\u4e8e <code>loss1<\/code> \u7684\u68af\u5ea6\uff0c\u540c\u65f6\u4fdd\u7559\u8ba1\u7b97\u56fe\u4ee5\u8fdb\u884c\u540e\u7eed\u8ba1\u7b97\u3002<\/li>\n<li><code>loss2.backward()<\/code>: \u8ba1\u7b97\u76f8\u5bf9\u4e8e <code>loss2<\/code> \u7684\u68af\u5ea6\uff0c\u5e76\u4f7f\u7528\u68af\u5ea6\u66f4\u65b0\u6a21\u578b\u53c2\u6570\u3002<\/li>\n<li><code>early_stopping(vali_loss1, vali_loss2, self.model, path)<\/code>: \u5728\u6bcf\u4e2a\u65f6\u671f\u7ed3\u675f\u65f6\uff0c\u5c06\u6a21\u578b\u548c\u8def\u5f84\u4f20\u9012\u7ed9 <code>EarlyStopping<\/code> \u56de\u8c03\uff0c\u4ee5\u68c0\u67e5\u6a21\u578b\u662f\u5426\u5e94\u8be5\u88ab\u65e9\u671f\u505c\u6b62\u3002<\/li>\n<\/ul>\n<p>\u8fd9\u6bb5\u4ee3\u7801\u4e2d\u8fd8\u6709\u4e00\u4e9b\u81ea\u5b9a\u4e49\u51fd\u6570\uff0c\u5982 <code>my_kl_loss<\/code> \u548c <code>adjust_learning_rate<\/code>\uff0c\u8fd9\u4e9b\u51fd\u6570\u5728\u4ee3\u7801\u4e2d\u6ca1\u6709\u7ed9\u51fa\u3002<\/p>\n<h3>test(self)<\/h3>\n<pre><code class=\"language-python\">    def test(self):\n        self.model.load_state_dict(\n            torch.load(\n                os.path.join(str(self.model_save_path), str(self.dataset) + &#039;_checkpoint.pth&#039;)))\n        self.model.eval()\n        temperature = 50\n\n        print(&quot;======================TEST MODE======================&quot;)\n\n        criterion = nn.MSELoss(reduce=False)\n\n        # (1) stastic on the train set\n        attens_energy = []\n        for i, (input_data, labels) in enumerate(self.train_loader):\n            input = input_data.float().to(self.device)\n            output, series, prior, _ = self.model(input)\n            loss = torch.mean(criterion(input, output), dim=-1)\n            series_loss = 0.0\n            prior_loss = 0.0\n            for u in range(len(prior)):\n                if u == 0:\n                    series_loss = my_kl_loss(series[u], (\n   
                         prior[u] \/ torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,\n                                                                                                   self.win_size)).detach()) * temperature\n                    prior_loss = my_kl_loss(\n                        (prior[u] \/ torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,\n                                                                                                self.win_size)),\n                        series[u].detach()) * temperature\n                else:\n                    series_loss += my_kl_loss(series[u], (\n                            prior[u] \/ torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,\n                                                                                                   self.win_size)).detach()) * temperature\n                    prior_loss += my_kl_loss(\n                        (prior[u] \/ torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,\n                                                                                                self.win_size)),\n                        series[u].detach()) * temperature\n\n            metric = torch.softmax((-series_loss - prior_loss), dim=-1)\n            cri = metric * loss\n            cri = cri.detach().cpu().numpy()\n            attens_energy.append(cri)\n\n        attens_energy = np.concatenate(attens_energy, axis=0).reshape(-1)\n        train_energy = np.array(attens_energy)\n\n        # (2) find the threshold\n        attens_energy = []\n        for i, (input_data, labels) in enumerate(self.thre_loader):\n            input = input_data.float().to(self.device)\n            output, series, prior, _ = self.model(input)\n\n            loss = torch.mean(criterion(input, output), dim=-1)\n\n            series_loss = 0.0\n            prior_loss = 0.0\n            for u in range(len(prior)):\n                if u == 0:\n   
                 series_loss = my_kl_loss(series[u], (\n                            prior[u] \/ torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,\n                                                                                                   self.win_size)).detach()) * temperature\n                    prior_loss = my_kl_loss(\n                        (prior[u] \/ torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,\n                                                                                                self.win_size)),\n                        series[u].detach()) * temperature\n                else:\n                    series_loss += my_kl_loss(series[u], (\n                            prior[u] \/ torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,\n                                                                                                   self.win_size)).detach()) * temperature\n                    prior_loss += my_kl_loss(\n                        (prior[u] \/ torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,\n                                                                                                self.win_size)),\n                        series[u].detach()) * temperature\n            # Metric\n            metric = torch.softmax((-series_loss - prior_loss), dim=-1)\n            cri = metric * loss\n            cri = cri.detach().cpu().numpy()\n            attens_energy.append(cri)\n\n        attens_energy = np.concatenate(attens_energy, axis=0).reshape(-1)\n        test_energy = np.array(attens_energy)\n        combined_energy = np.concatenate([train_energy, test_energy], axis=0)\n        thresh = np.percentile(combined_energy, 100 - self.anormly_ratio)\n        print(&quot;Threshold :&quot;, thresh)\n\n        # (3) evaluation on the test set\n        test_labels = []\n        attens_energy = []\n        for i, (input_data, labels) in enumerate(self.thre_loader):\n  
          input = input_data.float().to(self.device)\n            output, series, prior, _ = self.model(input)\n\n            loss = torch.mean(criterion(input, output), dim=-1)\n\n            series_loss = 0.0\n            prior_loss = 0.0\n            for u in range(len(prior)):\n                if u == 0:\n                    series_loss = my_kl_loss(series[u], (\n                            prior[u] \/ torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,\n                                                                                                   self.win_size)).detach()) * temperature\n                    prior_loss = my_kl_loss(\n                        (prior[u] \/ torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,\n                                                                                                self.win_size)),\n                        series[u].detach()) * temperature\n                else:\n                    series_loss += my_kl_loss(series[u], (\n                            prior[u] \/ torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,\n                                                                                                   self.win_size)).detach()) * temperature\n                    prior_loss += my_kl_loss(\n                        (prior[u] \/ torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,\n                                                                                                self.win_size)),\n                        series[u].detach()) * temperature\n            metric = torch.softmax((-series_loss - prior_loss), dim=-1)\n\n            cri = metric * loss\n            cri = cri.detach().cpu().numpy()\n            attens_energy.append(cri)\n            test_labels.append(labels)\n\n        attens_energy = np.concatenate(attens_energy, axis=0).reshape(-1)\n        test_labels = np.concatenate(test_labels, axis=0).reshape(-1)\n        
test_energy = np.array(attens_energy)\n        test_labels = np.array(test_labels)\n\n        pred = (test_energy &gt; thresh).astype(int)\n\n        gt = test_labels.astype(int)\n\n        print(&quot;pred:   &quot;, pred.shape)\n        print(&quot;gt:     &quot;, gt.shape)\n\n        # detection adjustment\n        anomaly_state = False\n        for i in range(len(gt)):\n            if gt[i] == 1 and pred[i] == 1 and not anomaly_state:\n                anomaly_state = True\n                for j in range(i, 0, -1):\n                    if gt[j] == 0:\n                        break\n                    else:\n                        if pred[j] == 0:\n                            pred[j] = 1\n                for j in range(i, len(gt)):\n                    if gt[j] == 0:\n                        break\n                    else:\n                        if pred[j] == 0:\n                            pred[j] = 1\n            elif gt[i] == 0:\n                anomaly_state = False\n            if anomaly_state:\n                pred[i] = 1\n\n        pred = np.array(pred)\n        gt = np.array(gt)\n        print(&quot;pred: &quot;, pred.shape)\n        print(&quot;gt:   &quot;, gt.shape)\n\n        from sklearn.metrics import precision_recall_fscore_support\n        from sklearn.metrics import accuracy_score\n        accuracy = accuracy_score(gt, pred)\n        precision, recall, f_score, support = precision_recall_fscore_support(gt, pred,\n                                                                              average=&#039;binary&#039;)\n        print(\n            &quot;Accuracy : {:0.4f}, Precision : {:0.4f}, Recall : {:0.4f}, F-score : {:0.4f} &quot;.format(\n                accuracy, precision,\n                recall, f_score))\n        return accuracy, precision, recall, 
f_score<\/code><\/pre>\n<p>\u8fd9\u6bb5\u4ee3\u7801\u662f\u7528\u4e8e\u5728\u65f6\u5e8f\u6570\u636e\u4e2d\u68c0\u6d4b\u5f02\u5e38\u7684\u4e00\u4e2a\u65b9\u6cd5\u3002\u4e3b\u8981\u5206\u4e3a\u4e09\u4e2a\u6b65\u9aa4\uff1a<\/p>\n<p>\u7b2c\u4e00\u6b65\u662f\u5bf9\u8bad\u7ec3\u96c6\u8fdb\u884c\u7edf\u8ba1\uff0c\u83b7\u53d6\u6bcf\u4e2a\u65f6\u95f4\u5e8f\u5217\u70b9\u7684\u5f02\u5e38\u5f97\u5206\uff08energy\uff09\uff0c\u8fd9\u91cc\u4f7f\u7528\u4e86 MSELoss \u4f5c\u4e3a\u635f\u5931\u51fd\u6570\uff0c\u7136\u540e\u7ed3\u5408\u6a21\u578b\u7684\u8f93\u51fa\u7ed3\u679c\u8ba1\u7b97\u6bcf\u4e2a\u70b9\u7684\u5f02\u5e38\u5f97\u5206\u3002<\/p>\n<p>\u7b2c\u4e8c\u6b65\u662f\u6839\u636e\u8bad\u7ec3\u96c6\u5f97\u5230\u7684\u5f02\u5e38\u5f97\u5206\u7edf\u8ba1\u51fa\u4e00\u4e2a\u5f02\u5e38\u9608\u503c\u3002\u5177\u4f53\u65b9\u6cd5\u662f\u5c06\u8bad\u7ec3\u96c6\u548c\u6d4b\u8bd5\u96c6\u7684\u5f02\u5e38\u5f97\u5206\u5408\u5e76\u540e\uff0c\u627e\u5230\u4e00\u4e2a\u5206\u4f4d\u6570\uff0c\u5c06\u9ad8\u4e8e\u8fd9\u4e2a\u5206\u4f4d\u6570\u7684\u70b9\u6807\u8bb0\u4e3a\u5f02\u5e38\u3002<\/p>\n<p>\u7b2c\u4e09\u6b65\u662f\u5728\u6d4b\u8bd5\u96c6\u4e0a\u8fdb\u884c\u5f02\u5e38\u68c0\u6d4b\uff0c\u5f97\u5230\u6bcf\u4e2a\u70b9\u7684\u5f02\u5e38\u5f97\u5206\uff0c\u5e76\u5c06\u9ad8\u4e8e\u9608\u503c\u7684\u70b9\u6807\u8bb0\u4e3a\u5f02\u5e38\u3002<\/p>\n<p>\u51c6\u786e\u7387\uff08Accuracy\uff09\uff1a\u4ee3\u8868\u6a21\u578b\u5728\u6240\u6709\u6837\u672c\u4e2d\u6b63\u786e\u5206\u7c7b\u7684\u6bd4\u4f8b\uff0c\u5373\u6b63\u786e\u9884\u6d4b\u7684\u6837\u672c\u6570\u4e0e\u603b\u6837\u672c\u6570\u7684\u6bd4\u503c\u3002\u5728\u8fd9\u91cc\uff0c\u51c6\u786e\u7387\u4e3a0.9587\uff0c\u8868\u793a\u6a21\u578b\u6b63\u786e\u9884\u6d4b\u4e8695.87%\u7684\u6837\u672c\u3002<\/p>\n<p>\u7cbe\u786e\u7387\uff08Precision\uff09\uff1a\u4ee3\u8868\u6a21\u578b\u5728\u9884\u6d4b\u4e3a\u5f02\u5e38\u7684\u6837\u672c\u4e2d\uff0c\u771f\u6b63\u662f\u5f02\u5e38\u7684\u6837\u672c\u6240\u5360\u7684\u6bd4\u4f8b\u3002\u5728\u8fd9\u91cc\uff0c\u7cbe\
u786e\u7387\u4e3a0.9216\uff0c\u8868\u793a\u6a21\u578b\u5c0692.16%\u7684\u9884\u6d4b\u4e3a\u5f02\u5e38\u7684\u6837\u672c\u6b63\u786e\u5730\u8bc6\u522b\u4e3a\u5f02\u5e38\u3002<\/p>\n<p>\u53ec\u56de\u7387\uff08Recall\uff09\uff1a\u4ee3\u8868\u6a21\u578b\u6b63\u786e\u9884\u6d4b\u4e3a\u5f02\u5e38\u7684\u6837\u672c\u6570\u5360\u6240\u6709\u771f\u6b63\u5f02\u5e38\u7684\u6837\u672c\u6570\u7684\u6bd4\u4f8b\u3002\u5728\u8fd9\u91cc\uff0c\u53ec\u56de\u7387\u4e3a0.9304\uff0c\u8868\u793a\u6a21\u578b\u80fd\u591f\u6b63\u786e\u5730\u8bc6\u522b\u51fa93.04%\u7684\u771f\u6b63\u5f02\u5e38\u6837\u672c\u3002<\/p>\n<p>F1\u503c\uff08F-score\uff09\uff1a\u7efc\u5408\u4e86\u7cbe\u786e\u7387\u548c\u53ec\u56de\u7387\u4e24\u4e2a\u6307\u6807\uff0c\u662f\u4e00\u4e2a\u7efc\u5408\u8bc4\u4f30\u6a21\u578b\u6027\u80fd\u7684\u6307\u6807\u3002\u5728\u8fd9\u91cc\uff0cF1\u503c\u4e3a0.9260\uff0c\u8868\u793a\u6a21\u578b\u7efc\u5408\u8003\u8651\u4e86\u7cbe\u786e\u7387\u548c\u53ec\u56de\u7387\u4e24\u4e2a\u6307\u6807\uff0c\u5177\u6709\u6bd4\u8f83\u597d\u7684\u5206\u7c7b\u6548\u679c\u3002<\/p>\n<p>\u63d0\u9ad8F1\u503c\u7684\u65b9\u6cd5\u3002<\/p>\n<ol>\n<li>\u8c03\u6574\u8d85\u53c2\u6570\uff1a\u60a8\u53ef\u4ee5\u901a\u8fc7\u8c03\u6574\u5b66\u4e60\u7387\u3001\u6b63\u5219\u5316\u53c2\u6570\u3001\u6279\u6b21\u5927\u5c0f\u7b49\u8d85\u53c2\u6570\u6765\u4f18\u5316\u6a21\u578b\u3002\u60a8\u53ef\u4ee5\u4f7f\u7528\u4ea4\u53c9\u9a8c\u8bc1\u6280\u672f\u6765\u627e\u5230\u6700\u4f73\u7684\u8d85\u53c2\u6570\u7ec4\u5408\u3002<\/li>\n<li>\u6570\u636e\u589e\u5f3a\uff1a\u901a\u8fc7\u5bf9\u6570\u636e\u96c6\u8fdb\u884c\u589e\u5f3a\uff0c\u4f8b\u5982\u65cb\u8f6c\u3001\u7ffb\u8f6c\u3001\u7f29\u653e\u3001\u88c1\u526a\u7b49\uff0c\u53ef\u4ee5\u589e\u52a0\u6570\u636e\u96c6\u7684\u591a\u6837\u6027\uff0c\u6709\u52a9\u4e8e\u63d0\u9ad8\u6a21\u578b\u7684\u6cdb\u5316\u80fd\u529b\u3002<\/li>\n<li>\u66f4\u6362\u635f\u5931\u51fd\u6570\uff1a\u60a8\u53ef\u4ee5\u5c1d\u8bd5\u4f7f\u7528\u4e0d\u540c\u7684\u635f\u5931\u51fd\u6570\uff0c\u4f8b\u5982\u4e8c\u5
143\u4ea4\u53c9\u71b5\u3001Focal Loss\u3001Dice Loss\u7b49\uff0c\u4ee5\u83b7\u5f97\u66f4\u597d\u7684\u7ed3\u679c\u3002<\/li>\n<li>\u589e\u52a0\u8bad\u7ec3\u6570\u636e\uff1a\u4f7f\u7528\u66f4\u591a\u7684\u8bad\u7ec3\u6570\u636e\u53ef\u4ee5\u5e2e\u52a9\u6a21\u578b\u66f4\u597d\u5730\u5b66\u4e60\u6570\u636e\u7684\u5206\u5e03\uff0c\u6709\u52a9\u4e8e\u63d0\u9ad8\u6a21\u578b\u7684\u6027\u80fd\u3002<\/li>\n<li>\u8c03\u6574\u6a21\u578b\u7ed3\u6784\uff1a\u901a\u8fc7\u4fee\u6539\u6a21\u578b\u7684\u5c42\u6570\u3001\u5bbd\u5ea6\u3001\u5377\u79ef\u6838\u5927\u5c0f\u7b49\u7ed3\u6784\u53c2\u6570\u6765\u4f18\u5316\u6a21\u578b\u3002<\/li>\n<li>\u4f7f\u7528\u9884\u8bad\u7ec3\u6a21\u578b\uff1a\u4f7f\u7528\u9884\u8bad\u7ec3\u6a21\u578b\u53ef\u4ee5\u52a0\u901f\u6a21\u578b\u7684\u6536\u655b\u901f\u5ea6\uff0c\u5e76\u4e14\u901a\u5e38\u53ef\u4ee5\u63d0\u9ad8\u6a21\u578b\u7684\u6027\u80fd\u3002<\/li>\n<\/ol>\n<h2>\u6570\u636e\u96c6<\/h2>\n<h3>SMD<\/h3>\n<p>SMD\u6570\u636e\u96c6\u662f\u6307\u667a\u80fd\u80fd\u6e90\u7ba1\u7406\u7cfb\u7edf\uff08Smart*\uff09\u4e2d\u7528\u4e8e\u5f02\u5e38\u68c0\u6d4b\u7684\u6570\u636e\u96c6\uff0c\u5176\u4e2d\u5305\u542b\u4e86\u6765\u81ea20\u4e2a\u4e0d\u540c\u697c\u5b87\u7684\u771f\u5b9e\u6570\u636e\u3002\u8fd9\u4e9b\u6570\u636e\u8bb0\u5f55\u4e86\u4e0d\u540c\u7c7b\u578b\u7684\u4f20\u611f\u5668\u6570\u636e\uff0c\u5305\u62ec\u6e29\u5ea6\u3001\u6e7f\u5ea6\u3001\u7535\u529b\u7b49\uff0c\u7528\u4e8e\u68c0\u6d4b\u80fd\u6e90\u7cfb\u7edf\u4e2d\u7684\u5f02\u5e38\u548c\u6545\u969c\u3002SMD\u6570\u636e\u96c6\u53ef\u4ee5\u7528\u4e8e\u8bad\u7ec3\u548c\u6d4b\u8bd5\u5404\u79cd\u5f02\u5e38\u68c0\u6d4b\u7b97\u6cd5\uff0c\u662f\u4e00\u4e2a\u5e38\u7528\u7684\u6570\u636e\u96c6\u4e4b\u4e00\u3002<\/p>\n<h3>SMAP<\/h3>\n<p>SMAP (Soil Moisture Active Passive) 
\u6570\u636e\u96c6\uff1a\u662f\u7531NASA\u53d1\u8d77\u7684\u4e00\u4e2a\u536b\u661f\u4efb\u52a1\uff0c\u65e8\u5728\u901a\u8fc7\u4f7f\u7528\u5fae\u6ce2\u8f90\u5c04\u8ba1\u548c\u5fae\u6ce2\u5e72\u6d89\u4eea\uff0c\u6d4b\u91cf\u5168\u7403\u571f\u58e4\u6e7f\u5ea6\u548c\u51bb\u878d\u72b6\u6001\u3002SMAP\u6570\u636e\u96c6\u5305\u542b\u6765\u81ea2015\u5e74\u81f3\u4eca\u7684\u571f\u58e4\u6e7f\u5ea6\u6570\u636e\u3001\u8868\u9762\u571f\u58e4\u6e29\u5ea6\u6570\u636e\u548c\u51bb\u878d\u72b6\u6001\u6570\u636e\uff0c\u53ef\u7528\u4e8e\u7814\u7a76\u571f\u58e4\u6e7f\u5ea6\u3001\u6c34\u6587\u5faa\u73af\u548c\u6c14\u5019\u53d8\u5316\u7b49\u95ee\u9898\u3002<\/p>\n<h3>PSM<\/h3>\n<p>PSM (Power System Modeling) \u6570\u636e\u96c6\uff1a\u662f\u4e00\u4e2a\u7528\u4e8e\u7535\u529b\u7cfb\u7edf\u7814\u7a76\u7684\u6570\u636e\u96c6\uff0c\u5305\u62ec\u4e86\u5927\u91cf\u7684\u7535\u529b\u7cfb\u7edf\u8fd0\u884c\u6570\u636e\uff0c\u5982\u7535\u529b\u8d1f\u8377\u3001\u53d1\u7535\u673a\u8f93\u51fa\u3001\u7ebf\u8def\u7535\u538b\u7b49\uff0c\u53ef\u7528\u4e8e\u7535\u529b\u7cfb\u7edf\u5efa\u6a21\u3001\u4f18\u5316\u548c\u63a7\u5236\u7b49\u7814\u7a76\u9886\u57df\u3002<\/p>\n<h3>MSL<\/h3>\n<p>MSL (Mars Science Laboratory) \u6570\u636e\u96c6\uff1a\u662f\u6765\u81eaNASA\u597d\u5947\u53f7\u706b\u661f\u8f66\u7684\u9065\u6d4b\u6570\u636e\u96c6\uff0c\u5305\u542b\u4f20\u611f\u5668\u6570\u636e\uff0c\u53ef\u7528\u4e8e\u65f6\u95f4\u5e8f\u5217\u5f02\u5e38\u68c0\u6d4b\u7b49\u7814\u7a76\u9886\u57df\u3002<\/p>\n","protected":false},"excerpt":{"rendered":"<p>Anomaly-transformer\u6280\u672f\u6587\u6863 \u4ecb\u7ecd Anomaly Transformer\u662f\u4e00\u79cd\u57fa\u4e8eTran 
[&hellip;]<\/p>\n","protected":false},"author":1,"featured_media":153,"comment_status":"open","ping_status":"open","sticky":false,"template":"","format":"standard","meta":{"footnotes":""},"categories":[6],"tags":[],"class_list":["post-367","post","type-post","status-publish","format-standard","has-post-thumbnail","hentry","category-sec"],"_links":{"self":[{"href":"https:\/\/blog.shangwendada.top\/index.php\/wp-json\/wp\/v2\/posts\/367","targetHints":{"allow":["GET"]}}],"collection":[{"href":"https:\/\/blog.shangwendada.top\/index.php\/wp-json\/wp\/v2\/posts"}],"about":[{"href":"https:\/\/blog.shangwendada.top\/index.php\/wp-json\/wp\/v2\/types\/post"}],"author":[{"embeddable":true,"href":"https:\/\/blog.shangwendada.top\/index.php\/wp-json\/wp\/v2\/users\/1"}],"replies":[{"embeddable":true,"href":"https:\/\/blog.shangwendada.top\/index.php\/wp-json\/wp\/v2\/comments?post=367"}],"version-history":[{"count":3,"href":"https:\/\/blog.shangwendada.top\/index.php\/wp-json\/wp\/v2\/posts\/367\/revisions"}],"predecessor-version":[{"id":402,"href":"https:\/\/blog.shangwendada.top\/index.php\/wp-json\/wp\/v2\/posts\/367\/revisions\/402"}],"wp:featuredmedia":[{"embeddable":true,"href":"https:\/\/blog.shangwendada.top\/index.php\/wp-json\/wp\/v2\/media\/153"}],"wp:attachment":[{"href":"https:\/\/blog.shangwendada.top\/index.php\/wp-json\/wp\/v2\/media?parent=367"}],"wp:term":[{"taxonomy":"category","embeddable":true,"href":"https:\/\/blog.shangwendada.top\/index.php\/wp-json\/wp\/v2\/categories?post=367"},{"taxonomy":"post_tag","embeddable":true,"href":"https:\/\/blog.shangwendada.top\/index.php\/wp-json\/wp\/v2\/tags?post=367"}],"curies":[{"name":"wp","href":"https:\/\/api.w.org\/{rel}","templated":true}]}}