diff --git a/db.json b/db.json index e5fc36f..7e9769f 100644 --- a/db.json +++ b/db.json @@ -1 +1 @@ -{"meta":{"version":1,"warehouse":"5.0.1"},"models":{"Asset":[{"_id":"themes/butterfly/source/css/index.styl","path":"css/index.styl","modified":1,"renderable":1},{"_id":"themes/butterfly/source/css/var.styl","path":"css/var.styl","modified":1,"renderable":1},{"_id":"themes/butterfly/source/img/404.jpg","path":"img/404.jpg","modified":1,"renderable":1},{"_id":"themes/butterfly/source/img/avatar.jpg","path":"img/avatar.jpg","modified":1,"renderable":1},{"_id":"themes/butterfly/source/img/favicon.png","path":"img/favicon.png","modified":1,"renderable":1},{"_id":"themes/butterfly/source/img/friend_404.gif","path":"img/friend_404.gif","modified":1,"renderable":1},{"_id":"themes/butterfly/source/js/main.js","path":"js/main.js","modified":1,"renderable":1},{"_id":"themes/butterfly/source/js/tw_cn.js","path":"js/tw_cn.js","modified":1,"renderable":1},{"_id":"themes/butterfly/source/js/utils.js","path":"js/utils.js","modified":1,"renderable":1},{"_id":"themes/butterfly/source/js/search/algolia.js","path":"js/search/algolia.js","modified":1,"renderable":1},{"_id":"themes/butterfly/source/js/search/local-search.js","path":"js/search/local-search.js","modified":1,"renderable":1},{"_id":"source/img/000001.png","path":"img/000001.png","modified":1,"renderable":0},{"_id":"source/img/peiqian.png","path":"img/peiqian.png","modified":1,"renderable":0},{"_id":"source/img/dingyue.png","path":"img/dingyue.png","modified":1,"renderable":0},{"_id":"source/img/site01.jpg","path":"img/site01.jpg","modified":1,"renderable":0},{"_id":"source/img/yiyuan.png","path":"img/yiyuan.png","modified":1,"renderable":0}],"Cache":[{"_id":"source/_data/link.yml","hash":"6aaf04b5c920e403bea8f82e4b3f4d719760e6df","modified":1723257824766},{"_id":"source/_posts/hello-world.md","hash":"40e804610ff712f079ace7012b862b4efecf82fb","modified":1723206110026},{"_id":"source/categories/index.md","hash":"49618dce0bee26dfc65f0de1794f01d2967cb7b8","modified":1722407843090},{"_id":"source/about/index.md","hash":"31fdd093368e1c18c1592a8cad6f3c3fe6d6711a","modified":1723257494626},{"_id":"source/link/index.md","hash":"26e21fe46bf7c0fc5ae95b73d52bf516368d6dc7","modified":1723257768927},{"_id":"source/movies/index.md","hash":"4abf7a6a712f712f22d80203f6004a89feca2014","modified":1723257633715},{"_id":"source/music/index.md","hash":"d8dcf467af235e0bac09805db3a4ab73ad782b83","modified":1723257619897},{"_id":"source/tags/index.md","hash":"1f27b735b6c7d629c8931b5bd3913bdd659f1981","modified":1722385980076},{"_id":"source/_posts/frontend/frontend.md","hash":"3770215d35203b03d86d4f3a6ceee32f9849b584","modified":1723206110023},{"_id":"source/_posts/linux/script.md","hash":"2deda929ed0c81ddcd00945c673b29f1bd1353c0","modified":1723997145731},{"_id":"source/_posts/machinelearning/01.md","hash":"6129d4857460e41ba62b67c8f9df6311e1aa86d7","modified":1736760419899},{"_id":"source/_posts/net/index.md","hash":"7eae8512c2a6bd937200487540b962d65a47ad9e","modified":1723206110028},{"_id":"source/_posts/ancient/guwenguanzhi/1.md","hash":"629652f1d8e2a347e6b11c367d25b0a26698cd60","modified":1723257063678},{"_id":"source/_posts/frontend/deploy/deploy.md","hash":"ba4a05741f30f92f9fbe7b815519142de09b13fb","modified":1723206110045},{"_id":"source/_posts/frontend/uniapp/component1.md","hash":"b9b981d3903f5e57e7f327d930df4f04780211f9","modified":1723206110042},{"_id":"source/_posts/frontend/uniapp/uniapp.md","hash":"8c8594e3eb73d2c10c4c6dfc008b58c36763b054","modified":172320611
0039},{"_id":"source/_posts/bigdata/hadoop/env.md","hash":"4b184c804e9c8083966b1360fe9d5aa539930005","modified":1726065928095},{"_id":"source/_posts/bigdata/hadoop/hdfs.md","hash":"3148149529354105eb301c70961b6f1b68030835","modified":1726066302712},{"_id":"source/_posts/net/jichang/jichang.md","hash":"ad0485f33d2f0a8eea342c815d869911433df370","modified":1723206110036},{"_id":"source/_posts/bigdata/hadoop/zookeper.md","hash":"96e58c86eba61accb620adc5e7b7dbc46e47b237","modified":1726066302706},{"_id":"source/img/000001.png","hash":"ad86c3b72174364d462bdab1d09540bd79eb123c","modified":1725979378674},{"_id":"source/img/yiyuan.png","hash":"817a89509a8ebcddff6b369979d53ecf44a30a9f","modified":1722998234119},{"_id":"themes/butterfly/README_CN.md","hash":"9d729ee2ffc5e5f703ccfbfbbb7b286d59071125","modified":1722500710087},{"_id":"themes/butterfly/README.md","hash":"20a91bea7f7ada8b8195d2abff106a7ce21bba20","modified":1722500710085},{"_id":"themes/butterfly/package.json","hash":"2b6fb6f62b9fa6a829311ffd532ae760fbd0a7db","modified":1722500710223},{"_id":"themes/butterfly/LICENSE","hash":"c8bc7df08db9dd3b39c2c2259a163a36cf2f6808","modified":1722500710084},{"_id":"themes/butterfly/_config.yml","hash":"c6fab3700a6502b5790ca20d20113020544ea048","modified":1722500710088},{"_id":"themes/butterfly/plugins.yml","hash":"7bb2c7350c0c57850aa30213cd0f26553a614702","modified":1722500710224},{"_id":"themes/butterfly/.github/FUNDING.yml","hash":"3b572099a992e30267f5fe4cd3c582ff7ac9f083","modified":1722500710075},{"_id":"themes/butterfly/languages/default.yml","hash":"90e9e2f36dc51aa77eb7804ae048b4876035b12d","modified":1722500710090},{"_id":"themes/butterfly/languages/en.yml","hash":"68127be0e6b44cfc5f31353d8b275c02939b3ff9","modified":1722500710092},{"_id":"themes/butterfly/languages/zh-CN.yml","hash":"2dcc70a011b37890215ae0fd6d8f8c78aa8af6b0","modified":1722500710093},{"_id":"themes/butterfly/languages/zh-TW.yml","hash":"1392e7b8c678cdfb54f55523693e66abc7d80538","modified":1722500710094},{"_id":"themes/butterfly/layout/archive.pug","hash":"bc77220dfc269b8faad0930e1a4142ebf68165e5","modified":1722500710096},{"_id":"themes/butterfly/layout/category.pug","hash":"bf979aec88d78b644fc5d31518f8679ad7625792","modified":1722500710097},{"_id":"themes/butterfly/layout/page.pug","hash":"bf2d6c6d2d156777b55292e51be02b0b3acf0af8","modified":1722500710219},{"_id":"themes/butterfly/layout/post.pug","hash":"fdbb508b5e6dec30fb8753c5a7fdd494410c4fc0","modified":1722500710220},{"_id":"themes/butterfly/layout/index.pug","hash":"648dcbdb3d145a710de81c909e000e8664d2ac9c","modified":1722500710218},{"_id":"themes/butterfly/layout/tag.pug","hash":"4bb5efc6dabdf1626685bf6771aaa1467155ae86","modified":1722500710222},{"_id":"themes/butterfly/.github/ISSUE_TEMPLATE/config.yml","hash":"63ad2249ad09fb3fe21bd5ff9adefb304a7ab24a","modified":1722500710077},{"_id":"themes/butterfly/.github/ISSUE_TEMPLATE/bug_report.yml","hash":"eed9190301095b35081aa2658204cc3f15b9f5e1","modified":1722500710076},{"_id":"themes/butterfly/.github/workflows/stale.yml","hash":"4040c76547e270aaf184e9b219a44ca41bbb1b9f","modified":1722500710082},{"_id":"themes/butterfly/.github/ISSUE_TEMPLATE/feature_request.yml","hash":"6e0f9470b18bd37d4891282ac73d61676b040e8c","modified":1722500710079},{"_id":"themes/butterfly/.github/workflows/publish.yml","hash":"e320b40c051bae1549156cd5ea4a51383cf78598","modified":1722500710080},{"_id":"themes/butterfly/layout/includes/404.pug","hash":"aace9ddff469de4226e47a52ede1c81e66d66d5c","modified":1722500710100},{"_id":"themes/butterfly/layout
/includes/additional-js.pug","hash":"50eea5aa78cdeb6c72dd22f0aeabc407cc0f712e","modified":1722500710101},{"_id":"themes/butterfly/layout/includes/footer.pug","hash":"8715948b93e7508b84d913be1969b28c6b067b9b","modified":1722500710102},{"_id":"themes/butterfly/layout/includes/head.pug","hash":"ea8d4e8ac6af93cd268ba8f6ffcb80417bc2501e","modified":1722500710103},{"_id":"themes/butterfly/layout/includes/layout.pug","hash":"96df62e34661d8ca4a45267286127479e5178a79","modified":1722500710123},{"_id":"themes/butterfly/scripts/events/404.js","hash":"f1d1c378356b776e9b2a8411e6dca88dc8c3245c","modified":1722500710226},{"_id":"themes/butterfly/layout/includes/sidebar.pug","hash":"9277fead4c29dbe93976f078adaa26e8f9253da3","modified":1722500710140},{"_id":"themes/butterfly/scripts/events/cdn.js","hash":"7864ba45716c51aef8d8b04fd4bc212e0008ce3b","modified":1722500710226},{"_id":"themes/butterfly/layout/includes/rightside.pug","hash":"f448bf73103b88de4443e52d600e871cf3de3e32","modified":1722500710138},{"_id":"themes/butterfly/scripts/events/comment.js","hash":"95479790234c291b064d031577d71214cdd1d820","modified":1722500710229},{"_id":"themes/butterfly/layout/includes/pagination.pug","hash":"c5c58714fb3cb839653e5c32e6094784c8662935","modified":1722500710135},{"_id":"themes/butterfly/scripts/events/init.js","hash":"ce68e84a9ccfcf91100befbaa9afc392a0cd93bb","modified":1722500710229},{"_id":"themes/butterfly/scripts/events/stylus.js","hash":"0a336dfe5ed08952fa0df1532421df38a74a20d6","modified":1722500710231},{"_id":"themes/butterfly/scripts/events/merge_config.js","hash":"b1dfc3c898b886eab1241b068fc27d7a26a3b7d2","modified":1722500710230},{"_id":"themes/butterfly/scripts/events/welcome.js","hash":"f59e10305fef59ea3e62a7395106c0927582879d","modified":1722500710232},{"_id":"themes/butterfly/scripts/filters/post_lazyload.js","hash":"5ed2d7ef240c927fe1b7a7fb5bf9e55e2bfd55a5","modified":1722500710233},{"_id":"themes/butterfly/scripts/filters/random_cover.js","hash":"0df22d7dbfa766a65cb6032a1f003348f4307cfe","modified":1722500710234},{"_id":"themes/butterfly/scripts/helpers/aside_categories.js","hash":"cdd992c8577d583c237b6aac9f5077d8200879b2","modified":1722500710237},{"_id":"themes/butterfly/scripts/helpers/findArchiveLength.js","hash":"b12895e0765d596494e5526d121de0dd5a7c23d3","modified":1722500710238},{"_id":"themes/butterfly/scripts/helpers/aside_archives.js","hash":"4f712b4ea383b59a3122683db1d54c04a79ccc5d","modified":1722500710236},{"_id":"themes/butterfly/scripts/helpers/related_post.js","hash":"76343ac8422c9c8539082e77eda6ffee4b877eb2","modified":1722500710241},{"_id":"themes/butterfly/scripts/helpers/page.js","hash":"c74d6a9b8f71e69447f7847a5f5e81555d68b140","modified":1722500710240},{"_id":"themes/butterfly/scripts/helpers/inject_head_js.js","hash":"b55f71347d2ead097c7f98c0ec792b091433345c","modified":1722500710239},{"_id":"themes/butterfly/scripts/helpers/series.js","hash":"17c0095bc8d612a268cdcab000b1742dc4c6f811","modified":1722500710243},{"_id":"themes/butterfly/scripts/tag/button.js","hash":"164d5f1c2d1b4cb5a813a6fc574016743a53c019","modified":1722500710244},{"_id":"themes/butterfly/scripts/tag/flink.js","hash":"3ba7677969ff01fab06fc6713455ddc6861f0024","modified":1722500710245},{"_id":"themes/butterfly/scripts/tag/gallery.js","hash":"7ec77b3093f5de67e7032f40a5b12f1389f6f6ff","modified":1722500710247},{"_id":"themes/butterfly/scripts/tag/hide.js","hash":"5d08c3552f7d3c80a724ca628bff66321abe2e5a","modified":1722500710248},{"_id":"themes/butterfly/scripts/tag/label.js","hash":"b013dc0a3d57d2caa18b89263f
23871da9ec456d","modified":1722500710248},{"_id":"themes/butterfly/scripts/tag/inlineImg.js","hash":"c863d2732ce4bdc084f2d0db92f50f80328c1007","modified":1722500710248},{"_id":"themes/butterfly/scripts/tag/mermaid.js","hash":"289f85847c58f0b2b7d98a68e370a2896edb8949","modified":1722500710250},{"_id":"themes/butterfly/scripts/tag/note.js","hash":"e68d8d21f3a86e3646907a3685550ee20e8d4a9f","modified":1722500710252},{"_id":"themes/butterfly/scripts/tag/score.js","hash":"35d54adc92e717cc32e13515122b025fd1a98ea2","modified":1722500710252},{"_id":"themes/butterfly/scripts/tag/series.js","hash":"dc56e5182dd3813dc977c9bf8556dcc7615e467b","modified":1722500710252},{"_id":"themes/butterfly/scripts/tag/tabs.js","hash":"7c448886f230adb4f4a0208c88fff809abcb5637","modified":1722500710254},{"_id":"themes/butterfly/scripts/tag/timeline.js","hash":"e611074a5a7f489a8b04afac0a3f7f882ce26532","modified":1722500710255},{"_id":"themes/butterfly/source/css/index.styl","hash":"b13d96924a5534bff91d75566b196ac87b4fac22","modified":1722500710313},{"_id":"themes/butterfly/source/css/var.styl","hash":"950250f66faeb611a67540e0fa6cedbcf5a7a321","modified":1722500710313},{"_id":"themes/butterfly/source/img/404.jpg","hash":"fb4489bc1d30c93d28f7332158c1c6c1416148de","modified":1722500710315},{"_id":"themes/butterfly/source/img/avatar.jpg","hash":"cb0941101c6a6b8f762ce6ffc3c948641e7f642f","modified":1722500710316},{"_id":"themes/butterfly/source/img/favicon.png","hash":"3cf89864b4f6c9b532522a4d260a2e887971c92d","modified":1722500710317},{"_id":"themes/butterfly/source/img/friend_404.gif","hash":"8d2d0ebef70a8eb07329f57e645889b0e420fa48","modified":1722500710319},{"_id":"themes/butterfly/source/js/main.js","hash":"59cd756a94ecdf3ec7b18f50691a8a6305f7a65a","modified":1722500710321},{"_id":"themes/butterfly/source/js/tw_cn.js","hash":"d776c670e4076ad6049dbb64cdee7a734b51d37f","modified":1722500710326},{"_id":"themes/butterfly/source/js/utils.js","hash":"7b871fe0c4456660cff4c7b9cc4ed089adac2caf","modified":1722500710327},{"_id":"themes/butterfly/layout/includes/head/analytics.pug","hash":"c7666a10448edd93f5ace37296051b7670495f1b","modified":1722500710106},{"_id":"themes/butterfly/layout/includes/head/Open_Graph.pug","hash":"c8dbdfe6145a0bc6f7691c9551be8169a2698f0a","modified":1722500710105},{"_id":"themes/butterfly/layout/includes/head/google_adsense.pug","hash":"f29123e603cbbcc6ce277d4e8f600ba67498077c","modified":1722500710109},{"_id":"themes/butterfly/layout/includes/head/config.pug","hash":"39e1ca0a54eb5fd3688a78737417a1aaa50914c9","modified":1722500710108},{"_id":"themes/butterfly/layout/includes/head/config_site.pug","hash":"bd5dd5452e28a4fe94c3241a758ec6f4fdb7a149","modified":1722500710109},{"_id":"themes/butterfly/layout/includes/head/pwa.pug","hash":"6dc2c9b85df9ab4f5b554305339fd80a90a6cf43","modified":1722500710113},{"_id":"themes/butterfly/layout/includes/head/preconnect.pug","hash":"a7c929b90ae52b78b39b1728e3ab0e3db1cb7b9a","modified":1722500710111},{"_id":"themes/butterfly/layout/includes/header/index.pug","hash":"1bef867c799ba158c5417272fb137539951aa120","modified":1722500710117},{"_id":"themes/butterfly/layout/includes/header/menu_item.pug","hash":"ca8bcd90ad9467819330bfe7c02b76322754bccf","modified":1722500710118},{"_id":"themes/butterfly/layout/includes/head/site_verification.pug","hash":"5168caadc4cf541f5d6676a9c5e8ae47a948f9ad","modified":1722500710114},{"_id":"themes/butterfly/layout/includes/header/nav.pug","hash":"962ee70a35e60a13c31eea47d16b9f98069fe417","modified":1722500710119},{"_id":"themes/butterfly/la
yout/includes/header/post-info.pug","hash":"cc99b2dc5c6b1f74391b0da609853ebc11de9610","modified":1722500710120},{"_id":"themes/butterfly/layout/includes/header/social.pug","hash":"7a641b5dd45b970e1dafd1433eb32ea149e55cf2","modified":1722500710121},{"_id":"themes/butterfly/layout/includes/loading/fullpage-loading.pug","hash":"766baca6ddce49d1724a02312387b292ff2d0bdc","modified":1722500710125},{"_id":"themes/butterfly/layout/includes/loading/index.pug","hash":"00ae419f527d8225a2dc03d4f977cec737248423","modified":1722500710126},{"_id":"themes/butterfly/layout/includes/loading/pace.pug","hash":"a6fde4835d6460ce7baf792fd5e1977fad73db25","modified":1722500710127},{"_id":"themes/butterfly/layout/includes/page/categories.pug","hash":"5276a8d2835e05bd535fedc9f593a0ce8c3e8437","modified":1722500710131},{"_id":"themes/butterfly/layout/includes/page/default-page.pug","hash":"e9459f122af7b733398578f9f0f8ab3c5e12a217","modified":1722500710131},{"_id":"themes/butterfly/layout/includes/page/flink.pug","hash":"e37681bc9c169d4220f26ecda2b3d5c02b6b9a0f","modified":1722500710133},{"_id":"themes/butterfly/layout/includes/page/tags.pug","hash":"12be059c536490af216a397e8f2a7abbf6d4610e","modified":1722500710134},{"_id":"themes/butterfly/layout/includes/mixins/article-sort.pug","hash":"9155f01d4c644a2e19b2b13b2d3c6d5e34dd0abf","modified":1722500710128},{"_id":"themes/butterfly/layout/includes/mixins/post-ui.pug","hash":"90eb453b14f6b5c25bfd8d28aa67783603a1411d","modified":1722500710129},{"_id":"themes/butterfly/layout/includes/post/post-copyright.pug","hash":"0abad416b1974a17e5be7817931d5fe799180170","modified":1722500710136},{"_id":"themes/butterfly/layout/includes/post/reward.pug","hash":"912df10a053db3135968e92b6fd1a707ee94c968","modified":1722500710138},{"_id":"themes/butterfly/layout/includes/third-party/effect.pug","hash":"43014bfc63583d3ee8808d526dd165848c0ed52f","modified":1722500710177},{"_id":"themes/butterfly/layout/includes/third-party/prismjs.pug","hash":"08979afbfecb4476a5ae8e360947b92624d285b8","modified":1722500710194},{"_id":"themes/butterfly/layout/includes/third-party/aplayer.pug","hash":"e939344fd389aeb11864ee697d5fd9b036d8325f","modified":1722500710146},{"_id":"themes/butterfly/layout/includes/third-party/pangu.pug","hash":"f0898509da70388b5c532f19e762756d74080200","modified":1722500710191},{"_id":"themes/butterfly/layout/includes/third-party/pjax.pug","hash":"9b734d99963f3e7f562597dcf60485ccbf6e961c","modified":1722500710192},{"_id":"themes/butterfly/layout/includes/widget/card_archives.pug","hash":"73d33b6930e7944187a4b3403daf25d27077a2dd","modified":1722500710206},{"_id":"themes/butterfly/layout/includes/third-party/subtitle.pug","hash":"dfb5e16a7e7106bb20b2ac2d0df1251d0fc79609","modified":1722500710202},{"_id":"themes/butterfly/layout/includes/widget/card_announcement.pug","hash":"21e019bdc3b1e796bb00976bb29af2d51f873624","modified":1722500710205},{"_id":"themes/butterfly/layout/includes/widget/card_ad.pug","hash":"a8312b527493dabbadbb1280760168d3bc909a3b","modified":1722500710204},{"_id":"themes/butterfly/layout/includes/widget/card_bottom_self.pug","hash":"1dba77d250eeebfb6e293d504352c7e9ea31980b","modified":1722500710208},{"_id":"themes/butterfly/layout/includes/widget/card_author.pug","hash":"ab037bf5794638bd30da4cf7cf106e5d03b5f696","modified":1722500710207},{"_id":"themes/butterfly/layout/includes/widget/card_categories.pug","hash":"66e383b4ef374951eb87dd1bf4cdb7a667193fb5","modified":1722500710209},{"_id":"themes/butterfly/layout/includes/widget/card_newest_comment.pug","hash":"8e22
f53886a57a68286970d8af8b4c950fd4a1d7","modified":1722500710210},{"_id":"themes/butterfly/layout/includes/widget/card_post_toc.pug","hash":"d48d77af1670bd568d784794408bf524a448bfcc","modified":1722500710211},{"_id":"themes/butterfly/layout/includes/widget/card_post_series.pug","hash":"e0bb72fa0ce15964b11b8fe421cae3432394e35f","modified":1722500710210},{"_id":"themes/butterfly/layout/includes/widget/card_recent_post.pug","hash":"bb842d2aa6469d65bf06af1372f0a19a9e4ef44c","modified":1722500710214},{"_id":"themes/butterfly/layout/includes/widget/card_tags.pug","hash":"842b772a387b576550fa127030e1c2e9bf65716d","modified":1722500710215},{"_id":"themes/butterfly/layout/includes/widget/card_top_self.pug","hash":"7b5ae404a1205546b7de4be42291315cf918f2b3","modified":1722500710215},{"_id":"themes/butterfly/layout/includes/widget/card_webinfo.pug","hash":"12185713f9ca08984fc74e3b69d8cd6828d23da8","modified":1722500710216},{"_id":"themes/butterfly/layout/includes/widget/index.pug","hash":"8df529f71e25f1c0a00e533de7944ed3d1ba7bd8","modified":1722500710217},{"_id":"themes/butterfly/source/css/_global/function.styl","hash":"e920dae9ce00177922468db49240f5aca0af4f64","modified":1722500710258},{"_id":"themes/butterfly/source/css/_global/index.styl","hash":"0421da07907b3d98df64239e073b23fbb3f04149","modified":1722500710259},{"_id":"themes/butterfly/source/css/_highlight/highlight.styl","hash":"41054740cfbd1357138785464f6859681ca58493","modified":1722500710260},{"_id":"themes/butterfly/source/css/_highlight/theme.styl","hash":"3c178608406c31d768af355ef1d7326da37cc75f","modified":1722500710268},{"_id":"themes/butterfly/source/css/_layout/aside.styl","hash":"aae70ddd126b2e40158e45036abecbfa33cbfbba","modified":1722500710270},{"_id":"themes/butterfly/source/css/_layout/chat.styl","hash":"792a04d36de32f230ca3256ad87a90fe8392f333","modified":1722500710272},{"_id":"themes/butterfly/source/css/_layout/comments.styl","hash":"fbfce4d67cacd1df22fb73d89d008693f59d9d91","modified":1722500710273},{"_id":"themes/butterfly/source/css/_layout/head.styl","hash":"dd5d9a5631b682610ea699541b8246ceaa56fddb","modified":1723206391886},{"_id":"themes/butterfly/source/css/_layout/footer.styl","hash":"5e27f7842af82ff7498d4b59787ce9ca90fa9e6f","modified":1722500710275},{"_id":"themes/butterfly/source/css/_layout/post.styl","hash":"7ae27854a737a02eca89b0b92db94cb298fef59e","modified":1722500710280},{"_id":"themes/butterfly/source/css/_layout/pagination.styl","hash":"bd099f7d3adef4b7edd24c0a25a07415b156e587","modified":1722500710278},{"_id":"themes/butterfly/source/css/_layout/relatedposts.styl","hash":"6dcf19c0933c8828a439f801b0f4b256447dec07","modified":1722500710281},{"_id":"themes/butterfly/source/css/_layout/loading.styl","hash":"f0b01bbf321c2c24fdccaee367dd9fd448031a72","modified":1722500710277},{"_id":"themes/butterfly/source/css/_layout/reward.styl","hash":"c0b11a1a5f52e3a6af4e312a8134c93eda18a7dd","modified":1722500710281},{"_id":"themes/butterfly/source/css/_layout/rightside.styl","hash":"0322237e762db401d7b4aa33168d0b9334a9ec26","modified":1722500710282},{"_id":"themes/butterfly/source/css/_layout/sidebar.styl","hash":"80ee9d0bfe5d38aac1f0cdcea5fc88b71d310041","modified":1722500710283},{"_id":"themes/butterfly/source/css/_layout/third-party.styl","hash":"15ea7564b2e3bf46bc91fb6e49c94d057b37caaf","modified":1722500710283},{"_id":"themes/butterfly/source/css/_mode/darkmode.styl","hash":"dbc855795a881f8c805bf5c9c5c4d5d542a648ec","modified":1722500710286},{"_id":"themes/butterfly/source/css/_mode/readmode.styl","hash":"a22fd15048d214
52f0015d0765d295d730203308","modified":1723078297080},{"_id":"themes/butterfly/source/css/_page/404.styl","hash":"a7223a8fcc4fa7b81e552c9a2554be7df9de312e","modified":1722500710289},{"_id":"themes/butterfly/source/css/_page/archives.styl","hash":"5dd1ba997741d02894ff846eda939ad8051c0bb2","modified":1722500710290},{"_id":"themes/butterfly/source/css/_page/common.styl","hash":"df7a51fcabbadab5aa31770e3202a47c9599bbb7","modified":1722500710293},{"_id":"themes/butterfly/source/css/_page/categories.styl","hash":"68bc8cbea25dbb3cdc170f09f9b43ce130547717","modified":1722500710292},{"_id":"themes/butterfly/source/css/_page/flink.styl","hash":"ecc2b2e28c179eb9406fc2c6f00e141078249cdd","modified":1722500710294},{"_id":"themes/butterfly/source/css/_page/tags.styl","hash":"9e35f91847773b915c74a78b8aa66c7bdb950ad0","modified":1722500710296},{"_id":"themes/butterfly/source/css/_page/homepage.styl","hash":"a977cd8161ef4d6ddd5293e81403519076657430","modified":1722500710295},{"_id":"themes/butterfly/source/css/_search/index.styl","hash":"0b23010154e19f37f0c4af0110f9f834d6d41a13","modified":1722500710298},{"_id":"themes/butterfly/source/css/_search/algolia.styl","hash":"37db99299af380e9111dce2a78a5049b301b13e0","modified":1722500710298},{"_id":"themes/butterfly/source/css/_search/local-search.styl","hash":"8a53d7ba5ca2f5eb4124b684e7845b648583f658","modified":1722500710301},{"_id":"themes/butterfly/source/css/_tags/button.styl","hash":"62da1de0d5b8453fcecbfacddb16985265638ba5","modified":1722500710302},{"_id":"themes/butterfly/source/css/_tags/gallery.styl","hash":"3e9355b76f87e2ee90f652855282b37ab5ae0b3e","modified":1722500710304},{"_id":"themes/butterfly/source/css/_tags/hexo.styl","hash":"985b183db7b7bfd8f9bdb60494549fb7f850348b","modified":1722500710305},{"_id":"themes/butterfly/source/css/_tags/hide.styl","hash":"b7cf7753479fcf2fe07287ffdb0e568adbba4c18","modified":1722500710306},{"_id":"themes/butterfly/source/css/_tags/inlineImg.styl","hash":"5a873d01fabebcf7ddf7a6b1c2e2e5e2714097f4","modified":1722500710307},{"_id":"themes/butterfly/source/css/_tags/note.styl","hash":"4929382bd60788d34752a66e2fe764ef797a72a0","modified":1722500710308},{"_id":"themes/butterfly/source/css/_tags/label.styl","hash":"2f83bd145b870d80d4b18b0ac603235229a5694e","modified":1722500710307},{"_id":"themes/butterfly/source/css/_tags/tabs.styl","hash":"353b95f9a6c2c1e777d978118cb61f909ccbf89c","modified":1722500710309},{"_id":"themes/butterfly/source/css/_tags/timeline.styl","hash":"07ea7134db7a66c87658116f089fb1a2a6906563","modified":1722500710310},{"_id":"themes/butterfly/source/css/_third-party/normalize.min.css","hash":"8549829fb7d3c21cd9e119884962e8c463a4a267","modified":1722500710312},{"_id":"themes/butterfly/source/js/search/algolia.js","hash":"a7c2fe73cc05ad3525909b86ad0ede1a9f2d3b48","modified":1722500710323},{"_id":"themes/butterfly/layout/includes/third-party/abcjs/abcjs.pug","hash":"8f95aca305b56ccd7c8c7367b03d26db816ebd5f","modified":1722500710143},{"_id":"themes/butterfly/source/js/search/local-search.js","hash":"ab3904451ae1d78903424b8b2ef815c8571e1749","modified":1722500710325},{"_id":"themes/butterfly/layout/includes/third-party/abcjs/index.pug","hash":"58f37823f6cd9a194fb50f7ca7c2233e49939034","modified":1722500710144},{"_id":"themes/butterfly/layout/includes/third-party/card-post-count/disqus.pug","hash":"d6fff5a7f84c8b09f282f9ddc0020a68a8aac9ea","modified":1722500710148},{"_id":"themes/butterfly/layout/includes/third-party/card-post-count/artalk.pug","hash":"b03ee8625149191f9d5d057bbc9824b68d8dd0c4","modified":1
722500710147},{"_id":"themes/butterfly/layout/includes/third-party/card-post-count/index.pug","hash":"846cabae287ae31b3bbfac3da022475713dd5ecc","modified":1722500710151},{"_id":"themes/butterfly/layout/includes/third-party/card-post-count/fb.pug","hash":"4b98145d6584d586cabf033493282afc72ae816a","modified":1722500710149},{"_id":"themes/butterfly/layout/includes/third-party/card-post-count/remark42.pug","hash":"716dc463fe4ef5112e7018ed60804125fdfa5cad","modified":1722500710151},{"_id":"themes/butterfly/layout/includes/third-party/card-post-count/valine.pug","hash":"cd4fc9c5a61608a5dedf645c1295430a1623040f","modified":1722500710153},{"_id":"themes/butterfly/layout/includes/third-party/chat/chatra.pug","hash":"08a85e52fc800d3562df869e5e2613313e76fce6","modified":1722500710156},{"_id":"themes/butterfly/layout/includes/third-party/chat/crisp.pug","hash":"09d2ab2570b67e6f09244a898ccab5567cb82ace","modified":1722500710156},{"_id":"themes/butterfly/layout/includes/third-party/card-post-count/twikoo.pug","hash":"7e233f872aea6fd6beccdc9efd86b1bf9ec9f12d","modified":1722500710152},{"_id":"themes/butterfly/layout/includes/third-party/chat/daovoice.pug","hash":"0d960849d5b05d27ec87627b983ca35f2411b9e8","modified":1722500710158},{"_id":"themes/butterfly/layout/includes/third-party/card-post-count/waline.pug","hash":"fd2320ee25507bb8ef49f932c2d170586b44ea4d","modified":1722500710154},{"_id":"themes/butterfly/layout/includes/third-party/chat/index.pug","hash":"1157118db9f5d7c0c5a0fc7c346f6e934ca00d52","modified":1722500710159},{"_id":"themes/butterfly/layout/includes/third-party/chat/messenger.pug","hash":"799da8f3015e6fe440681b21644bcb3810a5518c","modified":1722500710160},{"_id":"themes/butterfly/layout/includes/third-party/chat/tidio.pug","hash":"6d40b521eec4136f6742c548a4445ed593470b1b","modified":1722500710161},{"_id":"themes/butterfly/layout/includes/third-party/math/index.pug","hash":"2afa4c21dd19890f47fb568cfb0d90efb676a253","modified":1722500710179},{"_id":"themes/butterfly/layout/includes/third-party/math/katex.pug","hash":"f0d3eddd2bed68e5517274b3530bfe0fa5057d8e","modified":1722500710180},{"_id":"themes/butterfly/layout/includes/third-party/math/mathjax.pug","hash":"bb944185f4bb9f9a9b9d70ee215f66ccd6d4c6cf","modified":1722500710181},{"_id":"themes/butterfly/layout/includes/third-party/math/mermaid.pug","hash":"c682e4d61017fb0dd2e837bfcc242371f1a13364","modified":1722500710182},{"_id":"themes/butterfly/layout/includes/third-party/newest-comments/disqus-comment.pug","hash":"d8898e427acd91ceb97d6a7ee3acb011ca86b9fc","modified":1722500710184},{"_id":"themes/butterfly/layout/includes/third-party/newest-comments/artalk.pug","hash":"2e36fac4791e99844cd56676898be0dbf5eb4e99","modified":1722500710184},{"_id":"themes/butterfly/layout/includes/third-party/newest-comments/index.pug","hash":"f8b65460c399973090c1fb7ab81e3708c252e7cc","modified":1722500710187},{"_id":"themes/butterfly/layout/includes/third-party/newest-comments/github-issues.pug","hash":"fc8814bd016d039874ec2fc24dcb78587892e2a6","modified":1722500710186},{"_id":"themes/butterfly/layout/includes/third-party/newest-comments/twikoo-comment.pug","hash":"17520a86de12ae585289463c066d3ac91b78a2ff","modified":1722500710188},{"_id":"themes/butterfly/layout/includes/third-party/newest-comments/waline.pug","hash":"0544d91c0bc9e26e0fe1b5ff490f4a8540ed1ee1","modified":1722500710191},{"_id":"themes/butterfly/layout/includes/third-party/newest-comments/remark42.pug","hash":"a4e52188b6effeee1df2a01dcbf4105de76a61a8","modified":1722500710188},{"_id":"themes/bu
tterfly/layout/includes/third-party/comments/artalk.pug","hash":"5373b822aa72ddb96f2f1f4baf6c058b40d705d6","modified":1722500710163},{"_id":"themes/butterfly/layout/includes/third-party/comments/disqusjs.pug","hash":"f78c9c20c86d58c7cf099f6f8d6097103d7d43e5","modified":1722500710165},{"_id":"themes/butterfly/layout/includes/third-party/comments/facebook_comments.pug","hash":"11f5dca1432e59f22955aaf4ac3e9de6b286d887","modified":1722500710166},{"_id":"themes/butterfly/layout/includes/third-party/comments/index.pug","hash":"db6713d2b90eb8183f86ac92c26761a8501c0ddb","modified":1722500710169},{"_id":"themes/butterfly/layout/includes/third-party/newest-comments/valine.pug","hash":"ecfff55b2c7f6d87ce4d5028fdf9f8c0bf155c73","modified":1722500710189},{"_id":"themes/butterfly/layout/includes/third-party/comments/disqus.pug","hash":"364d1fd655baca9132038ef1e312abde2c0bc7de","modified":1722500710164},{"_id":"themes/butterfly/layout/includes/third-party/comments/gitalk.pug","hash":"1c86c8fc1a28514a02a1f6a25ca9ec05eb3955b7","modified":1722500710168},{"_id":"themes/butterfly/layout/includes/third-party/comments/js.pug","hash":"3abbaaa4ea575c45b3cebffd40bad1acc6ffce84","modified":1722500710170},{"_id":"themes/butterfly/layout/includes/third-party/comments/giscus.pug","hash":"1eab7ca1cb16c6786f9c3ca0efef8cc15e444ab4","modified":1722500710167},{"_id":"themes/butterfly/layout/includes/third-party/comments/remark42.pug","hash":"7f450664e6323a076ae59c393b0f22167cfa82e5","modified":1722500710172},{"_id":"themes/butterfly/layout/includes/third-party/comments/twikoo.pug","hash":"9942a903227350960c1d0716e59516ae79ac24a8","modified":1722500710173},{"_id":"themes/butterfly/layout/includes/third-party/comments/utterances.pug","hash":"b65a42167df5fb07e2a63f312a58c321d3112a90","modified":1722500710174},{"_id":"themes/butterfly/layout/includes/third-party/comments/waline.pug","hash":"efb72547fc2d470a124f5636391128dc59627498","modified":1722500710176},{"_id":"themes/butterfly/layout/includes/third-party/comments/valine.pug","hash":"4ed7c74087e81c6fcaf4fca7dced58b4e19f4cb1","modified":1722500710175},{"_id":"themes/butterfly/layout/includes/third-party/search/docsearch.pug","hash":"52a06a2e039f44383085333cac69f3f4e7d0ad3a","modified":1722500710196},{"_id":"themes/butterfly/layout/includes/third-party/search/algolia.pug","hash":"90338ac4cd114d324fe1caaaeea8be9ca05d6a46","modified":1722500710195},{"_id":"themes/butterfly/layout/includes/third-party/search/index.pug","hash":"3adcf28a8d205ea3ee19828eda0e668702fac07a","modified":1722500710197},{"_id":"themes/butterfly/layout/includes/third-party/search/local-search.pug","hash":"420a86e73d0d748ac234fd00d06d9e433ca5e3f2","modified":1722500710198},{"_id":"themes/butterfly/layout/includes/third-party/share/addtoany.pug","hash":"1f02a26730e5f36cc2dfec7ff4d5c93a099ed5ba","modified":1722500710199},{"_id":"themes/butterfly/layout/includes/third-party/share/index.pug","hash":"c16ee69b5ca8db016db0508d014ae0867c4ce929","modified":1722500710201},{"_id":"themes/butterfly/layout/includes/third-party/share/share-js.pug","hash":"8106bd031586f075a994956ee4438eb13be25d7b","modified":1722500710202},{"_id":"themes/butterfly/layout/includes/third-party/comments/livere.pug","hash":"09c2ef4bc6d005f96dfa48b1d9af1ec095c5266d","modified":1722500710171},{"_id":"themes/butterfly/source/css/_highlight/highlight/diff.styl","hash":"6e77f1ca0cfb0db6b028f5c0238780e66d344f3d","modified":1722500710263},{"_id":"themes/butterfly/source/css/_highlight/highlight/index.styl","hash":"fc702a4614d0562a381907b083f71ba63d3
01d86","modified":1722500710264},{"_id":"themes/butterfly/source/css/_highlight/prismjs/diff.styl","hash":"1309292f1c8c53d96cd7333507b106bcc24ca8fc","modified":1722500710265},{"_id":"themes/butterfly/source/css/_highlight/prismjs/index.styl","hash":"01ff9e77eb1bd454bec65a6ff5972c8e219bc708","modified":1722500710267},{"_id":"themes/butterfly/source/css/_highlight/prismjs/line-number.styl","hash":"7c9cc43e1d2577f7151039d58e603c30860fd281","modified":1722500710267},{"_id":"source/img/peiqian.png","hash":"2f077f1fff014ee448cd58b57ff83901702e2d88","modified":1723000686874},{"_id":"source/img/dingyue.png","hash":"c6afcd1124d84f07caeefcb895be3f3a5b301678","modified":1723001642809},{"_id":"source/img/site01.jpg","hash":"d93084432feb123fd5d781210c3a2c4db43c10ec","modified":1722524985872},{"_id":"public/search.xml","hash":"6c47e541d6f5d9c21d8f8033fdb15e58352218ab","modified":1736760832372},{"_id":"public/categories/index.html","hash":"4cdbd3cb944cfabfbed8e61e3a37b5c9b83b7396","modified":1736760832372},{"_id":"public/archives/page/2/index.html","hash":"52900ebafb545e0544a69294722a53d0e719cac1","modified":1736760832372},{"_id":"public/archives/2024/page/2/index.html","hash":"b1d54022b8542395a54eb967ea66d6f461f68728","modified":1736760832372},{"_id":"public/archives/2024/08/index.html","hash":"a1f0151ea236b85f139826b78163ae1cebc2ad7f","modified":1736760832372},{"_id":"public/archives/2024/09/index.html","hash":"15abbace3514580e4a17d5a81f84675f885bd246","modified":1736760832372},{"_id":"public/archives/2025/index.html","hash":"978c0ae589f419937f06fd104af992f35b2ef1d7","modified":1736760832372},{"_id":"public/archives/2025/01/index.html","hash":"33a860eda36b5f3ea9a591137ae3aef7fc025d8f","modified":1736760832372},{"_id":"public/categories/古文观止/index.html","hash":"67f8d76d169c9a10fdfd3c5be7e9625a67f9a646","modified":1736760832372},{"_id":"public/tags/machinelearning/index.html","hash":"afa7ad5f79c8968ab0e9426d33a4ad18eee12092","modified":1736760832372},{"_id":"public/tags/uniapp/index.html","hash":"c2aecb7e9071741459b058c9cbe1622c284ff2d1","modified":1736760832372},{"_id":"public/tags/古文观止/index.html","hash":"4968d2c4b32f679b03f16b70e1ea2f651daa2fb0","modified":1736760832372},{"_id":"public/tags/网络代理/index.html","hash":"ba02763e1f1cda7ce2ee2bd96c317e5363dfedba","modified":1736760832372},{"_id":"public/about/index.html","hash":"9b1b4ee3ea7386ee99c3dcaf9bc0e32ba7495b79","modified":1736760832372},{"_id":"public/music/index.html","hash":"38ba22982df8db5056af970dc8c04746cc6fc1f2","modified":1736760832372},{"_id":"public/movies/index.html","hash":"6c44d8c9acaacf47c729fbe0c33a896b3408788b","modified":1736760832372},{"_id":"public/link/index.html","hash":"f006ae384abf752c60be2003ecdea9b0eca170be","modified":1736760832372},{"_id":"public/tags/index.html","hash":"16d82ba045f2c6414335f7a4d279247e7ec4bd93","modified":1736760832372},{"_id":"public/posts/29139.html","hash":"fb4e521de1db94e75296bcf43509e1dc87979898","modified":1736760832372},{"_id":"public/posts/61253.html","hash":"41838e1ac5a53e039dfe8619cf71c30e71fe0bc9","modified":1736760832372},{"_id":"public/posts/61252.html","hash":"dbfd21919d87b6d964aee3ad71161d425d03c69f","modified":1736760832372},{"_id":"public/posts/61251.html","hash":"77bf07a82f0f5fa1d0e313bac2afbe8468599501","modified":1736760832372},{"_id":"public/posts/16107.html","hash":"72cf9ac313bdbc4d9967231cdfa015c43e04413c","modified":1736760832372},{"_id":"public/posts/34849.html","hash":"69833167d2006a4d8bcc9b0aa34615624371d3b3","modified":1736760832372},{"_id":"public/posts/58638.html","hash":"1ac6b
0b4d5d8a3368d6df1afdf0029eaaf5c7080","modified":1736760832372},{"_id":"public/posts/41168.html","hash":"6084f859746d14a740f4f0d12394c63a8dd95061","modified":1736760832372},{"_id":"public/posts/14011.html","hash":"f7aa3abf070be95b9d2b95a1640745166669bdd9","modified":1736760832372},{"_id":"public/posts/58817.html","hash":"e575a3cf297bb75c2d4163e92abcaacbb8a67120","modified":1736760832372},{"_id":"public/posts/33957.html","hash":"5ae8f7f9b1ad2b3fcd7d741198f38753672e647a","modified":1736760832372},{"_id":"public/posts/47807.html","hash":"ff8732adbc561017db8fcd9574c8b00ae8252f80","modified":1736760832372},{"_id":"public/posts/1441.html","hash":"68d3256ab21e0f636aa3cf367767c90004cfaa16","modified":1736760832372},{"_id":"public/archives/index.html","hash":"9153281bb901f9633c8df273e9644321aadf7a53","modified":1736760832372},{"_id":"public/archives/2024/index.html","hash":"b110092938c04342743fb57f5257f3e537933666","modified":1736760832372},{"_id":"public/index.html","hash":"6b2a8dbc88547a054a03925f1b0dc56d211ccb01","modified":1736760832372},{"_id":"public/page/2/index.html","hash":"557902c44e4db3b45a61b0290f53df17292d68da","modified":1736760832372},{"_id":"public/img/favicon.png","hash":"3cf89864b4f6c9b532522a4d260a2e887971c92d","modified":1736760832372},{"_id":"public/img/friend_404.gif","hash":"8d2d0ebef70a8eb07329f57e645889b0e420fa48","modified":1736760832372},{"_id":"public/img/404.jpg","hash":"fb4489bc1d30c93d28f7332158c1c6c1416148de","modified":1736760832372},{"_id":"public/img/avatar.jpg","hash":"cb0941101c6a6b8f762ce6ffc3c948641e7f642f","modified":1736760832372},{"_id":"public/js/search/local-search.js","hash":"e1f60ebac53a3f596fd0a4769b4f9275c48c6542","modified":1736760832372},{"_id":"public/js/search/algolia.js","hash":"108988d046da9a4716148df43b3975217c8ceaae","modified":1736760832372},{"_id":"public/css/var.css","hash":"da39a3ee5e6b4b0d3255bfef95601890afd80709","modified":1736760832372},{"_id":"public/js/utils.js","hash":"8e6b48d294e7aeaba8ff6348c43b2271cf865547","modified":1736760832372},{"_id":"public/css/index.css","hash":"a86592daf1fcadb01092b449a0eb5100fc2351fb","modified":1736760832372},{"_id":"public/js/main.js","hash":"0dac585446445e0c419b86eec5580bc9b0657dc6","modified":1736760832372},{"_id":"public/js/tw_cn.js","hash":"f8d2e3f31468991a7f5171cbfdb157dfb86d3372","modified":1736760832372},{"_id":"public/img/000001.png","hash":"ad86c3b72174364d462bdab1d09540bd79eb123c","modified":1736760832372},{"_id":"public/img/yiyuan.png","hash":"817a89509a8ebcddff6b369979d53ecf44a30a9f","modified":1736760832372},{"_id":"public/img/peiqian.png","hash":"2f077f1fff014ee448cd58b57ff83901702e2d88","modified":1736760832372},{"_id":"public/img/dingyue.png","hash":"c6afcd1124d84f07caeefcb895be3f3a5b301678","modified":1736760832372},{"_id":"public/img/site01.jpg","hash":"d93084432feb123fd5d781210c3a2c4db43c10ec","modified":1736760832372}],"Category":[{"name":"古文观止","_id":"cm5uuk5pb000lgkahbjmh60g8"}],"Data":[{"_id":"link","data":[{"class_name":"友情鏈接","class_desc":"那些人,那些事","link_list":[{"name":"Hexo","link":"https://hexo.io/zh-tw/","avatar":"https://d33wubrfki0l68.cloudfront.net/6657ba50e702d84afb32fe846bed54fba1a77add/827ae/logo.svg","descr":"快速、簡單且強大的網誌框架"}]},{"class_name":"網站","class_desc":"值得推薦的網站","link_list":[{"name":"Youtube","link":"https://www.youtube.com/","avatar":"https://i.loli.net/2020/05/14/9ZkGg8v3azHJfM1.png","descr":"視頻網站"},{"name":"Weibo","link":"https://www.weibo.com/","avatar":"https://i.loli.net/2020/05/14/TLJBum386vcnI1P.png","descr":"中國最大社交分享平台"},{"name":"Twitter","link":"https:/
/twitter.com/","avatar":"https://i.loli.net/2020/05/14/5VyHPQqR6LWF39a.png","descr":"社交分享平台"}]}]}],"Page":[{"title":"About me","date":"2024-08-10T02:35:41.000Z","_content":"\n落花飞舞,翩若惊鸿。\n","source":"about/index.md","raw":"---\ntitle: About me\ndate: 2024-08-10 10:35:41\n---\n\n落花飞舞,翩若惊鸿。\n","updated":"2024-08-10T02:38:14.626Z","path":"about/index.html","comments":1,"layout":"page","_id":"cm5uuk5or0000gkah9t4pbrft","content":"

落花飞舞,翩若惊鸿。

\n","cover":false,"excerpt":"","more":"

落花飞舞,翩若惊鸿。

\n"},{"title":"categories","date":"2024-07-31T00:33:49.000Z","aside":false,"top_img":false,"type":"categories","_content":"\n### category","source":"categories/index.md","raw":"---\ntitle: categories\ndate: 2024-07-31 08:33:49\naside: false\ntop_img: false\ntype: \"categories\"\n---\n\n### category","updated":"2024-07-31T06:37:23.090Z","path":"categories/index.html","comments":1,"layout":"page","_id":"cm5uuk5oy0002gkah98pkb7ey","content":"

category

","cover":false,"excerpt":"","more":"

category

"},{"title":"Music","date":"2024-08-10T02:40:19.000Z","_content":"","source":"music/index.md","raw":"---\ntitle: Music\ndate: 2024-08-10 10:40:19\n---\n","updated":"2024-08-10T02:40:19.897Z","path":"music/index.html","comments":1,"layout":"page","_id":"cm5uuk5oz0004gkah0ance0sr","content":"","cover":false,"excerpt":"","more":""},{"title":"Movies","date":"2024-08-10T02:40:33.000Z","_content":"","source":"movies/index.md","raw":"---\ntitle: Movies\ndate: 2024-08-10 10:40:33\n---\n","updated":"2024-08-10T02:40:33.715Z","path":"movies/index.html","comments":1,"layout":"page","_id":"cm5uuk5p10006gkaha1wk8a0w","content":"","cover":false,"excerpt":"","more":""},{"title":"link","date":"2024-08-10T02:42:35.000Z","type":"link","_content":"","source":"link/index.md","raw":"---\ntitle: link\ndate: 2024-08-10 10:42:35\ntype: \"link\"\n---\n","updated":"2024-08-10T02:42:48.927Z","path":"link/index.html","comments":1,"layout":"page","_id":"cm5uuk5p40009gkah519l94dh","content":"","cover":false,"excerpt":"","more":""},{"title":"tags","date":"2024-07-31T00:32:38.000Z","type":"tags","comments":0,"top_img":false,"_content":"","source":"tags/index.md","raw":"---\ntitle: tags\ndate: 2024-07-31 08:32:38\ntype: \"tags\"\ncomments: false\ntop_img: false\n---\n","updated":"2024-07-31T00:33:00.076Z","path":"tags/index.html","layout":"page","_id":"cm5uuk5p5000bgkah1so256rl","content":"","cover":false,"excerpt":"","more":""}],"Post":[{"title":"Hello World","abbrlink":16107,"_content":"Welcome to [Hexo](https://hexo.io/)! This is your very first post. Check [documentation](https://hexo.io/docs/) for more info. If you get any problems when using Hexo, you can find the answer in [troubleshooting](https://hexo.io/docs/troubleshooting.html) or you can ask me on [GitHub](https://github.com/hexojs/hexo/issues).\n\n## Quick Start\n\n### Create a new post\n\n``` bash\n$ hexo new \"My New Post\"\n```\n\nMore info: [Writing](https://hexo.io/docs/writing.html)\n\n### Run server\n\n``` bash\n$ hexo server\n```\n\nMore info: [Server](https://hexo.io/docs/server.html)\n\n### Generate static files\n\n``` bash\n$ hexo generate\n```\n\nMore info: [Generating](https://hexo.io/docs/generating.html)\n\n### Deploy to remote sites\n\n``` bash\n$ hexo deploy\n```\n\nMore info: [Deployment](https://hexo.io/docs/one-command-deployment.html)\n","source":"_posts/hello-world.md","raw":"---\ntitle: Hello World\nabbrlink: 16107\n---\nWelcome to [Hexo](https://hexo.io/)! This is your very first post. Check [documentation](https://hexo.io/docs/) for more info. If you get any problems when using Hexo, you can find the answer in [troubleshooting](https://hexo.io/docs/troubleshooting.html) or you can ask me on [GitHub](https://github.com/hexojs/hexo/issues).\n\n## Quick Start\n\n### Create a new post\n\n``` bash\n$ hexo new \"My New Post\"\n```\n\nMore info: [Writing](https://hexo.io/docs/writing.html)\n\n### Run server\n\n``` bash\n$ hexo server\n```\n\nMore info: [Server](https://hexo.io/docs/server.html)\n\n### Generate static files\n\n``` bash\n$ hexo generate\n```\n\nMore info: [Generating](https://hexo.io/docs/generating.html)\n\n### Deploy to remote sites\n\n``` bash\n$ hexo deploy\n```\n\nMore info: [Deployment](https://hexo.io/docs/one-command-deployment.html)\n","slug":"hello-world","published":1,"date":"2024-09-11T00:01:10.419Z","updated":"2024-08-09T12:21:50.026Z","comments":1,"layout":"post","photos":[],"_id":"cm5uuk5ou0001gkah0oj18czy","content":"

Welcome to Hexo! This is your very first post. Check documentation for more info. If you get any problems when using Hexo, you can find the answer in troubleshooting or you can ask me on GitHub.

\n

Quick Start

Create a new post

$ hexo new "My New Post"
\n\n

More info: Writing

\n

Run server

$ hexo server
\n\n

More info: Server

\n

Generate static files

$ hexo generate
\n\n

More info: Generating

\n

Deploy to remote sites

$ hexo deploy
\n\n

More info: Deployment

\n","cover":false,"excerpt":"","more":"

Welcome to Hexo! This is your very first post. Check documentation for more info. If you get any problems when using Hexo, you can find the answer in troubleshooting or you can ask me on GitHub.

\n

Quick Start

Create a new post

$ hexo new "My New Post"
\n\n

More info: Writing

\n

Run server

$ hexo server
\n\n

More info: Server

\n

Generate static files

$ hexo generate
\n\n

More info: Generating

\n

Deploy to remote sites

$ hexo deploy
\n\n

More info: Deployment

\n"},{"title":"page","abbrlink":1441,"date":"2024-08-01T01:00:10.000Z","_content":"\n- [deploy](./deploy)\n- ","source":"_posts/frontend/frontend.md","raw":"---\ntitle: page\nabbrlink: 1441\ndate: 2024-08-01 09:00:10\ntags:\n---\n\n- [deploy](./deploy)\n- ","slug":"frontend/frontend","published":1,"updated":"2024-08-09T12:21:50.023Z","comments":1,"layout":"post","photos":[],"_id":"cm5uuk5oy0003gkah1ymyfne9","content":"\n","cover":false,"excerpt":"","more":"\n"},{"title":"机器学习","abbrlink":29139,"mathjax":true,"date":"2025-01-13T09:20:59.000Z","_content":"\n## **k近邻算法(K-Nearest Neighbors)KNN**\n将当前样本的类别归类于距离最近的**k**个样本的类别\n\n#### **距离公式(2维)**\n\n- 欧式距离\n$$\nd = \\sqrt{(x_1-y_1)^2 + (x_2 - y_2)^2}\n$$\n- 曼哈顿距离\n$$\nd = |x_1 - x_2| + |y_1 - y_2|\n$$\n- 切比雪夫距离\n$$\nd = \\max\\left(|x_1 - x_2|, |y_1 - y_2|\\right)\n$$\n#### k值选择问题\n\n| k值 | 影响 |\n| --- | ------------------ |\n| 越大 | 模型过拟合,准确率波动较大 |\n| 越小 | 模型欠拟合,准确率趋于稳定但可能较低 |\n### 特征预处理\n> 通过一些转换函数将特征数据转换成更加适合算法模型的特征数据过程 \n- 归一化\n 将数据变换到指定区间(默认是\\[0,1\\])\n $$ x' = \\frac{x- x_{\\text {min}}}{x_{\\text{max}} - x_{\\text{min}}} $$\n 若需要缩放到任意区间 \\(\\[a, b\\]\\),公式为: $$ x' = a + \\frac{(x - x_{\\text{min}}) \\cdot (b - a)}{x_{\\text{max}} - x_{\\text{min}}} $$\n 其中:\\( \\[a, b\\] \\):目标区间的范围\n 归一化受到数据集的异常值的影响,需要进行标准化处理(更加合理)\n ``` python\n\t from sklearn.preprocessing import MinMaxScaler # 归一化\n\t```\n- 标准化\n 将数据调整为均值为 0,标准差为 1 的标准正态分布\n $$ z = \\frac{x - \\mu}{\\sigma} $$\n \\( z \\):标准化后的值 \\( x \\):原始数据值 \\( $\\mu$ \\):数据的均值 \\( $\\sigma$\\):数据的标准差\n \n ``` python\n \t from sklearn.preprocessing import StandardScaler # 标准化\n ```\n ","source":"_posts/machinelearning/01.md","raw":"---\ntitle: 机器学习\ntags: machinelearning\nabbrlink: 29139\nmathjax: true\ndate: 2025-01-13 17:20:59\n---\n\n## **k近邻算法(K-Nearest Neighbors)KNN**\n将当前样本的类别归类于距离最近的**k**个样本的类别\n\n#### **距离公式(2维)**\n\n- 欧式距离\n$$\nd = \\sqrt{(x_1-y_1)^2 + (x_2 - y_2)^2}\n$$\n- 曼哈顿距离\n$$\nd = |x_1 - x_2| + |y_1 - y_2|\n$$\n- 切比雪夫距离\n$$\nd = \\max\\left(|x_1 - x_2|, |y_1 - y_2|\\right)\n$$\n#### k值选择问题\n\n| k值 | 影响 |\n| --- | ------------------ |\n| 越大 | 模型过拟合,准确率波动较大 |\n| 越小 | 模型欠拟合,准确率趋于稳定但可能较低 |\n### 特征预处理\n> 通过一些转换函数将特征数据转换成更加适合算法模型的特征数据过程 \n- 归一化\n 将数据变换到指定区间(默认是\\[0,1\\])\n $$ x' = \\frac{x- x_{\\text {min}}}{x_{\\text{max}} - x_{\\text{min}}} $$\n 若需要缩放到任意区间 \\(\\[a, b\\]\\),公式为: $$ x' = a + \\frac{(x - x_{\\text{min}}) \\cdot (b - a)}{x_{\\text{max}} - x_{\\text{min}}} $$\n 其中:\\( \\[a, b\\] \\):目标区间的范围\n 归一化受到数据集的异常值的影响,需要进行标准化处理(更加合理)\n ``` python\n\t from sklearn.preprocessing import MinMaxScaler # 归一化\n\t```\n- 标准化\n 将数据调整为均值为 0,标准差为 1 的标准正态分布\n $$ z = \\frac{x - \\mu}{\\sigma} $$\n \\( z \\):标准化后的值 \\( x \\):原始数据值 \\( $\\mu$ \\):数据的均值 \\( $\\sigma$\\):数据的标准差\n \n ``` python\n \t from sklearn.preprocessing import StandardScaler # 标准化\n ```\n ","slug":"machinelearning/01","published":1,"updated":"2025-01-13T09:26:59.899Z","comments":1,"layout":"post","photos":[],"_id":"cm5uuk5p00005gkahhr400nj5","content":"

k近邻算法(K-Nearest Neighbors)KNN

将当前样本的类别归类于距离最近的k个样本的类别

\n
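下面补充一段最小的示意代码(非原文内容,属于编辑添加的示例):假定使用 scikit-learn 的 KNeighborsClassifier 来演示"按最近的 k 个样本的类别归类",数据集与 k 值均为演示用的假设值。

```python
# 补充示例(非原文):KNN 分类的最小演示,数据与参数均为假设值
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier

X, y = load_iris(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

knn = KNeighborsClassifier(n_neighbors=5)  # k=5:按最近 5 个样本的多数类别归类
knn.fit(X_train, y_train)
print(knn.score(X_test, y_test))           # 测试集准确率
```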

距离公式(2维)

- 欧式距离:$d = \sqrt{(x_1-y_1)^2 + (x_2 - y_2)^2}$
- 曼哈顿距离:$d = |x_1 - x_2| + |y_1 - y_2|$
- 切比雪夫距离:$d = \max\left(|x_1 - x_2|, |y_1 - y_2|\right)$

\n

k值选择问题

| k值 | 影响 |
| --- | --- |
| 越大 | 模型过拟合,准确率波动较大 |
| 越小 | 模型欠拟合,准确率趋于稳定但可能较低 |
\n
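针对上表中 k 值取大取小带来的问题,下面补充一段示意代码(非原文内容):假定用 scikit-learn 的 GridSearchCV,通过交叉验证在若干候选 k 中选出表现最好的一个;候选值仅作演示。

```python
# 补充示例(非原文):用 5 折交叉验证选择 k(n_neighbors)
from sklearn.datasets import load_iris
from sklearn.model_selection import GridSearchCV
from sklearn.neighbors import KNeighborsClassifier

X, y = load_iris(return_X_y=True)
param_grid = {"n_neighbors": [1, 3, 5, 7, 9, 11]}  # 候选 k 值,演示用
search = GridSearchCV(KNeighborsClassifier(), param_grid, cv=5)
search.fit(X, y)
print(search.best_params_, search.best_score_)     # 最优 k 及其交叉验证得分
```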

特征预处理

\n

通过一些转换函数将特征数据转换成更加适合算法模型的特征数据过程

\n
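补充一段示意代码(非原文内容):沿用原文提到的 sklearn.preprocessing,演示归一化(MinMaxScaler,对应 x' = (x - x_min) / (x_max - x_min))与标准化(StandardScaler,对应 z = (x - μ) / σ);示例数据为假设值。

```python
# 补充示例(非原文):特征预处理中的归一化与标准化
import numpy as np
from sklearn.preprocessing import MinMaxScaler, StandardScaler

X = np.array([[1.0, 200.0], [2.0, 300.0], [3.0, 1000.0]])  # 演示数据,第二列数值跨度较大

print(MinMaxScaler().fit_transform(X))    # 归一化:每列缩放到 [0, 1],易受异常值影响
print(StandardScaler().fit_transform(X))  # 标准化:每列均值为 0、标准差为 1
```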
\n\n","cover":false,"excerpt":"","more":"

k近邻算法(K-Nearest Neighbors)KNN

将当前样本的类别归类于距离最近的k个样本的类别

\n

距离公式(2维)

- 欧式距离:$d = \sqrt{(x_1-y_1)^2 + (x_2 - y_2)^2}$
- 曼哈顿距离:$d = |x_1 - x_2| + |y_1 - y_2|$
- 切比雪夫距离:$d = \max\left(|x_1 - x_2|, |y_1 - y_2|\right)$

\n

k值选择问题

| k值 | 影响 |
| --- | --- |
| 越大 | 模型过拟合,准确率波动较大 |
| 越小 | 模型欠拟合,准确率趋于稳定但可能较低 |
\n

特征预处理

\n

通过一些转换函数将特征数据转换成更加适合算法模型的特征数据过程

\n
\n\n"},{"title":"script","abbrlink":34849,"date":"2024-08-17T03:09:24.000Z","_content":"\n### 查看CPU、内存使用率\n```bash\n#!/bin/bash\n\n# 定义颜色\nRED='\\033[0;31m'\nGREEN='\\033[0;32m'\nYELLOW='\\033[0;33m'\nBLUE='\\033[0;34m'\nNC='\\033[0m' # 无颜色\n\nwhile true; do\n # 获取所有进程的CPU使用率和内存使用率\n cpu_usage=$(ps aux | awk '{sum_cpu += $3} END {print sum_cpu}')\n mem_usage=$(ps aux | awk '{sum_mem += $4} END {print sum_mem}')\n \n # 打印结果,带有时间戳、分隔线和颜色高亮\n echo -e \"${BLUE}==============================${NC}\"\n echo -e \"${YELLOW}Timestamp: $(date)${NC}\"\n echo -e \"${BLUE}==============================${NC}\"\n echo -e \"${GREEN}Total CPU usage: ${RED}$cpu_usage%${NC}\"\n echo -e \"${GREEN}Total Memory usage: ${RED}$mem_usage%${NC}\"\n echo -e \"${BLUE}==============================${NC}\"\n \n # 等待5秒后再次执行\n sleep 5\ndone\n\n```\n**保存脚本到/usr/local/bin目录下**\n```bash\n mv usage.sh /usr/local/bin/usage\n```\n\n### Shell脚本编写的基本信息\n\n```bash\n#! /bin/bash\n# -------------------------------------------------\n# Filename: test.sh\n# Version: 1.0\n# Date: 2024/05/02\n# Author: shenjianZ\n# Email: shenjianZLT@gmail.com\n# Website: https://blog.shenjianl.cn\n# Description: this is a test shell\n# CopyRight: 2024 All rights reserved shenjianZ\n# License GPL\n# ------------------------------------------------\n\n\n# Your script logic goes here\n```","source":"_posts/linux/script.md","raw":"---\ntitle: script\nabbrlink: 34849\ndate: 2024-08-17 11:09:24\ntags:\n---\n\n### 查看CPU、内存使用率\n```bash\n#!/bin/bash\n\n# 定义颜色\nRED='\\033[0;31m'\nGREEN='\\033[0;32m'\nYELLOW='\\033[0;33m'\nBLUE='\\033[0;34m'\nNC='\\033[0m' # 无颜色\n\nwhile true; do\n # 获取所有进程的CPU使用率和内存使用率\n cpu_usage=$(ps aux | awk '{sum_cpu += $3} END {print sum_cpu}')\n mem_usage=$(ps aux | awk '{sum_mem += $4} END {print sum_mem}')\n \n # 打印结果,带有时间戳、分隔线和颜色高亮\n echo -e \"${BLUE}==============================${NC}\"\n echo -e \"${YELLOW}Timestamp: $(date)${NC}\"\n echo -e \"${BLUE}==============================${NC}\"\n echo -e \"${GREEN}Total CPU usage: ${RED}$cpu_usage%${NC}\"\n echo -e \"${GREEN}Total Memory usage: ${RED}$mem_usage%${NC}\"\n echo -e \"${BLUE}==============================${NC}\"\n \n # 等待5秒后再次执行\n sleep 5\ndone\n\n```\n**保存脚本到/usr/local/bin目录下**\n```bash\n mv usage.sh /usr/local/bin/usage\n```\n\n### Shell脚本编写的基本信息\n\n```bash\n#! /bin/bash\n# -------------------------------------------------\n# Filename: test.sh\n# Version: 1.0\n# Date: 2024/05/02\n# Author: shenjianZ\n# Email: shenjianZLT@gmail.com\n# Website: https://blog.shenjianl.cn\n# Description: this is a test shell\n# CopyRight: 2024 All rights reserved shenjianZ\n# License GPL\n# ------------------------------------------------\n\n\n# Your script logic goes here\n```","slug":"linux/script","published":1,"updated":"2024-08-18T16:05:45.731Z","comments":1,"layout":"post","photos":[],"_id":"cm5uuk5p20007gkah9z31ferj","content":"

查看CPU、内存使用率

#!/bin/bash

# 定义颜色
RED='\\033[0;31m'
GREEN='\\033[0;32m'
YELLOW='\\033[0;33m'
BLUE='\\033[0;34m'
NC='\\033[0m' # 无颜色

while true; do
    # 获取所有进程的CPU使用率和内存使用率
    cpu_usage=$(ps aux | awk '{sum_cpu += $3} END {print sum_cpu}')
    mem_usage=$(ps aux | awk '{sum_mem += $4} END {print sum_mem}')

    # 打印结果,带有时间戳、分隔线和颜色高亮
    echo -e "${BLUE}==============================${NC}"
    echo -e "${YELLOW}Timestamp: $(date)${NC}"
    echo -e "${BLUE}==============================${NC}"
    echo -e "${GREEN}Total CPU usage: ${RED}$cpu_usage%${NC}"
    echo -e "${GREEN}Total Memory usage: ${RED}$mem_usage%${NC}"
    echo -e "${BLUE}==============================${NC}"

    # 等待5秒后再次执行
    sleep 5
done

\n

保存脚本到/usr/local/bin目录下

\n
mv usage.sh /usr/local/bin/usage
\n\n

Shell脚本编写的基本信息

#! /bin/bash
# -------------------------------------------------
# Filename: test.sh
# Version: 1.0
# Date: 2024/05/02
# Author: shenjianZ
# Email: shenjianZLT@gmail.com
# Website: https://blog.shenjianl.cn
# Description: this is a test shell
# CopyRight: 2024 All rights reserved shenjianZ
# License GPL
# ------------------------------------------------


# Your script logic goes here
","cover":false,"excerpt":"","more":"

查看CPU、内存使用率

#!/bin/bash

# 定义颜色
RED='\\033[0;31m'
GREEN='\\033[0;32m'
YELLOW='\\033[0;33m'
BLUE='\\033[0;34m'
NC='\\033[0m' # 无颜色

while true; do
    # 获取所有进程的CPU使用率和内存使用率
    cpu_usage=$(ps aux | awk '{sum_cpu += $3} END {print sum_cpu}')
    mem_usage=$(ps aux | awk '{sum_mem += $4} END {print sum_mem}')

    # 打印结果,带有时间戳、分隔线和颜色高亮
    echo -e "${BLUE}==============================${NC}"
    echo -e "${YELLOW}Timestamp: $(date)${NC}"
    echo -e "${BLUE}==============================${NC}"
    echo -e "${GREEN}Total CPU usage: ${RED}$cpu_usage%${NC}"
    echo -e "${GREEN}Total Memory usage: ${RED}$mem_usage%${NC}"
    echo -e "${BLUE}==============================${NC}"

    # 等待5秒后再次执行
    sleep 5
done

\n

保存脚本到/usr/local/bin目录下

\n
mv usage.sh /usr/local/bin/usage
\n\n

Shell脚本编写的基本信息

#! /bin/bash
# -------------------------------------------------
# Filename: test.sh
# Version: 1.0
# Date: 2024/05/02
# Author: shenjianZ
# Email: shenjianZLT@gmail.com
# Website: https://blog.shenjianl.cn
# Description: this is a test shell
# CopyRight: 2024 All rights reserved shenjianZ
# License GPL
# ------------------------------------------------


# Your script logic goes here
"},{"title":"Docker被墙,如何继续使用?","top_img":"/img/site01.jpg","top_img_height":"800px","abbrlink":47807,"date":"2024-08-01T01:10:40.000Z","_content":"\n## Docker Download\n> 自从docker官方仓库在中国大陆被墙后,docker的部署方式也发生了改变。\n> 解决docker安装问题:https://github.com/shenjianZ/docker_installer\n\n1. 安装docker \n ```shell\n sudo curl -fsSL https://gitee.com/tech-shrimp/docker_installer/releases/download/latest/linux.sh| bash -s docker --mirror Aliyun\n ```\n \n2. 启动docker\n ```shell\n sudo systemctl start docker\n ```\n \n3. 设置开机自启\n ```shell\n sudo systemctl enable docker\n ```\n \n4. Docker pull images\n > 将image下载到阿里云镜像仓库中\n > 解决docker pull 镜像问题:https://github.com/shenjianZ/docker_image_pusher\n \n 1. **登录阿里云镜像服务** https://cr.console.aliyun.com/,\n\n 启用个人实例,创建一个命名空间(`ALIYUN_NAME_SPACE`)\n\n 2. 在**访问凭证** 可以看到账号 用户名(`ALIYUN_REGISTRY_USER`)\n\n 密码(`ALIYUN_REGISTRY_PASSWORD`)\n\n 仓库地址(`ALIYUN_REGISTRY`)\n\n 3. **启动Action**\n 进入您自己的项目,点击`Action`,启用`Github Action`功能\n \n 4. **配置环境变量**\n 进入Settings->Secret and variables->Actions->New Repository secret\n 将上一步的四个值\n `ALIYUN_NAME_SPACE`,\n \n ` ALIYUN_REGISTRY_USER`,\n \n `ALIYUN_REGISTRY_PASSWORD`,\n \n `ALIYUN_REGISTRY`\n 配置成环境变量\n \n 5. **添加镜像**\n 打开`images.txt`文件,添加你想要的镜像 可以加tag\n \n 6. 使用镜像\n 回到阿里云,镜像仓库,点击任意镜像,可查看镜像状态。(可以改成公开,拉取镜像免登录)","source":"_posts/frontend/deploy/deploy.md","raw":"---\ntitle: Docker被墙,如何继续使用?\ntop_img: /img/site01.jpg\ntop_img_height: 800px\nabbrlink: 47807\ndate: 2024-08-01 09:10:40\ntags:\n---\n\n## Docker Download\n> 自从docker官方仓库在中国大陆被墙后,docker的部署方式也发生了改变。\n> 解决docker安装问题:https://github.com/shenjianZ/docker_installer\n\n1. 安装docker \n ```shell\n sudo curl -fsSL https://gitee.com/tech-shrimp/docker_installer/releases/download/latest/linux.sh| bash -s docker --mirror Aliyun\n ```\n \n2. 启动docker\n ```shell\n sudo systemctl start docker\n ```\n \n3. 设置开机自启\n ```shell\n sudo systemctl enable docker\n ```\n \n4. Docker pull images\n > 将image下载到阿里云镜像仓库中\n > 解决docker pull 镜像问题:https://github.com/shenjianZ/docker_image_pusher\n \n 1. **登录阿里云镜像服务** https://cr.console.aliyun.com/,\n\n 启用个人实例,创建一个命名空间(`ALIYUN_NAME_SPACE`)\n\n 2. 在**访问凭证** 可以看到账号 用户名(`ALIYUN_REGISTRY_USER`)\n\n 密码(`ALIYUN_REGISTRY_PASSWORD`)\n\n 仓库地址(`ALIYUN_REGISTRY`)\n\n 3. **启动Action**\n 进入您自己的项目,点击`Action`,启用`Github Action`功能\n \n 4. **配置环境变量**\n 进入Settings->Secret and variables->Actions->New Repository secret\n 将上一步的四个值\n `ALIYUN_NAME_SPACE`,\n \n ` ALIYUN_REGISTRY_USER`,\n \n `ALIYUN_REGISTRY_PASSWORD`,\n \n `ALIYUN_REGISTRY`\n 配置成环境变量\n \n 5. **添加镜像**\n 打开`images.txt`文件,添加你想要的镜像 可以加tag\n \n 6. 使用镜像\n 回到阿里云,镜像仓库,点击任意镜像,可查看镜像状态。(可以改成公开,拉取镜像免登录)","slug":"frontend/deploy/deploy","published":1,"updated":"2024-08-09T12:21:50.045Z","comments":1,"layout":"post","photos":[],"_id":"cm5uuk5p4000agkah5fqwft6x","content":"

Docker Download

Since the official Docker registry was blocked in mainland China, the way Docker is installed has changed.
Installing Docker: https://github.com/shenjianZ/docker_installer

1. Install Docker

   sudo curl -fsSL https://gitee.com/tech-shrimp/docker_installer/releases/download/latest/linux.sh | bash -s docker --mirror Aliyun

2. Start Docker

   sudo systemctl start docker

3. Enable Docker on boot

   sudo systemctl enable docker

4. Pull images through an Aliyun registry

   The idea is to push the images you need into your own Aliyun image repository first.
   Pulling images: https://github.com/shenjianZ/docker_image_pusher

   1. Log in to the Aliyun Container Registry console at https://cr.console.aliyun.com/,
      enable a personal instance and create a namespace (ALIYUN_NAME_SPACE).

   2. Under "访问凭证" (access credentials) you can find the username (ALIYUN_REGISTRY_USER),
      the password (ALIYUN_REGISTRY_PASSWORD)
      and the registry address (ALIYUN_REGISTRY).

   3. Enable Actions
      Open your own fork of the project, click "Actions" and enable GitHub Actions.

   4. Configure the secrets
      Go to Settings -> Secrets and variables -> Actions -> New repository secret
      and add the four values from the previous step:
      ALIYUN_NAME_SPACE, ALIYUN_REGISTRY_USER, ALIYUN_REGISTRY_PASSWORD, ALIYUN_REGISTRY.

   5. Add images
      Open the images.txt file and add the images you want; a tag can be appended.

   6. Use the images
      Back in the Aliyun console, open the image repository and click any image to check its status.
      (It can be made public so that pulling needs no login.) A hypothetical pull example follows this list.
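To make steps 5 and 6 concrete, here is a minimal sketch. The image name and tag (nginx:1.27) and the endpoint registry.cn-hangzhou.aliyuncs.com are placeholders I am assuming for illustration; use the address and namespace shown on your own 访问凭证 page, and note that docker_image_pusher may store the copy under a slightly different repository path.

```bash
# Hypothetical values; substitute your own registry address, namespace, user and tag.
echo "nginx:1.27" >> images.txt                                  # step 5: request a mirror of nginx:1.27
git add images.txt && git commit -m "add nginx" && git push      # pushing triggers the Action

# once the Action has finished, pull the copy back from your Aliyun registry:
docker login --username=<ALIYUN_REGISTRY_USER> registry.cn-hangzhou.aliyuncs.com
docker pull registry.cn-hangzhou.aliyuncs.com/<ALIYUN_NAME_SPACE>/nginx:1.27
# optionally restore the original name locally
docker tag registry.cn-hangzhou.aliyuncs.com/<ALIYUN_NAME_SPACE>/nginx:1.27 nginx:1.27
```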
\n","cover":false,"excerpt":"","more":"

\n"},{"title":"网络相关","abbrlink":41168,"date":"2024-08-07T02:06:08.000Z","_content":"\n","source":"_posts/net/index.md","raw":"---\ntitle: 网络相关\nabbrlink: 41168\ndate: 2024-08-07 10:06:08\ntags:\n---\n\n","slug":"net/index","published":1,"updated":"2024-08-09T12:21:50.028Z","comments":1,"layout":"post","photos":[],"_id":"cm5uuk5p5000cgkaheywzegp0","content":"","cover":false,"excerpt":"","more":""},{"title":"uniapp 开发","abbrlink":58817,"date":"2024-08-05T06:07:01.000Z","_content":"- [uniapp component](../component1)","source":"_posts/frontend/uniapp/uniapp.md","raw":"---\ntitle: uniapp 开发\ntags: uniapp\nabbrlink: 58817\ndate: 2024-08-05 14:07:01\n---\n- [uniapp component](../component1)","slug":"frontend/uniapp/uniapp","published":1,"updated":"2024-08-09T12:21:50.039Z","comments":1,"layout":"post","photos":[],"_id":"cm5uuk5p6000dgkah0iah71hw","content":"\n","cover":false,"excerpt":"","more":"\n"},{"title":"组件使用","abbrlink":33957,"date":"2024-08-05T06:07:01.000Z","_content":"\n### 组件自动导入\n```json\n\t\"easycom\":{\n\t \"autoscan\": true,\n\t \"custom\": {\n\t \"^tui-(.*)\": \"@/components/thorui/tui-$1/tui-$1.vue\" // 匹配components目录内的vue文件\n\t }\n\t}\n```\n\n### `tui-sticky 吸顶容器` \n\n> 包含 以下 `tui` 组件 :\n> - tui-sticky\n> - tui-list-view\n> - tui-list-cell\n> \n\n```html\n\n \n \n \n \n \n \n\n\n\n```\n","source":"_posts/frontend/uniapp/component1.md","raw":"---\ntitle: 组件使用\ntags: uniapp\nabbrlink: 33957\ndate: 2024-08-05 14:07:01\n---\n\n### 组件自动导入\n```json\n\t\"easycom\":{\n\t \"autoscan\": true,\n\t \"custom\": {\n\t \"^tui-(.*)\": \"@/components/thorui/tui-$1/tui-$1.vue\" // 匹配components目录内的vue文件\n\t }\n\t}\n```\n\n### `tui-sticky 吸顶容器` \n\n> 包含 以下 `tui` 组件 :\n> - tui-sticky\n> - tui-list-view\n> - tui-list-cell\n> \n\n```html\n\n \n \n \n \n \n \n\n\n\n```\n","slug":"frontend/uniapp/component1","published":1,"updated":"2024-08-09T12:21:50.042Z","comments":1,"layout":"post","photos":[],"_id":"cm5uuk5p9000hgkahdnb238fm","content":"

Automatic component import

"easycom":{
"autoscan": true,
"custom": {
"^tui-(.*)": "@/components/thorui/tui-$1/tui-$1.vue" // 匹配components目录内的vue文件
}
}

tui-sticky sticky container

Includes the following tui components: tui-sticky, tui-list-view, tui-list-cell

<tui-sticky :scrollTop="scrollTop" stickyHeight="104rpx" container>
<!-- header start -->
<template v-slot:header>
<view class="sticky-item">
<view class="setting">设置</view>
</view>
</template>
<!-- header end -->
<!--内容 start-->
<template v-slot:content>
<tui-list-view class="content">
<tui-list-cell :arrow="false">
<switch class='switch' checked color="#FFCC33" />
</tui-list-cell>
</tui-list-view>
</template>
<!--内容 end-->
</tui-sticky>

<script setup>
import { ref } from 'vue'
import { onPageScroll } from '@dcloudio/uni-app'

// 定义 scrollTop 响应式变量
const scrollTop = ref(0)
// 监听页面滚动事件
onPageScroll((e) => {
scrollTop.value = e.scrollTop
})
</script>
\n","cover":false,"excerpt":"","more":"

\n"},{"title":"郑伯克段于鄢","abbrlink":58638,"date":"2024-08-09T12:00:13.000Z","_content":"原文如下:\n\n      初,郑武公娶于申【申国】,曰武姜【武为武公谥号,姜为其宗族之性】。生庄公及共叔段【共表示其曾出逃到共,叔为老三,段为名】。庄公寤生【出生时头先出,难产】,惊姜氏,故名曰“寤生”, 遂恶之,爱【喜爱】共叔段,欲立【立为储君】之,亟(qì)【多次】请于武公,及庄公即位,为之【共叔段】请制【一个叫制的封地,虎牢关所在】。公曰:“制,岩邑【险要的城邑】也,虢叔死焉,佗【通“他”,其他】邑唯命(是听)。”请京,使居之,谓之“京城大叔”。","source":"_posts/ancient/guwenguanzhi/1.md","raw":"---\ntitle: 郑伯克段于鄢\ntags: 古文观止\ncategories:\n - 古文观止\nabbrlink: 58638\ndate: 2024-08-09 20:00:13\n---\n原文如下:\n\n      初,郑武公娶于申【申国】,曰武姜【武为武公谥号,姜为其宗族之性】。生庄公及共叔段【共表示其曾出逃到共,叔为老三,段为名】。庄公寤生【出生时头先出,难产】,惊姜氏,故名曰“寤生”, 遂恶之,爱【喜爱】共叔段,欲立【立为储君】之,亟(qì)【多次】请于武公,及庄公即位,为之【共叔段】请制【一个叫制的封地,虎牢关所在】。公曰:“制,岩邑【险要的城邑】也,虢叔死焉,佗【通“他”,其他】邑唯命(是听)。”请京,使居之,谓之“京城大叔”。","slug":"ancient/guwenguanzhi/1","published":1,"updated":"2024-08-10T02:31:03.678Z","comments":1,"layout":"post","photos":[],"_id":"cm5uuk5p9000igkaha5d343gb","content":"

The original text reads:


      初,郑武公娶于申【申国】,曰武姜【武为武公谥号,姜为其宗族之性】。生庄公及共叔段【共表示其曾出逃到共,叔为老三,段为名】。庄公寤生【出生时头先出,难产】,惊姜氏,故名曰“寤生”, 遂恶之,爱【喜爱】共叔段,欲立【立为储君】之,亟(qì)【多次】请于武公,及庄公即位,为之【共叔段】请制【一个叫制的封地,虎牢关所在】。公曰:“制,岩邑【险要的城邑】也,虢叔死焉,佗【通“他”,其他】邑唯命(是听)。”请京,使居之,谓之“京城大叔”。

\n","cover":false,"excerpt":"","more":"

\n"},{"title":"Hadoop集群搭建基础环境","top_img":"/img/site01.jpg","top_img_height":"800px","abbrlink":61253,"date":"2024-09-11T14:45:40.000Z","_content":"\n### 防火墙关闭\n```bash\n# 在 6 台主机执行\nsystemctl stop firewalld\nsystemctl disable firewalld\n```\n### 配置yum源\n- 下载 repo 文件:\n [Centos-7.repo](http://mirrors.aliyun.com/repo/Centos-7.repo)\n 并上传到`/tmp`,进入到`/tmp`\n- 备份并且替换系统的repo文件\n ``` bash\n \tcp Centos-7.repo /etc/yum.repos.d/ \n\tcd /etc/yum.repos.d/ \n\tmv CentOS-Base.repo CentOS-Base.repo.bak \n\tmv Centos-7.repo CentOS-Base.repo\n\t ```\n- 将`nn1`上的`CentOS-Base.repo`拷贝到其他主机\n ```bash\n scp /etc/yum.repos.d/CentOS-Base.repo root@nn2:/etc/yum.repos.d\n scp /etc/yum.repos.d/CentOS-Base.repo root@nn3:/etc/yum.repos.d\n scp /etc/yum.repos.d/CentOS-Base.repo root@s1:/etc/yum.repos.d\n scp /etc/yum.repos.d/CentOS-Base.repo root@s2:/etc/yum.repos.d\n scp /etc/yum.repos.d/CentOS-Base.repo root@s3:/etc/yum.repos.d\n ```\n- 执行yum源更新命令\n ```bash\n\t yum clean all\n\t yum makecache \n\t yum update -y \n\t```\n- 安装常用软件\n ```bash\n yum install -y openssh-server vim gcc gcc-c++ glibc-headers bzip2-devel lzo-devel curl wget openssh-clients zlib-devel autoconf automake cmake libtool openssl-devel fuse-devel snappy-devel telnet unzip zip net-tools.x86_64 firewalld systemd ntp unrar bzip2\n ```\n### JDK安装\n>注意需要在六台机器依次执行\n- 上传到`/tmp`目录下,安装\n ```bash\n cd /tmp\n rpm -ivh jdk-8u144-linux-x64.rpm\n ```\n- 配置环境变量\n ```bash\n ln -s /usr/java/jdk1.8.0_144/ /usr/java/jdk1.8\n echo 'export JAVA_HOME=/usr/java/jdk1.8' >> /etc/profile.d/myEnv.sh \n echo 'export PATH=$PATH:$JAVA_HOME/bin' >> /etc/profile.d/myEnv.sh \n source /etc/profile \n java -version\n ```\n### 修改主机名和主机名映射\n\n```bash\nvim /etc/hostname\n```\n6台机器分别为nn1、nn2、nn3、s1、s2、s3\n\n```bash\nvim /etc/hosts\n```\n\n修改为\n```text\n192.168.1.30 nn1\n192.168.1.31 nn2\n192.168.1.32 nn3\n192.168.1.33 s1\n192.168.1.34 s2\n192.168.1.35 s3\n```\n### 创建hadoop用户\n```bash\n#创建hadoop用户 \nuseradd hadoop \n#给hadoop用户设置密码: 12345678 \npasswd hadoop\n```\n### 禁止非 wheel 组用户切换到root,配置免密切换root\n- 修改/etc/pam.d/su配置\n ```bash\n sed -i 's/#auth\\t\\trequired\\tpam_wheel.so/auth\\t\\trequired\\tpam_wheel.so/g' '/etc/pam.d/su' \n sed -i 's/#auth\\t\\tsufficient\\tpam_wheel.so/auth\\t\\tsufficient\\tpam_wheel.so/g' '/etc/pam.d/su'\n ```\n- 修改/etc/login.defs文件\n ```bash\n echo \"SU_WHEEL_ONLY yes\" >> /etc/login.defs\n ```\n- 添加用户到管理员,禁止普通用户su 到 root\n ```bash\n #把hadoop用户加到wheel组里\n gpasswd -a hadoop wheel\n #查看wheel组里是否有hadoop用户\n cat /etc/group | grep wheel\n ```\n### 给hadoop用户,配置SSH密钥\n#### 配置hadoop用户ssh免密码登录到hadoop\n- 仅在`nn1`执行这段脚本命令即可\n 但是 `su - hadoop ` ,` mkdir ~/.ssh` 需要在其他主机执行一下\n ```bash\n #切换到hadoop用户 \n su - hadoop\n #生成ssh公私钥 \n ssh-keygen -t rsa -f ~/.ssh/id_rsa -P ''\n ssh-copy-id nn1\n ssh-copy-id nn2\n ssh-copy-id nn3\n ssh-copy-id s1\n ssh-copy-id s2\n ssh-copy-id s3\n scp /home/hadoop/.ssh/id_rsa hadoop@nn2:/home/hadoop/.ssh\n scp /home/hadoop/.ssh/id_rsa hadoop@nn3:/home/hadoop/.ssh\n scp /home/hadoop/.ssh/id_rsa hadoop@s1:/home/hadoop/.ssh\n scp /home/hadoop/.ssh/id_rsa hadoop@s2:/home/hadoop/.ssh\n scp /home/hadoop/.ssh/id_rsa hadoop@s3:/home/hadoop/.ssh\n ```\n#### 配置hadoop用户ssh免密码登录到root\n- 同上\n ```bash\n ssh-copy-id root@nn1\n ssh-copy-id root@ nn2\n ssh-copy-id root@nn3\n ssh-copy-id root@s1\n ssh-copy-id root@s2\n ssh-copy-id root@s3\n scp /home/hadoop/.ssh/id_rsa root@nn2:/root/.ssh\n scp /home/hadoop/.ssh/id_rsa root@nn3:/root/.ssh\n scp /home/hadoop/.ssh/id_rsa root@s1:/root/.ssh\n scp /home/hadoop/.ssh/id_rsa root@s2:/root/.ssh\n scp /home/hadoop/.ssh/id_rsa 
root@s3:/root/.ssh\n ```\n### 脚本配置\n- **ips**\n ```bash\n vim /home/hadoop/bin/ips\n ```\n \n ```bash\n nn1 \n nn2\n nn3\n s1 \n s2 \n s3\n ```\n- **ssh_all.sh**\n ```bash\n vim /home/hadoop/bin/ssh_all.sh\n ```\n\n ```bash\n #! /bin/bash\n # 进入到当前脚本所在目录\n cd `dirname $0`\n # 获取当前脚本所在目录\n dir_path=`pwd`\n #echo $dir_path\n # 读ips文件得到数组(里面是一堆主机名)\n ip_arr=(`cat $dir_path/ips`)\n # 遍历数组里的主机名\n for ip in ${ip_arr[*]}\n do\n # 拼接ssh命令: ssh hadoop@nn1.hadoop ls\n cmd_=\"ssh hadoop@${ip} \\\"$*\\\" \"\n echo $cmd_\n # 通过eval命令 执行 拼接的ssh 命令\n if eval ${cmd_} ; then\n echo \"OK\"\n else\n echo \"FAIL\"\n fi\n done\n ```\n- **ssh_root.sh**\n ```bash\n #! /bin/bash\n # 进入到当前脚本所在目录\n cd `dirname $0`\n # 获取当前脚本所在目录\n dir_path=`pwd`\n #echo $dir_path\n # 读ips文件得到数组(里面是一堆主机名)\n ip_arr=(`cat $dir_path/ips`)\n # 遍历数组里的主机名\n for ip in ${ip_arr[*]}\n do\n # 拼接ssh命令: ssh hadoop@nn1.hadoop ls\n cmd_=\"ssh hadoop@${ip} ~/bin/exe.sh \\\"$*\\\"\"\n echo $cmd_\n # 通过eval命令 执行 拼接的ssh 命令\n if eval ${cmd_} ; then\n echo \"OK\"\n else\n echo \"FAIL\"\n fi\n done\n ```\n- **scp_all.sh**\n ```bash\n #! /bin/bash\n # 进入到当前脚本所在目录\n cd `dirname $0`\n # 获取当前脚本所在目录\n dir_path=`pwd`\n #echo $dir_path\n # 读ips文件得到数组(里面是一堆主机名)\n ip_arr=(`cat $dir_path/ips`)\n # 源\n source_=$1\n # 目标\n target=$2\n # 遍历数组里的主机名\n for ip in ${ip_arr[*]}\n do\n # 拼接scp命令: scp 源 hadoop@nn1.hadoop:目标\n cmd_=\"scp -r ${source_} hadoop@${ip}:${target}\"\n echo $cmd_\n # 通过eval命令 执行 拼接的scp 命令\n if eval ${cmd_} ; then\n echo \"OK\"\n else\n echo \"FAIL\"\n fi\n done\n ```\n- **exe.sh**\n ```bash\n #切换到root用户执行cmd命令\n cmd=$*\n su - << EOF\n $cmd\n EOF\n ```\n- 赋予执行权限\n ```bash\n chmod +x ssh_all.sh \n chmod +x scp_all.sh\n chmod +x ssh_root.sh\n chmod +x exe.sh\n ```\n- 分发到其他主机\n ```bash\n ./ssh_all.sh mkdir /home/hadoop/bin\n ./scp_all.sh /home/hadoop/bin/ips /home/hadoop/bin/\n ./scp_all.sh /home/hadoop/bin/exe.sh /home/hadoop/bin/\n ./scp_all.sh /home/hadoop/bin/ssh_all.sh /home/hadoop/bin/\n ./scp_all.sh /home/hadoop/bin/scp_all.sh /home/hadoop/bin/\n ./scp_all.sh /home/hadoop/bin/ssh_root.sh /home/hadoop/bin/\n ```\n\n- 将 `/home/hadoop/bin`添加到hadoop用户的环境变量,需要切换到`hadoop`用户\n\n ```bash\n echo 'export PATH=$PATH:/home/hadoop/bin' >> ~/.bashrc && source ~/.bashrc\n scp_all.sh /home/hadoop/.bashrc /home/hadoop/\n ssh_all.sh source ~/.bashrc \n ```","source":"_posts/bigdata/hadoop/env.md","raw":"---\ntitle: Hadoop集群搭建基础环境\ntop_img: /img/site01.jpg\ntop_img_height: 800px\nabbrlink: 61253\ndate: 2024-09-011 22:45:40\n---\n\n### 防火墙关闭\n```bash\n# 在 6 台主机执行\nsystemctl stop firewalld\nsystemctl disable firewalld\n```\n### 配置yum源\n- 下载 repo 文件:\n [Centos-7.repo](http://mirrors.aliyun.com/repo/Centos-7.repo)\n 并上传到`/tmp`,进入到`/tmp`\n- 备份并且替换系统的repo文件\n ``` bash\n \tcp Centos-7.repo /etc/yum.repos.d/ \n\tcd /etc/yum.repos.d/ \n\tmv CentOS-Base.repo CentOS-Base.repo.bak \n\tmv Centos-7.repo CentOS-Base.repo\n\t ```\n- 将`nn1`上的`CentOS-Base.repo`拷贝到其他主机\n ```bash\n scp /etc/yum.repos.d/CentOS-Base.repo root@nn2:/etc/yum.repos.d\n scp /etc/yum.repos.d/CentOS-Base.repo root@nn3:/etc/yum.repos.d\n scp /etc/yum.repos.d/CentOS-Base.repo root@s1:/etc/yum.repos.d\n scp /etc/yum.repos.d/CentOS-Base.repo root@s2:/etc/yum.repos.d\n scp /etc/yum.repos.d/CentOS-Base.repo root@s3:/etc/yum.repos.d\n ```\n- 执行yum源更新命令\n ```bash\n\t yum clean all\n\t yum makecache \n\t yum update -y \n\t```\n- 安装常用软件\n ```bash\n yum install -y openssh-server vim gcc gcc-c++ glibc-headers bzip2-devel lzo-devel curl wget openssh-clients zlib-devel autoconf automake cmake libtool openssl-devel 
fuse-devel snappy-devel telnet unzip zip net-tools.x86_64 firewalld systemd ntp unrar bzip2\n ```\n### JDK安装\n>注意需要在六台机器依次执行\n- 上传到`/tmp`目录下,安装\n ```bash\n cd /tmp\n rpm -ivh jdk-8u144-linux-x64.rpm\n ```\n- 配置环境变量\n ```bash\n ln -s /usr/java/jdk1.8.0_144/ /usr/java/jdk1.8\n echo 'export JAVA_HOME=/usr/java/jdk1.8' >> /etc/profile.d/myEnv.sh \n echo 'export PATH=$PATH:$JAVA_HOME/bin' >> /etc/profile.d/myEnv.sh \n source /etc/profile \n java -version\n ```\n### 修改主机名和主机名映射\n\n```bash\nvim /etc/hostname\n```\n6台机器分别为nn1、nn2、nn3、s1、s2、s3\n\n```bash\nvim /etc/hosts\n```\n\n修改为\n```text\n192.168.1.30 nn1\n192.168.1.31 nn2\n192.168.1.32 nn3\n192.168.1.33 s1\n192.168.1.34 s2\n192.168.1.35 s3\n```\n### 创建hadoop用户\n```bash\n#创建hadoop用户 \nuseradd hadoop \n#给hadoop用户设置密码: 12345678 \npasswd hadoop\n```\n### 禁止非 wheel 组用户切换到root,配置免密切换root\n- 修改/etc/pam.d/su配置\n ```bash\n sed -i 's/#auth\\t\\trequired\\tpam_wheel.so/auth\\t\\trequired\\tpam_wheel.so/g' '/etc/pam.d/su' \n sed -i 's/#auth\\t\\tsufficient\\tpam_wheel.so/auth\\t\\tsufficient\\tpam_wheel.so/g' '/etc/pam.d/su'\n ```\n- 修改/etc/login.defs文件\n ```bash\n echo \"SU_WHEEL_ONLY yes\" >> /etc/login.defs\n ```\n- 添加用户到管理员,禁止普通用户su 到 root\n ```bash\n #把hadoop用户加到wheel组里\n gpasswd -a hadoop wheel\n #查看wheel组里是否有hadoop用户\n cat /etc/group | grep wheel\n ```\n### 给hadoop用户,配置SSH密钥\n#### 配置hadoop用户ssh免密码登录到hadoop\n- 仅在`nn1`执行这段脚本命令即可\n 但是 `su - hadoop ` ,` mkdir ~/.ssh` 需要在其他主机执行一下\n ```bash\n #切换到hadoop用户 \n su - hadoop\n #生成ssh公私钥 \n ssh-keygen -t rsa -f ~/.ssh/id_rsa -P ''\n ssh-copy-id nn1\n ssh-copy-id nn2\n ssh-copy-id nn3\n ssh-copy-id s1\n ssh-copy-id s2\n ssh-copy-id s3\n scp /home/hadoop/.ssh/id_rsa hadoop@nn2:/home/hadoop/.ssh\n scp /home/hadoop/.ssh/id_rsa hadoop@nn3:/home/hadoop/.ssh\n scp /home/hadoop/.ssh/id_rsa hadoop@s1:/home/hadoop/.ssh\n scp /home/hadoop/.ssh/id_rsa hadoop@s2:/home/hadoop/.ssh\n scp /home/hadoop/.ssh/id_rsa hadoop@s3:/home/hadoop/.ssh\n ```\n#### 配置hadoop用户ssh免密码登录到root\n- 同上\n ```bash\n ssh-copy-id root@nn1\n ssh-copy-id root@ nn2\n ssh-copy-id root@nn3\n ssh-copy-id root@s1\n ssh-copy-id root@s2\n ssh-copy-id root@s3\n scp /home/hadoop/.ssh/id_rsa root@nn2:/root/.ssh\n scp /home/hadoop/.ssh/id_rsa root@nn3:/root/.ssh\n scp /home/hadoop/.ssh/id_rsa root@s1:/root/.ssh\n scp /home/hadoop/.ssh/id_rsa root@s2:/root/.ssh\n scp /home/hadoop/.ssh/id_rsa root@s3:/root/.ssh\n ```\n### 脚本配置\n- **ips**\n ```bash\n vim /home/hadoop/bin/ips\n ```\n \n ```bash\n nn1 \n nn2\n nn3\n s1 \n s2 \n s3\n ```\n- **ssh_all.sh**\n ```bash\n vim /home/hadoop/bin/ssh_all.sh\n ```\n\n ```bash\n #! /bin/bash\n # 进入到当前脚本所在目录\n cd `dirname $0`\n # 获取当前脚本所在目录\n dir_path=`pwd`\n #echo $dir_path\n # 读ips文件得到数组(里面是一堆主机名)\n ip_arr=(`cat $dir_path/ips`)\n # 遍历数组里的主机名\n for ip in ${ip_arr[*]}\n do\n # 拼接ssh命令: ssh hadoop@nn1.hadoop ls\n cmd_=\"ssh hadoop@${ip} \\\"$*\\\" \"\n echo $cmd_\n # 通过eval命令 执行 拼接的ssh 命令\n if eval ${cmd_} ; then\n echo \"OK\"\n else\n echo \"FAIL\"\n fi\n done\n ```\n- **ssh_root.sh**\n ```bash\n #! /bin/bash\n # 进入到当前脚本所在目录\n cd `dirname $0`\n # 获取当前脚本所在目录\n dir_path=`pwd`\n #echo $dir_path\n # 读ips文件得到数组(里面是一堆主机名)\n ip_arr=(`cat $dir_path/ips`)\n # 遍历数组里的主机名\n for ip in ${ip_arr[*]}\n do\n # 拼接ssh命令: ssh hadoop@nn1.hadoop ls\n cmd_=\"ssh hadoop@${ip} ~/bin/exe.sh \\\"$*\\\"\"\n echo $cmd_\n # 通过eval命令 执行 拼接的ssh 命令\n if eval ${cmd_} ; then\n echo \"OK\"\n else\n echo \"FAIL\"\n fi\n done\n ```\n- **scp_all.sh**\n ```bash\n #! 
/bin/bash\n # 进入到当前脚本所在目录\n cd `dirname $0`\n # 获取当前脚本所在目录\n dir_path=`pwd`\n #echo $dir_path\n # 读ips文件得到数组(里面是一堆主机名)\n ip_arr=(`cat $dir_path/ips`)\n # 源\n source_=$1\n # 目标\n target=$2\n # 遍历数组里的主机名\n for ip in ${ip_arr[*]}\n do\n # 拼接scp命令: scp 源 hadoop@nn1.hadoop:目标\n cmd_=\"scp -r ${source_} hadoop@${ip}:${target}\"\n echo $cmd_\n # 通过eval命令 执行 拼接的scp 命令\n if eval ${cmd_} ; then\n echo \"OK\"\n else\n echo \"FAIL\"\n fi\n done\n ```\n- **exe.sh**\n ```bash\n #切换到root用户执行cmd命令\n cmd=$*\n su - << EOF\n $cmd\n EOF\n ```\n- 赋予执行权限\n ```bash\n chmod +x ssh_all.sh \n chmod +x scp_all.sh\n chmod +x ssh_root.sh\n chmod +x exe.sh\n ```\n- 分发到其他主机\n ```bash\n ./ssh_all.sh mkdir /home/hadoop/bin\n ./scp_all.sh /home/hadoop/bin/ips /home/hadoop/bin/\n ./scp_all.sh /home/hadoop/bin/exe.sh /home/hadoop/bin/\n ./scp_all.sh /home/hadoop/bin/ssh_all.sh /home/hadoop/bin/\n ./scp_all.sh /home/hadoop/bin/scp_all.sh /home/hadoop/bin/\n ./scp_all.sh /home/hadoop/bin/ssh_root.sh /home/hadoop/bin/\n ```\n\n- 将 `/home/hadoop/bin`添加到hadoop用户的环境变量,需要切换到`hadoop`用户\n\n ```bash\n echo 'export PATH=$PATH:/home/hadoop/bin' >> ~/.bashrc && source ~/.bashrc\n scp_all.sh /home/hadoop/.bashrc /home/hadoop/\n ssh_all.sh source ~/.bashrc \n ```","slug":"bigdata/hadoop/env","published":1,"updated":"2024-09-11T14:45:28.095Z","comments":1,"layout":"post","photos":[],"_id":"cm5uuk5pa000kgkah91xjbtcr","content":"

Turn off the firewall

# run on all 6 hosts
systemctl stop firewalld
systemctl disable firewalld

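If you want to verify the result, a quick check like this (not part of the original steps) can be run on each host:

```bash
# both commands should report that firewalld is off
systemctl is-active firewalld    # expected: inactive
systemctl is-enabled firewalld   # expected: disabled
```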

Configure the yum repositories

Install the JDK

Note: run the installation on all six machines in turn.


Set the hostnames and host mappings

vim /etc/hostname

The six machines are nn1, nn2, nn3, s1, s2 and s3.

vim /etc/hosts

Change it to the following (a quick resolution check is sketched right after the list):

192.168.1.30 nn1
192.168.1.31 nn2
192.168.1.32 nn3
192.168.1.33 s1
192.168.1.34 s2
192.168.1.35 s3
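A small loop such as the following, purely illustrative, confirms that every entry resolves and responds:

```bash
for h in nn1 nn2 nn3 s1 s2 s3; do
  ping -c 1 "$h" > /dev/null 2>&1 && echo "$h ok" || echo "$h unreachable"
done
```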

Create the hadoop user

# create the hadoop user
useradd hadoop
# set the hadoop user's password: 12345678
passwd hadoop

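As a quick sanity check (my addition, not in the original post), the account should now exist on each machine:

```bash
id hadoop                   # the user and its primary group should exist
grep '^hadoop:' /etc/passwd # shows the home directory and login shell
```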

Prevent users outside the wheel group from switching to root, and configure password-free switching to root
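The rendered excerpt keeps only the heading; condensed from the raw post, the section enables pam_wheel for su, sets SU_WHEEL_ONLY, and adds hadoop to the wheel group:

```bash
# enable the pam_wheel lines in /etc/pam.d/su (as in the raw post)
sed -i 's/#auth\t\trequired\tpam_wheel.so/auth\t\trequired\tpam_wheel.so/g' /etc/pam.d/su
sed -i 's/#auth\t\tsufficient\tpam_wheel.so/auth\t\tsufficient\tpam_wheel.so/g' /etc/pam.d/su
# only wheel members may su to root
echo "SU_WHEEL_ONLY yes" >> /etc/login.defs
# add hadoop to the wheel group and confirm
gpasswd -a hadoop wheel
cat /etc/group | grep wheel
```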

Configure SSH keys for the hadoop user

Passwordless SSH from the hadoop user to the hadoop user

Passwordless SSH from the hadoop user to root

Helper scripts (example usage is sketched below)
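The raw post defines ips, ssh_all.sh, ssh_root.sh, scp_all.sh and exe.sh under /home/hadoop/bin. Typical invocations look like this; the commands passed in are only examples:

```bash
ssh_all.sh  'hostname && jps'                        # run a command as hadoop on every host listed in ips
ssh_root.sh 'systemctl is-active firewalld'          # run a command as root (via exe.sh) on every host
scp_all.sh  /home/hadoop/bin/ips /home/hadoop/bin/   # copy a local file to the same path on every host
```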

\n","cover":false,"excerpt":"","more":"

\n"},{"title":"无法访问外网?需要订阅代理服务?","abbrlink":14011,"date":"2024-08-07T02:06:08.000Z","_content":"\n{% note info %}\n**由于中国大陆的GFW(防火墙)限制,无法访问外网网络,因此需要访问像GitHub、YouTube这样的\n的网站将被屏蔽拦截,接下来我将给出一种使用`VPN`服务的可行的方案来保证服务的可靠性。**\n{% endnote %}\n\n### 介绍\n> 根据测试,许多提供服务的提供商所在的网站需要使用`外部网络`才能打开,仅有少部分的网站(**比较贵**)可以直接使用\n> 国内网络环境打开直接购买订阅服务。\n>\n\n那么你现在可以有两个选择:\n1. **方案一**:使用无需`外部网络`便能开通订阅服务的VPN,费用高,如果你选择此方案,那么你可自行搜索解决,此处仅仅讨论方案二。\n2. **方案二**:如果使用此方案,详见下方。\n\n\n### 解决方案\n> 采用**方案二**方式\n> \n> 这是一些订阅服务推广的链接: https://9.234456.xyz/abc.html?t=638586217737356738 (此链接打开无需使用VPN,但进入对应的机场页面却仍无法打开)\n> \n> 此教程中我们使用的机场是 \n> 1. `一元机场`: https://xn--4gq62f52gdss.com/\n> 2. `赔钱机场`:https://xn--mes358aby2apfg.com/\n\n### 机场选择的建议:\n\n- `一元机场`\n ![](/img/yiyuan.png)\n 可以看到\n - `12元/年`,每月50GB的可用额度,下个月重置流量额度\n - `15元/季`,即为`60元/年`,每月有4000GB的可用额度,下个月重置流量额度\n - `7元/月`,即为`84元/年`,每个月8000GB的可用额度,下个月重置流量额度\n 根据我个人的使用情况,大多数情况下我每月的流量使用未超过50GB,如果没有频繁的流量使用,\n 建议选择`12元/年`,否则可以选择`15元/季`,这意味着每月将有4000GB的可用额度\n\n- `赔钱机场`\n\n ![](/img/peiqian.png)\n `赔钱机场`的订阅共有9种方案,这里我仅显示自己正在使用的,个人认为十分优惠:\n - `34.99元/年`,每月500GB的可用额度,根据我观察和使用,这个订阅方案比`一元机场`的性价比更高,且流量使用额度也不用担心\n \n### 如何订阅?\n{% note success %}\n由于需要外部网络才能完成订阅服务的购买,你可以向我的邮箱`15202078626@163.com`发送你的订阅计划方案,\n扫描付款二维码,我将为你开通订阅(您只需要付款对应的订阅金额即可)\n{% endnote %}\n\n\n### 完成订阅后如何使用?\n> 你可以在 `Windows`、`Mac`、`Android`等平台使用此服务\n> 使用订阅的对应链接: https://flowus.cn/shenjian/22f76d4f-e7b3-4b8a-8a89-561566f6eb60\n\n\n ","source":"_posts/net/jichang/jichang.md","raw":"---\ntitle: 无法访问外网?需要订阅代理服务?\ntags: 网络代理\nabbrlink: 14011\ndate: 2024-08-07 10:06:08\n---\n\n{% note info %}\n**由于中国大陆的GFW(防火墙)限制,无法访问外网网络,因此需要访问像GitHub、YouTube这样的\n的网站将被屏蔽拦截,接下来我将给出一种使用`VPN`服务的可行的方案来保证服务的可靠性。**\n{% endnote %}\n\n### 介绍\n> 根据测试,许多提供服务的提供商所在的网站需要使用`外部网络`才能打开,仅有少部分的网站(**比较贵**)可以直接使用\n> 国内网络环境打开直接购买订阅服务。\n>\n\n那么你现在可以有两个选择:\n1. **方案一**:使用无需`外部网络`便能开通订阅服务的VPN,费用高,如果你选择此方案,那么你可自行搜索解决,此处仅仅讨论方案二。\n2. **方案二**:如果使用此方案,详见下方。\n\n\n### 解决方案\n> 采用**方案二**方式\n> \n> 这是一些订阅服务推广的链接: https://9.234456.xyz/abc.html?t=638586217737356738 (此链接打开无需使用VPN,但进入对应的机场页面却仍无法打开)\n> \n> 此教程中我们使用的机场是 \n> 1. `一元机场`: https://xn--4gq62f52gdss.com/\n> 2. `赔钱机场`:https://xn--mes358aby2apfg.com/\n\n### 机场选择的建议:\n\n- `一元机场`\n ![](/img/yiyuan.png)\n 可以看到\n - `12元/年`,每月50GB的可用额度,下个月重置流量额度\n - `15元/季`,即为`60元/年`,每月有4000GB的可用额度,下个月重置流量额度\n - `7元/月`,即为`84元/年`,每个月8000GB的可用额度,下个月重置流量额度\n 根据我个人的使用情况,大多数情况下我每月的流量使用未超过50GB,如果没有频繁的流量使用,\n 建议选择`12元/年`,否则可以选择`15元/季`,这意味着每月将有4000GB的可用额度\n\n- `赔钱机场`\n\n ![](/img/peiqian.png)\n `赔钱机场`的订阅共有9种方案,这里我仅显示自己正在使用的,个人认为十分优惠:\n - `34.99元/年`,每月500GB的可用额度,根据我观察和使用,这个订阅方案比`一元机场`的性价比更高,且流量使用额度也不用担心\n \n### 如何订阅?\n{% note success %}\n由于需要外部网络才能完成订阅服务的购买,你可以向我的邮箱`15202078626@163.com`发送你的订阅计划方案,\n扫描付款二维码,我将为你开通订阅(您只需要付款对应的订阅金额即可)\n{% endnote %}\n\n\n### 完成订阅后如何使用?\n> 你可以在 `Windows`、`Mac`、`Android`等平台使用此服务\n> 使用订阅的对应链接: https://flowus.cn/shenjian/22f76d4f-e7b3-4b8a-8a89-561566f6eb60\n\n\n ","slug":"net/jichang/jichang","published":1,"updated":"2024-08-09T12:21:50.036Z","comments":1,"layout":"post","photos":[],"_id":"cm5uuk5pc000ngkahczoqfdow","content":"

Because of the GFW (firewall) restrictions in mainland China, sites such as GitHub and YouTube are blocked.
Below is a workable approach that uses a VPN subscription service to keep those sites reachable.

Introduction

In testing, most providers' sites can only be opened from outside the firewall; only a few (comparatively expensive) ones can be reached directly from a domestic connection to buy a subscription.

So there are two options:

  1. Option one: use a VPN whose subscription can be bought without outside access. It costs more; if you choose this, search for it yourself. Only option two is discussed here.
  2. Option two: described below.

Solution

This tutorial follows option two.

Some referral links for subscription services: https://9.234456.xyz/abc.html?t=638586217737356738 (this link opens without a VPN, but the provider pages it leads to still will not open).

The providers used in this tutorial are:

  1. 一元机场: https://xn--4gq62f52gdss.com/
  2. 赔钱机场: https://xn--mes358aby2apfg.com/

Suggestions on choosing a provider

How to subscribe?

Because outside access is needed to complete the purchase, you can send your chosen plan to my mailbox 15202078626@163.com and scan the payment QR code, and I will set up the subscription for you (you only pay the subscription price itself).

How to use it after subscribing?

The service can be used on Windows, Mac, Android and other platforms.
The link for using the subscription: https://flowus.cn/shenjian/22f76d4f-e7b3-4b8a-8a89-561566f6eb60

\n","cover":false,"excerpt":"","more":"

\n"},{"title":"Hadoop集群HDFS配置","top_img":"/img/site01.jpg","top_img_height":"800px","abbrlink":61252,"date":"2024-09-11T14:45:40.000Z","_content":"\n### 上传`hadoop-3.1.4.tar.gz`到`/tmp`,解压\n>注意在六台机器均上传到`/tmp`\n```bash\n# 在6台机器执行\nsudo tar -zxvf /tmp/hadoop-3.1.4.tar.gz -C /usr/local/\n# 分发到其他主机\nssh_root.sh chown -R hadoop:hadoop /usr/local/hadoop-3.1.4\nssh_root.sh ln -s /usr/local/hadoop-3.1.4/ /usr/local/hadoop\n```\n### 配置环境变量\n```bash\necho 'export HADOOP_HOME=/usr/local/hadoop' >> /etc/profile.d/myEnv.sh\necho 'export PATH=$PATH:$HADOOP_HOME/bin' >> /etc/profile.d/myEnv.sh\necho 'export PATH=$PATH:$HADOOP_HOME/sbin' >> /etc/profile.d/myEnv.sh\n```\n\n```bash\n# 分发到nn2、nn3、s1、s2、s3\nscp_all.sh /etc/profile.d/myEnv.sh /etc/profile.d/\n# source 环境变量\nssh_root.sh source /etc/profile\n```\n>还需要创建 `/data`这个目录,由于nn1、nn2、nn3已经创建`/data`,其他三台需要创建一下\n```bash\n### 在s1、s2、s3执行\nsudo mkdir /data\nsudo chown -R hadoop:hadoop /data\n```\n\n### 修改core-site.xml\n```bash\nvim /usr/local/hadoop/etc/hadoop/core-site.xml \n```\n\n```xml\n\n\t\n\t fs.defaultFS\n\t hdfs://ns1\n\t 默认文件服务的协议和NS逻辑名称,和hdfs-site.xml里的对应此配置替代了1.0里的fs.default.name\n\t\n\t\n\t\n\t hadoop.tmp.dir\n\t /data/tmp\n\t 数据存储目录\n\t\n\t\n\t\n\t hadoop.proxyuser.root.groups\n\t hadoop\n\t \n\t hdfs dfsadmin –refreshSuperUserGroupsConfiguration,\n\t yarn rmadmin –refreshSuperUserGroupsConfiguration\n\t 使用这两个命令不用重启就能刷新\n\t \n\t\n\t\n\t\n\t hadoop.proxyuser.root.hosts\n\t localhost\n\t 本地代理\n\t\n\t\n\t\n\t \n\t\tha.zookeeper.quorum \n\t\tnn1:2181,nn2:2181,nn3:2181 \n\t\tHA使用的zookeeper地址 \n\t\n\n```\n### 修改`hdfs-site.xml`\n```bash\nvim /usr/local/hadoop/etc/hadoop/hdfs-site.xml \n```\n\n```xml\n\n \n dfs.namenode.name.dir\n /data/namenode\n namenode本地文件存放地址\n \n \n \n dfs.nameservices\n ns1\n 提供服务的NS逻辑名称,与core-site.xml里的对应\n \n \n \n \n \n dfs.ha.namenodes.ns1\n nn1,nn2,nn3\n 列出该逻辑名称下的NameNode逻辑名称\n \n \n \n dfs.namenode.rpc-address.ns1.nn1\n nn1:9000\n 指定NameNode的RPC位置\n \n \n \n dfs.namenode.http-address.ns1.nn1\n nn1:50070\n 指定NameNode的Web Server位置\n \n \n \n dfs.namenode.rpc-address.ns1.nn2\n nn2:9000\n 指定NameNode的RPC位置\n \n \n \n dfs.namenode.http-address.ns1.nn2\n nn2:50070\n 指定NameNode的Web Server位置\n \n \n \n dfs.namenode.rpc-address.ns1.nn3\n nn3:9000\n 指定NameNode的RPC位置\n \n \n \n dfs.namenode.http-address.ns1.nn3\n nn3:50070\n 指定NameNode的Web Server位置\n \n \n \n dfs.namenode.handler.count\n 77\n namenode的工作线程数\n \n\n \n \n dfs.namenode.shared.edits.dir\n qjournal://nn1:8485;nn2:8485;nn3:8485/ns1\n 指定用于HA存放edits的共享存储,通常是namenode的所在机器\n \n \n \n dfs.journalnode.edits.dir\n /data/journaldata/\n journaldata服务存放文件的地址\n \n \n \n ipc.client.connect.max.retries\n 10\n namenode和journalnode的链接重试次数10次\n \n \n \n ipc.client.connect.retry.interval\n 10000\n 重试的间隔时间10s\n \n\n \n \n dfs.ha.fencing.methods\n sshfence\n 指定HA做隔离的方法,缺省是ssh,可设为shell,稍后详述\n \n \n \n dfs.ha.fencing.ssh.private-key-files\n /home/hadoop/.ssh/id_rsa\n 杀死命令脚本的免密配置秘钥\n \n \n \n dfs.client.failover.proxy.provider.ns1\n org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider\n 指定客户端用于HA切换的代理类,不同的NS可以用不同的代理类以上示例为Hadoop 2.0自带的缺省代理类\n \n \n \n dfs.client.failover.proxy.provider.auto-ha\n org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider\n \n \n \n dfs.ha.automatic-failover.enabled\n true\n \n\t\n\t\n\t dfs.datanode.data.dir\n\t /data/datanode\n\t datanode本地文件存放地址\n\t\n\t\n\t dfs.replication\n\t 3\n\t 文件复本数\n\t\n\t\n\t dfs.namenode.datanode.registration.ip-hostname-check\n\t false\n\t\n\t\n\t dfs.client.use.datanode.hostname\n\t true\n\t\n\t\n\t 
dfs.datanode.use.datanode.hostname\n\t true\n\t\n\n```\n### 修改`hadoop-env.sh`\n```bash\nvim /usr/local/hadoop/etc/hadoop/hadoop-env.sh\n```\n\n```bash\n# 添加这两行\nsource /etc/profile \nexport HADOOP_HEAPSIZE_MAX=512\n```\n### 分发这些配置文件\n```bash\nscp_all.sh /usr/local/hadoop/etc/hadoop/core-site.xml /usr/local/hadoop/etc/hadoop/\nscp_all.sh /usr/local/hadoop/etc/hadoop/hdfs-site.xml /usr/local/hadoop/etc/hadoop/\nscp_all.sh /usr/local/hadoop/etc/hadoop/hadoop-env.sh /usr/local/hadoop/etc/hadoop/\n```\n### 集群初始化\n- 需要先启动zookeeper集群\n ```bash\n ssh_all_zookeeper.sh /usr/local/zookeeper/bin/zkServer.sh start\n ```\n\n ```bash\n # 第一次启动先启动journalnode,便于3个namenode的元数据同步\n ssh_all_zookeeper.sh hadoop-daemon.sh start journalnode\n ```\n- `zkfc`搭建\n ```bash\n #在nn1节点执行 \n hdfs zkfc -formatZK\n #nn1 nn2 nn3启动zkfc \n hadoop-daemon.sh start zkfc\n ```\n- 初始化nn1的namenode,在nn1执行\n ```bash\n hdfs namenode -format \n hadoop-daemon.sh start namenode\n ```\n- 格式化第二台和第三台namenode,并且启动namenode,在nn2、nn3执行\n ```bash\n hdfs namenode -bootstrapStandby \n hadoop-daemon.sh start namenode\n ```\n- 修改**workers**\n ```bash\n vim /usr/local/hadoop/etc/hadoop/workers\n ```\n 修改为\n ```text\n s1\n s2\n s3\n ```\n 分发给其他机器\n ```bash\n scp_all.sh /usr/local/hadoop/etc/hadoop/workers /usr/local/hadoop/etc/hadoop\n ```\n- 启动datanode节点,在s1、s2、s3执行\n\n ```bash\n #启动各个节点的datanode\n hadoop-daemons.sh start datanode\n ```\n### 集群启动\n```bash\nstart-dfs.sh\n```","source":"_posts/bigdata/hadoop/hdfs.md","raw":"---\ntitle: Hadoop集群HDFS配置\ntop_img: /img/site01.jpg\ntop_img_height: 800px\nabbrlink: 61252\ndate: 2024-09-011 22:45:40\n---\n\n### 上传`hadoop-3.1.4.tar.gz`到`/tmp`,解压\n>注意在六台机器均上传到`/tmp`\n```bash\n# 在6台机器执行\nsudo tar -zxvf /tmp/hadoop-3.1.4.tar.gz -C /usr/local/\n# 分发到其他主机\nssh_root.sh chown -R hadoop:hadoop /usr/local/hadoop-3.1.4\nssh_root.sh ln -s /usr/local/hadoop-3.1.4/ /usr/local/hadoop\n```\n### 配置环境变量\n```bash\necho 'export HADOOP_HOME=/usr/local/hadoop' >> /etc/profile.d/myEnv.sh\necho 'export PATH=$PATH:$HADOOP_HOME/bin' >> /etc/profile.d/myEnv.sh\necho 'export PATH=$PATH:$HADOOP_HOME/sbin' >> /etc/profile.d/myEnv.sh\n```\n\n```bash\n# 分发到nn2、nn3、s1、s2、s3\nscp_all.sh /etc/profile.d/myEnv.sh /etc/profile.d/\n# source 环境变量\nssh_root.sh source /etc/profile\n```\n>还需要创建 `/data`这个目录,由于nn1、nn2、nn3已经创建`/data`,其他三台需要创建一下\n```bash\n### 在s1、s2、s3执行\nsudo mkdir /data\nsudo chown -R hadoop:hadoop /data\n```\n\n### 修改core-site.xml\n```bash\nvim /usr/local/hadoop/etc/hadoop/core-site.xml \n```\n\n```xml\n\n\t\n\t fs.defaultFS\n\t hdfs://ns1\n\t 默认文件服务的协议和NS逻辑名称,和hdfs-site.xml里的对应此配置替代了1.0里的fs.default.name\n\t\n\t\n\t\n\t hadoop.tmp.dir\n\t /data/tmp\n\t 数据存储目录\n\t\n\t\n\t\n\t hadoop.proxyuser.root.groups\n\t hadoop\n\t \n\t hdfs dfsadmin –refreshSuperUserGroupsConfiguration,\n\t yarn rmadmin –refreshSuperUserGroupsConfiguration\n\t 使用这两个命令不用重启就能刷新\n\t \n\t\n\t\n\t\n\t hadoop.proxyuser.root.hosts\n\t localhost\n\t 本地代理\n\t\n\t\n\t\n\t \n\t\tha.zookeeper.quorum \n\t\tnn1:2181,nn2:2181,nn3:2181 \n\t\tHA使用的zookeeper地址 \n\t\n\n```\n### 修改`hdfs-site.xml`\n```bash\nvim /usr/local/hadoop/etc/hadoop/hdfs-site.xml \n```\n\n```xml\n\n \n dfs.namenode.name.dir\n /data/namenode\n namenode本地文件存放地址\n \n \n \n dfs.nameservices\n ns1\n 提供服务的NS逻辑名称,与core-site.xml里的对应\n \n \n \n \n \n dfs.ha.namenodes.ns1\n nn1,nn2,nn3\n 列出该逻辑名称下的NameNode逻辑名称\n \n \n \n dfs.namenode.rpc-address.ns1.nn1\n nn1:9000\n 指定NameNode的RPC位置\n \n \n \n dfs.namenode.http-address.ns1.nn1\n nn1:50070\n 指定NameNode的Web Server位置\n \n \n \n dfs.namenode.rpc-address.ns1.nn2\n nn2:9000\n 
指定NameNode的RPC位置\n \n \n \n dfs.namenode.http-address.ns1.nn2\n nn2:50070\n 指定NameNode的Web Server位置\n \n \n \n dfs.namenode.rpc-address.ns1.nn3\n nn3:9000\n 指定NameNode的RPC位置\n \n \n \n dfs.namenode.http-address.ns1.nn3\n nn3:50070\n 指定NameNode的Web Server位置\n \n \n \n dfs.namenode.handler.count\n 77\n namenode的工作线程数\n \n\n \n \n dfs.namenode.shared.edits.dir\n qjournal://nn1:8485;nn2:8485;nn3:8485/ns1\n 指定用于HA存放edits的共享存储,通常是namenode的所在机器\n \n \n \n dfs.journalnode.edits.dir\n /data/journaldata/\n journaldata服务存放文件的地址\n \n \n \n ipc.client.connect.max.retries\n 10\n namenode和journalnode的链接重试次数10次\n \n \n \n ipc.client.connect.retry.interval\n 10000\n 重试的间隔时间10s\n \n\n \n \n dfs.ha.fencing.methods\n sshfence\n 指定HA做隔离的方法,缺省是ssh,可设为shell,稍后详述\n \n \n \n dfs.ha.fencing.ssh.private-key-files\n /home/hadoop/.ssh/id_rsa\n 杀死命令脚本的免密配置秘钥\n \n \n \n dfs.client.failover.proxy.provider.ns1\n org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider\n 指定客户端用于HA切换的代理类,不同的NS可以用不同的代理类以上示例为Hadoop 2.0自带的缺省代理类\n \n \n \n dfs.client.failover.proxy.provider.auto-ha\n org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider\n \n \n \n dfs.ha.automatic-failover.enabled\n true\n \n\t\n\t\n\t dfs.datanode.data.dir\n\t /data/datanode\n\t datanode本地文件存放地址\n\t\n\t\n\t dfs.replication\n\t 3\n\t 文件复本数\n\t\n\t\n\t dfs.namenode.datanode.registration.ip-hostname-check\n\t false\n\t\n\t\n\t dfs.client.use.datanode.hostname\n\t true\n\t\n\t\n\t dfs.datanode.use.datanode.hostname\n\t true\n\t\n\n```\n### 修改`hadoop-env.sh`\n```bash\nvim /usr/local/hadoop/etc/hadoop/hadoop-env.sh\n```\n\n```bash\n# 添加这两行\nsource /etc/profile \nexport HADOOP_HEAPSIZE_MAX=512\n```\n### 分发这些配置文件\n```bash\nscp_all.sh /usr/local/hadoop/etc/hadoop/core-site.xml /usr/local/hadoop/etc/hadoop/\nscp_all.sh /usr/local/hadoop/etc/hadoop/hdfs-site.xml /usr/local/hadoop/etc/hadoop/\nscp_all.sh /usr/local/hadoop/etc/hadoop/hadoop-env.sh /usr/local/hadoop/etc/hadoop/\n```\n### 集群初始化\n- 需要先启动zookeeper集群\n ```bash\n ssh_all_zookeeper.sh /usr/local/zookeeper/bin/zkServer.sh start\n ```\n\n ```bash\n # 第一次启动先启动journalnode,便于3个namenode的元数据同步\n ssh_all_zookeeper.sh hadoop-daemon.sh start journalnode\n ```\n- `zkfc`搭建\n ```bash\n #在nn1节点执行 \n hdfs zkfc -formatZK\n #nn1 nn2 nn3启动zkfc \n hadoop-daemon.sh start zkfc\n ```\n- 初始化nn1的namenode,在nn1执行\n ```bash\n hdfs namenode -format \n hadoop-daemon.sh start namenode\n ```\n- 格式化第二台和第三台namenode,并且启动namenode,在nn2、nn3执行\n ```bash\n hdfs namenode -bootstrapStandby \n hadoop-daemon.sh start namenode\n ```\n- 修改**workers**\n ```bash\n vim /usr/local/hadoop/etc/hadoop/workers\n ```\n 修改为\n ```text\n s1\n s2\n s3\n ```\n 分发给其他机器\n ```bash\n scp_all.sh /usr/local/hadoop/etc/hadoop/workers /usr/local/hadoop/etc/hadoop\n ```\n- 启动datanode节点,在s1、s2、s3执行\n\n ```bash\n #启动各个节点的datanode\n hadoop-daemons.sh start datanode\n ```\n### 集群启动\n```bash\nstart-dfs.sh\n```","slug":"bigdata/hadoop/hdfs","published":1,"updated":"2024-09-11T14:51:42.712Z","comments":1,"layout":"post","photos":[],"_id":"cm5uuk5pc000ogkahdapz0pww","content":"

Upload hadoop-3.1.4.tar.gz to /tmp and unpack it

Note: upload it to /tmp on all six machines.

# run on all 6 machines
sudo tar -zxvf /tmp/hadoop-3.1.4.tar.gz -C /usr/local/
# distribute to the other hosts
ssh_root.sh chown -R hadoop:hadoop /usr/local/hadoop-3.1.4
ssh_root.sh ln -s /usr/local/hadoop-3.1.4/ /usr/local/hadoop


Configure environment variables

echo 'export HADOOP_HOME=/usr/local/hadoop' >> /etc/profile.d/myEnv.sh
echo 'export PATH=$PATH:$HADOOP_HOME/bin' >> /etc/profile.d/myEnv.sh
echo 'export PATH=$PATH:$HADOOP_HOME/sbin' >> /etc/profile.d/myEnv.sh

# distribute to nn2, nn3, s1, s2 and s3
scp_all.sh /etc/profile.d/myEnv.sh /etc/profile.d/
# reload the environment
ssh_root.sh source /etc/profile

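A quick, illustrative check that the distribution worked: the hadoop binary and the symlink should resolve on every host.

```bash
# each host should report the same release
ssh_all.sh 'source /etc/profile && hadoop version | head -1'
# and the symlink should point at the unpacked release
ssh_all.sh 'readlink -f /usr/local/hadoop'
```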

The /data directory is also needed. nn1, nn2 and nn3 already have it, so create it on the other three machines.

### run on s1, s2 and s3
sudo mkdir /data
sudo chown -R hadoop:hadoop /data


Edit core-site.xml

vim /usr/local/hadoop/etc/hadoop/core-site.xml

<configuration>
  <property>
    <name>fs.defaultFS</name>
    <value>hdfs://ns1</value>
    <description>默认文件服务的协议和NS逻辑名称,和hdfs-site.xml里的对应此配置替代了1.0里的fs.default.name</description>
  </property>

  <property>
    <name>hadoop.tmp.dir</name>
    <value>/data/tmp</value>
    <description>数据存储目录</description>
  </property>

  <property>
    <name>hadoop.proxyuser.root.groups</name>
    <value>hadoop</value>
    <description>
    hdfs dfsadmin –refreshSuperUserGroupsConfiguration,
    yarn rmadmin –refreshSuperUserGroupsConfiguration
    使用这两个命令不用重启就能刷新
    </description>
  </property>

  <property>
    <name>hadoop.proxyuser.root.hosts</name>
    <value>localhost</value>
    <description>本地代理</description>
  </property>

  <!-- zkfc的配置 -->
  <property>
    <name>ha.zookeeper.quorum</name>
    <value>nn1:2181,nn2:2181,nn3:2181</value>
    <description>HA使用的zookeeper地址</description>
  </property>
</configuration>

Edit hdfs-site.xml

vim /usr/local/hadoop/etc/hadoop/hdfs-site.xml

<configuration>
<property>
<name>dfs.namenode.name.dir</name>
<value>/data/namenode</value>
<description>namenode本地文件存放地址</description>
</property>

<property>
<name>dfs.nameservices</name>
<value>ns1</value>
<description>提供服务的NS逻辑名称,与core-site.xml里的对应</description>
</property>

<!-- namenode的配置 -->
<!-- 主要的 -->
<property>
<name>dfs.ha.namenodes.ns1</name>
<value>nn1,nn2,nn3</value>
<description>列出该逻辑名称下的NameNode逻辑名称</description>
</property>

<property>
<name>dfs.namenode.rpc-address.ns1.nn1</name>
<value>nn1:9000</value>
<description>指定NameNode的RPC位置</description>
</property>

<property>
<name>dfs.namenode.http-address.ns1.nn1</name>
<value>nn1:50070</value>
<description>指定NameNode的Web Server位置</description>
</property>

<property>
<name>dfs.namenode.rpc-address.ns1.nn2</name>
<value>nn2:9000</value>
<description>指定NameNode的RPC位置</description>
</property>

<property>
<name>dfs.namenode.http-address.ns1.nn2</name>
<value>nn2:50070</value>
<description>指定NameNode的Web Server位置</description>
</property>

<property>
<name>dfs.namenode.rpc-address.ns1.nn3</name>
<value>nn3:9000</value>
<description>指定NameNode的RPC位置</description>
</property>

<property>
<name>dfs.namenode.http-address.ns1.nn3</name>
<value>nn3:50070</value>
<description>指定NameNode的Web Server位置</description>
</property>

<property>
<name>dfs.namenode.handler.count</name>
<value>77</value>
<description>namenode的工作线程数</description>
</property>

<!-- journaldata配置,使得其他两个namenode同步第一个namenode数据 -->
<property>
<name>dfs.namenode.shared.edits.dir</name>
<value>qjournal://nn1:8485;nn2:8485;nn3:8485/ns1</value>
<description>指定用于HA存放edits的共享存储,通常是namenode的所在机器</description>
</property>

<property>
<name>dfs.journalnode.edits.dir</name>
<value>/data/journaldata/</value>
<description>journaldata服务存放文件的地址</description>
</property>

<property>
<name>ipc.client.connect.max.retries</name>
<value>10</value>
<description>namenode和journalnode的链接重试次数10次</description>
</property>

<property>
<name>ipc.client.connect.retry.interval</name>
<value>10000</value>
<description>重试的间隔时间10s</description>
</property>

<!-- zkfc的配置 -->
<property>
<name>dfs.ha.fencing.methods</name>
<value>sshfence</value>
<description>指定HA做隔离的方法,缺省是ssh,可设为shell,稍后详述</description>
</property>

<property>
<name>dfs.ha.fencing.ssh.private-key-files</name>
<value>/home/hadoop/.ssh/id_rsa</value>
<description>杀死命令脚本的免密配置秘钥</description>
</property>

<property>
<name>dfs.client.failover.proxy.provider.ns1</name>
<value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
<description>指定客户端用于HA切换的代理类,不同的NS可以用不同的代理类以上示例为Hadoop 2.0自带的缺省代理类</description>
</property>

<property>
<name>dfs.client.failover.proxy.provider.auto-ha</name>
<value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
</property>

<property>
<name>dfs.ha.automatic-failover.enabled</name>
<value>true</value>
</property>
  <!-- datanode配置 -->
  <property>
    <name>dfs.datanode.data.dir</name>
    <value>/data/datanode</value>
    <description>datanode本地文件存放地址</description>
  </property>
  <property>
    <name>dfs.replication</name>
    <value>3</value>
    <description>文件复本数</description>
  </property>
  <property>
    <name>dfs.namenode.datanode.registration.ip-hostname-check</name>
    <value>false</value>
  </property>
  <property>
    <name>dfs.client.use.datanode.hostname</name>
    <value>true</value>
  </property>
  <property>
    <name>dfs.datanode.use.datanode.hostname</name>
    <value>true</value>
  </property>
</configuration>

Edit hadoop-env.sh

vim /usr/local/hadoop/etc/hadoop/hadoop-env.sh

# add these two lines
source /etc/profile
export HADOOP_HEAPSIZE_MAX=512


Distribute the configuration files

scp_all.sh /usr/local/hadoop/etc/hadoop/core-site.xml /usr/local/hadoop/etc/hadoop/
scp_all.sh /usr/local/hadoop/etc/hadoop/hdfs-site.xml /usr/local/hadoop/etc/hadoop/
scp_all.sh /usr/local/hadoop/etc/hadoop/hadoop-env.sh /usr/local/hadoop/etc/hadoop/

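To confirm the three files are identical everywhere, a checksum comparison such as this (illustrative) can be used:

```bash
# the checksums printed for each host should match
ssh_all.sh 'md5sum /usr/local/hadoop/etc/hadoop/core-site.xml /usr/local/hadoop/etc/hadoop/hdfs-site.xml /usr/local/hadoop/etc/hadoop/hadoop-env.sh'
```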

Initialize the cluster (the individual steps are condensed in the sketch below)
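The rendered excerpt drops the body of this section; condensed from the raw post, the initialization sequence is (same commands as the source, run on the hosts indicated in the comments):

```bash
# 1. start the zookeeper cluster, then the journalnodes (first start only)
ssh_all_zookeeper.sh /usr/local/zookeeper/bin/zkServer.sh start
ssh_all_zookeeper.sh hadoop-daemon.sh start journalnode
# 2. format the ZKFC znode on nn1, then start zkfc on nn1, nn2 and nn3
hdfs zkfc -formatZK
hadoop-daemon.sh start zkfc
# 3. format and start the namenode on nn1
hdfs namenode -format
hadoop-daemon.sh start namenode
# 4. on nn2 and nn3, bootstrap from nn1 and start their namenodes
hdfs namenode -bootstrapStandby
hadoop-daemon.sh start namenode
# 5. list s1, s2, s3 in etc/hadoop/workers, distribute it, then start the datanodes on s1, s2 and s3
scp_all.sh /usr/local/hadoop/etc/hadoop/workers /usr/local/hadoop/etc/hadoop
hadoop-daemons.sh start datanode
```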

Start the cluster

start-dfs.sh
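A few checks after start-dfs.sh; these are standard Hadoop/HDFS commands, shown only as an illustration (nn1 is one of the NameNode IDs configured above):

```bash
ssh_all.sh jps                      # namenodes/zkfc on nn1-nn3, journalnodes, datanodes on s1-s3
hdfs haadmin -getServiceState nn1   # prints active or standby
hdfs dfsadmin -report | head -20    # capacity and the number of live datanodes
```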
","cover":false,"excerpt":"","more":"

"},{"title":"Hadoop集群Zookeeper配置","top_img":"/img/site01.jpg","top_img_height":"800px","abbrlink":61251,"date":"2024-09-11T14:45:40.000Z","_content":"\n### Zookeeper脚本配置\n- 拷贝ips\n ```bash\n cp ips ips_zookeeper\n ```\n 修改为\n ```bash\n nn1\n nn2\n nn3\n ```\n- 拷贝三个脚本\n ```bash\n cp scp_all.sh scp_all_zookeeper.sh \n cp ssh_all.sh ssh_all_zookeeper.sh \n cp ssh_root.sh ssh_root_zookeeper.sh\n ```\n- 修改脚本\n ```shell\n vim scp_all_zookeeper.sh \n vim ssh_all_zookeeper.sh \n vim ssh_root_zookeeper.sh \n ```\n\n 将三个脚本中的ips改为ips_zookeeper\n### Zookeeper安装\n- 上传到`/tmp`目录下,解压\n ```bash\n sudo tar -zxvf /tmp/zookeeper-3.4.8.tar.gz -C /usr/local/\n scp -r /usr/local/zookeeper-3.4.8/ root@nn2:/usr/local/\n scp -r /usr/local/zookeeper-3.4.8/ root@nn3:/usr/local/\n ssh_root_zookeeper.sh chown -R hadoop:hadoop /usr/local/zookeeper-3.4.8\n ssh_root_zookeeper.s ln -s /usr/local/zookeeper-3.4.8/ /usr/local/zookeeper\n ```\n### Zookeeper配置\n - zoo.cfg配置\n ```bash\n cd /usr/local/zookeeper/conf/\n cp zoo_sample.cfg zoo.cfg\n ```\n 然后`vim zoo.cfg`,修改如下:\n ```properties\n # 修改dataDir\n dataDir=/data/zookeeper\n # 添加一下内容\n server.1=nn1:2888:3888 \n server.2=nn2:2888:3888 \n server.3=nn3:2888:3888\n ```\n 分发给nn2、nn3\n `scp_all_zookeeper.sh /usr/local/zookeeper/conf/zoo.cfg /usr/local/zookeeper/conf/`\n\n- `zkEnv.sh`配置\n `vim /usr/local/zookeeper/bin/zkEnv.sh`\n ![000001.png](..%2F..%2F..%2Fimg%2F000001.png)\n 分发到nn2、nn3\n```bash\nscp_all_zookeeper.sh /usr/local/zookeeper/bin/zkEnv.sh /usr/local/zookeeper/bin/\n```\n- 创建zookeeper数据目录\n```bash\nssh_root_zookeeper.sh mkdir -p /data/zookeeper\nssh_root_zookeeper.sh chown -R hadoop:hadoop /data\n```\n- 创建myid文件\n ```bash\n ssh nn1 'echo \"1\" > /data/zookeeper/myid'\n ssh nn2 'echo \"2\" > /data/zookeeper/myid'\n ssh nn3 'echo \"3\" > /data/zookeeper/myid'\n ```\n- 配置Zookeeper环境变量\n ```bash\n # 在其他所有主机也执行\n sudo chown -R hadoop:hadoop /etc/profile.d/myEnv.sh\n ```\n\n ```bash\n echo 'export ZOOKEEPER_HOME=/usr/local/zookeeper' >> /etc/profile.d/myEnv.sh\n echo 'export PATH=$PATH:$ZOOKEEPER_HOME/bin' >> /etc/profile.d/myEnv.sh\n ```\n\n ```bash\n # 分发到nn2、nn3\n scp_all_zookeeper.sh /etc/profile.d/myEnv.sh /etc/profile.d/\n # source 环境变量\n ssh_all_zookeeper.sh source /etc/profile\n ```\n### Zookeeper的命令\n```bash\n#启动zk服务\nssh_all_zookeeper.sh /usr/local/zookeeper/bin/zkServer.sh start\n#查看每个机器ZK运行的状态\nssh_all_zookeeper.sh /usr/local/zookeeper/bin/zkServer.sh status\n#整体停止服务\nssh_all_zookeeper.sh /usr/local/zookeeper/bin/zkServer.sh stop \n#重启zk服务\nssh_all_zookeeper.sh /usr/local/zookeeper/bin/zkServer.sh restart\n```\n\n```shell\n#启动zookeeper客户端,并连接zookeeper集群\n/usr/local/zookeeper/bin/zkCli.sh -server nn1:2181\n# 可以简化为:\nzkCli.sh\n```\n\n","source":"_posts/bigdata/hadoop/zookeper.md","raw":"---\ntitle: Hadoop集群Zookeeper配置\ntop_img: /img/site01.jpg\ntop_img_height: 800px\nabbrlink: 61251\ndate: 2024-09-011 22:45:40\n---\n\n### Zookeeper脚本配置\n- 拷贝ips\n ```bash\n cp ips ips_zookeeper\n ```\n 修改为\n ```bash\n nn1\n nn2\n nn3\n ```\n- 拷贝三个脚本\n ```bash\n cp scp_all.sh scp_all_zookeeper.sh \n cp ssh_all.sh ssh_all_zookeeper.sh \n cp ssh_root.sh ssh_root_zookeeper.sh\n ```\n- 修改脚本\n ```shell\n vim scp_all_zookeeper.sh \n vim ssh_all_zookeeper.sh \n vim ssh_root_zookeeper.sh \n ```\n\n 将三个脚本中的ips改为ips_zookeeper\n### Zookeeper安装\n- 上传到`/tmp`目录下,解压\n ```bash\n sudo tar -zxvf /tmp/zookeeper-3.4.8.tar.gz -C /usr/local/\n scp -r /usr/local/zookeeper-3.4.8/ root@nn2:/usr/local/\n scp -r /usr/local/zookeeper-3.4.8/ root@nn3:/usr/local/\n ssh_root_zookeeper.sh chown -R hadoop:hadoop 
/usr/local/zookeeper-3.4.8\n ssh_root_zookeeper.s ln -s /usr/local/zookeeper-3.4.8/ /usr/local/zookeeper\n ```\n### Zookeeper配置\n - zoo.cfg配置\n ```bash\n cd /usr/local/zookeeper/conf/\n cp zoo_sample.cfg zoo.cfg\n ```\n 然后`vim zoo.cfg`,修改如下:\n ```properties\n # 修改dataDir\n dataDir=/data/zookeeper\n # 添加一下内容\n server.1=nn1:2888:3888 \n server.2=nn2:2888:3888 \n server.3=nn3:2888:3888\n ```\n 分发给nn2、nn3\n `scp_all_zookeeper.sh /usr/local/zookeeper/conf/zoo.cfg /usr/local/zookeeper/conf/`\n\n- `zkEnv.sh`配置\n `vim /usr/local/zookeeper/bin/zkEnv.sh`\n ![000001.png](..%2F..%2F..%2Fimg%2F000001.png)\n 分发到nn2、nn3\n```bash\nscp_all_zookeeper.sh /usr/local/zookeeper/bin/zkEnv.sh /usr/local/zookeeper/bin/\n```\n- 创建zookeeper数据目录\n```bash\nssh_root_zookeeper.sh mkdir -p /data/zookeeper\nssh_root_zookeeper.sh chown -R hadoop:hadoop /data\n```\n- 创建myid文件\n ```bash\n ssh nn1 'echo \"1\" > /data/zookeeper/myid'\n ssh nn2 'echo \"2\" > /data/zookeeper/myid'\n ssh nn3 'echo \"3\" > /data/zookeeper/myid'\n ```\n- 配置Zookeeper环境变量\n ```bash\n # 在其他所有主机也执行\n sudo chown -R hadoop:hadoop /etc/profile.d/myEnv.sh\n ```\n\n ```bash\n echo 'export ZOOKEEPER_HOME=/usr/local/zookeeper' >> /etc/profile.d/myEnv.sh\n echo 'export PATH=$PATH:$ZOOKEEPER_HOME/bin' >> /etc/profile.d/myEnv.sh\n ```\n\n ```bash\n # 分发到nn2、nn3\n scp_all_zookeeper.sh /etc/profile.d/myEnv.sh /etc/profile.d/\n # source 环境变量\n ssh_all_zookeeper.sh source /etc/profile\n ```\n### Zookeeper的命令\n```bash\n#启动zk服务\nssh_all_zookeeper.sh /usr/local/zookeeper/bin/zkServer.sh start\n#查看每个机器ZK运行的状态\nssh_all_zookeeper.sh /usr/local/zookeeper/bin/zkServer.sh status\n#整体停止服务\nssh_all_zookeeper.sh /usr/local/zookeeper/bin/zkServer.sh stop \n#重启zk服务\nssh_all_zookeeper.sh /usr/local/zookeeper/bin/zkServer.sh restart\n```\n\n```shell\n#启动zookeeper客户端,并连接zookeeper集群\n/usr/local/zookeeper/bin/zkCli.sh -server nn1:2181\n# 可以简化为:\nzkCli.sh\n```\n\n","slug":"bigdata/hadoop/zookeper","published":1,"updated":"2024-09-11T14:51:42.706Z","comments":1,"layout":"post","photos":[],"_id":"cm5uuk5pd000pgkah9e8m5jsz","content":"

Zookeeper helper scripts

Zookeeper installation

Zookeeper configuration

Zookeeper commands

# start the zk service
ssh_all_zookeeper.sh /usr/local/zookeeper/bin/zkServer.sh start
# check the ZK state on every machine
ssh_all_zookeeper.sh /usr/local/zookeeper/bin/zkServer.sh status
# stop the service everywhere
ssh_all_zookeeper.sh /usr/local/zookeeper/bin/zkServer.sh stop
# restart the zk service
ssh_all_zookeeper.sh /usr/local/zookeeper/bin/zkServer.sh restart

# start a zookeeper client and connect to the zookeeper cluster
/usr/local/zookeeper/bin/zkCli.sh -server nn1:2181
# can be shortened to:
zkCli.sh
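Once connected with zkCli.sh, a few typical client commands (standard ZooKeeper CLI, shown purely as an illustration) are:

```bash
ls /                  # list root znodes; a hadoop-ha node appears after HDFS HA is formatted
create /demo "hello"  # create a znode holding the string hello
get /demo             # read the data back
delete /demo          # remove the znode
quit                  # leave the client
```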
\n\n","cover":false,"excerpt":"","more":"

\n\n"}],"PostAsset":[],"PostCategory":[{"post_id":"cm5uuk5p9000igkaha5d343gb","category_id":"cm5uuk5pb000lgkahbjmh60g8","_id":"cm5uuk5pe000sgkah9lh2b617"}],"PostTag":[{"post_id":"cm5uuk5p00005gkahhr400nj5","tag_id":"cm5uuk5p20008gkah4gb3145j","_id":"cm5uuk5p7000egkah35t40jmr"},{"post_id":"cm5uuk5p6000dgkah0iah71hw","tag_id":"cm5uuk5p7000fgkahdejp947z","_id":"cm5uuk5p8000ggkahfkogeyp6"},{"post_id":"cm5uuk5p9000hgkahdnb238fm","tag_id":"cm5uuk5p7000fgkahdejp947z","_id":"cm5uuk5pa000jgkah4el5fywv"},{"post_id":"cm5uuk5p9000igkaha5d343gb","tag_id":"cm5uuk5pc000mgkah6vq81ota","_id":"cm5uuk5pe000rgkahe3yxdyai"},{"post_id":"cm5uuk5pc000ngkahczoqfdow","tag_id":"cm5uuk5pe000qgkah5bcb2btn","_id":"cm5uuk5pe000tgkahdymu0vho"}],"Tag":[{"name":"machinelearning","_id":"cm5uuk5p20008gkah4gb3145j"},{"name":"uniapp","_id":"cm5uuk5p7000fgkahdejp947z"},{"name":"古文观止","_id":"cm5uuk5pc000mgkah6vq81ota"},{"name":"网络代理","_id":"cm5uuk5pe000qgkah5bcb2btn"}]}} \ No newline at end of file +{"meta":{"version":1,"warehouse":"5.0.1"},"models":{"Asset":[{"_id":"themes/butterfly/source/css/index.styl","path":"css/index.styl","modified":1,"renderable":1},{"_id":"themes/butterfly/source/css/var.styl","path":"css/var.styl","modified":1,"renderable":1},{"_id":"themes/butterfly/source/img/404.jpg","path":"img/404.jpg","modified":1,"renderable":1},{"_id":"themes/butterfly/source/img/avatar.jpg","path":"img/avatar.jpg","modified":1,"renderable":1},{"_id":"themes/butterfly/source/img/friend_404.gif","path":"img/friend_404.gif","modified":1,"renderable":1},{"_id":"themes/butterfly/source/img/favicon.png","path":"img/favicon.png","modified":1,"renderable":1},{"_id":"themes/butterfly/source/js/main.js","path":"js/main.js","modified":1,"renderable":1},{"_id":"themes/butterfly/source/js/tw_cn.js","path":"js/tw_cn.js","modified":1,"renderable":1},{"_id":"themes/butterfly/source/js/utils.js","path":"js/utils.js","modified":1,"renderable":1},{"_id":"themes/butterfly/source/js/search/algolia.js","path":"js/search/algolia.js","modified":1,"renderable":1},{"_id":"themes/butterfly/source/js/search/local-search.js","path":"js/search/local-search.js","modified":1,"renderable":1},{"_id":"source/img/000001.png","path":"img/000001.png","modified":1,"renderable":0},{"_id":"source/img/dingyue.png","path":"img/dingyue.png","modified":1,"renderable":0},{"_id":"source/img/peiqian.png","path":"img/peiqian.png","modified":1,"renderable":0},{"_id":"source/img/site01.jpg","path":"img/site01.jpg","modified":1,"renderable":0},{"_id":"source/img/yiyuan.png","path":"img/yiyuan.png","modified":1,"renderable":0},{"_id":"source/img/machinelearning/cros-valid.png","path":"img/machinelearning/cros-valid.png","modified":1,"renderable":0},{"_id":"source/img/machinelearning/kfold-skfold.png","path":"img/machinelearning/kfold-skfold.png","modified":1,"renderable":0},{"_id":"source/img/machinelearning/knn-01.png","path":"img/machinelearning/knn-01.png","modified":1,"renderable":0}],"Cache":[{"_id":"source/about/index.md","hash":"31fdd093368e1c18c1592a8cad6f3c3fe6d6711a","modified":1723257494626},{"_id":"source/_data/link.yml","hash":"6aaf04b5c920e403bea8f82e4b3f4d719760e6df","modified":1723257824766},{"_id":"source/_posts/hello-world.md","hash":"40e804610ff712f079ace7012b862b4efecf82fb","modified":1723206110026},{"_id":"source/categories/index.md","hash":"49618dce0bee26dfc65f0de1794f01d2967cb7b8","modified":1722407843090},{"_id":"source/tags/index.md","hash":"1f27b735b6c7d629c8931b5bd3913bdd659f1981","modified":1722385980076},{"_id":"source/link/index.md","hash":"26e21fe4
6bf7c0fc5ae95b73d52bf516368d6dc7","modified":1723257768927},{"_id":"source/_posts/machinelearning/knn.md","hash":"6e6c444dc7be367d0cc131b29c02414e619e416e","modified":1736846489069},{"_id":"source/music/index.md","hash":"d8dcf467af235e0bac09805db3a4ab73ad782b83","modified":1723257619897},{"_id":"source/img/machinelearning/kfold-skfold.png","hash":"ab841f5412b7ea773468a3facff643df68a88c01","modified":1736846071435},{"_id":"source/img/machinelearning/knn-01.png","hash":"2a931981d1d5f72d6b42b39b8ef313569eab853e","modified":1736766866160},{"_id":"source/_posts/net/index.md","hash":"7eae8512c2a6bd937200487540b962d65a47ad9e","modified":1723206110028},{"_id":"source/_posts/frontend/frontend.md","hash":"3770215d35203b03d86d4f3a6ceee32f9849b584","modified":1723206110023},{"_id":"source/_posts/ancient/guwenguanzhi/1.md","hash":"629652f1d8e2a347e6b11c367d25b0a26698cd60","modified":1723257063678},{"_id":"source/movies/index.md","hash":"4abf7a6a712f712f22d80203f6004a89feca2014","modified":1723257633715},{"_id":"source/_posts/frontend/deploy/deploy.md","hash":"ba4a05741f30f92f9fbe7b815519142de09b13fb","modified":1723206110045},{"_id":"source/_posts/frontend/uniapp/uniapp.md","hash":"8c8594e3eb73d2c10c4c6dfc008b58c36763b054","modified":1723206110039},{"_id":"source/_posts/bigdata/hadoop/env.md","hash":"4b184c804e9c8083966b1360fe9d5aa539930005","modified":1726065928095},{"_id":"source/_posts/bigdata/hadoop/hdfs.md","hash":"3148149529354105eb301c70961b6f1b68030835","modified":1726066302712},{"_id":"source/_posts/frontend/uniapp/component1.md","hash":"b9b981d3903f5e57e7f327d930df4f04780211f9","modified":1723206110042},{"_id":"source/_posts/linux/script.md","hash":"2deda929ed0c81ddcd00945c673b29f1bd1353c0","modified":1723997145731},{"_id":"themes/butterfly/_config.yml","hash":"c6fab3700a6502b5790ca20d20113020544ea048","modified":1722500710088},{"_id":"source/_posts/net/jichang/jichang.md","hash":"ad0485f33d2f0a8eea342c815d869911433df370","modified":1723206110036},{"_id":"themes/butterfly/plugins.yml","hash":"7bb2c7350c0c57850aa30213cd0f26553a614702","modified":1722500710224},{"_id":"themes/butterfly/package.json","hash":"2b6fb6f62b9fa6a829311ffd532ae760fbd0a7db","modified":1722500710223},{"_id":"themes/butterfly/.github/FUNDING.yml","hash":"3b572099a992e30267f5fe4cd3c582ff7ac9f083","modified":1722500710075},{"_id":"themes/butterfly/languages/default.yml","hash":"90e9e2f36dc51aa77eb7804ae048b4876035b12d","modified":1722500710090},{"_id":"themes/butterfly/languages/en.yml","hash":"68127be0e6b44cfc5f31353d8b275c02939b3ff9","modified":1722500710092},{"_id":"themes/butterfly/languages/zh-CN.yml","hash":"2dcc70a011b37890215ae0fd6d8f8c78aa8af6b0","modified":1722500710093},{"_id":"themes/butterfly/languages/zh-TW.yml","hash":"1392e7b8c678cdfb54f55523693e66abc7d80538","modified":1722500710094},{"_id":"source/_posts/bigdata/hadoop/zookeper.md","hash":"96e58c86eba61accb620adc5e7b7dbc46e47b237","modified":1726066302706},{"_id":"themes/butterfly/layout/archive.pug","hash":"bc77220dfc269b8faad0930e1a4142ebf68165e5","modified":1722500710096},{"_id":"themes/butterfly/layout/category.pug","hash":"bf979aec88d78b644fc5d31518f8679ad7625792","modified":1722500710097},{"_id":"themes/butterfly/layout/page.pug","hash":"bf2d6c6d2d156777b55292e51be02b0b3acf0af8","modified":1722500710219},{"_id":"themes/butterfly/layout/tag.pug","hash":"4bb5efc6dabdf1626685bf6771aaa1467155ae86","modified":1722500710222},{"_id":"themes/butterfly/layout/index.pug","hash":"648dcbdb3d145a710de81c909e000e8664d2ac9c","modified":1722500710218},{"_id":"themes/
butterfly/LICENSE","hash":"c8bc7df08db9dd3b39c2c2259a163a36cf2f6808","modified":1722500710084},{"_id":"themes/butterfly/layout/post.pug","hash":"fdbb508b5e6dec30fb8753c5a7fdd494410c4fc0","modified":1722500710220},{"_id":"themes/butterfly/.github/ISSUE_TEMPLATE/bug_report.yml","hash":"eed9190301095b35081aa2658204cc3f15b9f5e1","modified":1722500710076},{"_id":"themes/butterfly/.github/ISSUE_TEMPLATE/config.yml","hash":"63ad2249ad09fb3fe21bd5ff9adefb304a7ab24a","modified":1722500710077},{"_id":"themes/butterfly/layout/includes/404.pug","hash":"aace9ddff469de4226e47a52ede1c81e66d66d5c","modified":1722500710100},{"_id":"themes/butterfly/layout/includes/footer.pug","hash":"8715948b93e7508b84d913be1969b28c6b067b9b","modified":1722500710102},{"_id":"themes/butterfly/layout/includes/additional-js.pug","hash":"50eea5aa78cdeb6c72dd22f0aeabc407cc0f712e","modified":1722500710101},{"_id":"themes/butterfly/layout/includes/head.pug","hash":"ea8d4e8ac6af93cd268ba8f6ffcb80417bc2501e","modified":1722500710103},{"_id":"themes/butterfly/layout/includes/layout.pug","hash":"96df62e34661d8ca4a45267286127479e5178a79","modified":1722500710123},{"_id":"themes/butterfly/layout/includes/pagination.pug","hash":"c5c58714fb3cb839653e5c32e6094784c8662935","modified":1722500710135},{"_id":"themes/butterfly/.github/ISSUE_TEMPLATE/feature_request.yml","hash":"6e0f9470b18bd37d4891282ac73d61676b040e8c","modified":1722500710079},{"_id":"themes/butterfly/layout/includes/rightside.pug","hash":"f448bf73103b88de4443e52d600e871cf3de3e32","modified":1722500710138},{"_id":"themes/butterfly/layout/includes/sidebar.pug","hash":"9277fead4c29dbe93976f078adaa26e8f9253da3","modified":1722500710140},{"_id":"themes/butterfly/scripts/events/comment.js","hash":"95479790234c291b064d031577d71214cdd1d820","modified":1722500710229},{"_id":"themes/butterfly/scripts/events/cdn.js","hash":"7864ba45716c51aef8d8b04fd4bc212e0008ce3b","modified":1722500710226},{"_id":"themes/butterfly/scripts/events/init.js","hash":"ce68e84a9ccfcf91100befbaa9afc392a0cd93bb","modified":1722500710229},{"_id":"themes/butterfly/scripts/events/merge_config.js","hash":"b1dfc3c898b886eab1241b068fc27d7a26a3b7d2","modified":1722500710230},{"_id":"themes/butterfly/scripts/events/stylus.js","hash":"0a336dfe5ed08952fa0df1532421df38a74a20d6","modified":1722500710231},{"_id":"themes/butterfly/scripts/events/welcome.js","hash":"f59e10305fef59ea3e62a7395106c0927582879d","modified":1722500710232},{"_id":"themes/butterfly/scripts/filters/post_lazyload.js","hash":"5ed2d7ef240c927fe1b7a7fb5bf9e55e2bfd55a5","modified":1722500710233},{"_id":"themes/butterfly/scripts/filters/random_cover.js","hash":"0df22d7dbfa766a65cb6032a1f003348f4307cfe","modified":1722500710234},{"_id":"themes/butterfly/scripts/helpers/aside_categories.js","hash":"cdd992c8577d583c237b6aac9f5077d8200879b2","modified":1722500710237},{"_id":"themes/butterfly/scripts/helpers/aside_archives.js","hash":"4f712b4ea383b59a3122683db1d54c04a79ccc5d","modified":1722500710236},{"_id":"themes/butterfly/scripts/events/404.js","hash":"f1d1c378356b776e9b2a8411e6dca88dc8c3245c","modified":1722500710226},{"_id":"themes/butterfly/scripts/helpers/page.js","hash":"c74d6a9b8f71e69447f7847a5f5e81555d68b140","modified":1722500710240},{"_id":"themes/butterfly/scripts/helpers/findArchiveLength.js","hash":"b12895e0765d596494e5526d121de0dd5a7c23d3","modified":1722500710238},{"_id":"themes/butterfly/scripts/helpers/related_post.js","hash":"76343ac8422c9c8539082e77eda6ffee4b877eb2","modified":1722500710241},{"_id":"themes/butterfly/scripts/helpers/series.
js","hash":"17c0095bc8d612a268cdcab000b1742dc4c6f811","modified":1722500710243},{"_id":"themes/butterfly/scripts/tag/button.js","hash":"164d5f1c2d1b4cb5a813a6fc574016743a53c019","modified":1722500710244},{"_id":"themes/butterfly/scripts/tag/flink.js","hash":"3ba7677969ff01fab06fc6713455ddc6861f0024","modified":1722500710245},{"_id":"themes/butterfly/scripts/tag/gallery.js","hash":"7ec77b3093f5de67e7032f40a5b12f1389f6f6ff","modified":1722500710247},{"_id":"themes/butterfly/scripts/tag/hide.js","hash":"5d08c3552f7d3c80a724ca628bff66321abe2e5a","modified":1722500710248},{"_id":"themes/butterfly/scripts/tag/inlineImg.js","hash":"c863d2732ce4bdc084f2d0db92f50f80328c1007","modified":1722500710248},{"_id":"themes/butterfly/scripts/tag/label.js","hash":"b013dc0a3d57d2caa18b89263f23871da9ec456d","modified":1722500710248},{"_id":"themes/butterfly/scripts/tag/mermaid.js","hash":"289f85847c58f0b2b7d98a68e370a2896edb8949","modified":1722500710250},{"_id":"themes/butterfly/README_CN.md","hash":"9d729ee2ffc5e5f703ccfbfbbb7b286d59071125","modified":1722500710087},{"_id":"themes/butterfly/scripts/tag/score.js","hash":"35d54adc92e717cc32e13515122b025fd1a98ea2","modified":1722500710252},{"_id":"themes/butterfly/scripts/tag/note.js","hash":"e68d8d21f3a86e3646907a3685550ee20e8d4a9f","modified":1722500710252},{"_id":"themes/butterfly/scripts/tag/timeline.js","hash":"e611074a5a7f489a8b04afac0a3f7f882ce26532","modified":1722500710255},{"_id":"themes/butterfly/scripts/tag/series.js","hash":"dc56e5182dd3813dc977c9bf8556dcc7615e467b","modified":1722500710252},{"_id":"themes/butterfly/scripts/tag/tabs.js","hash":"7c448886f230adb4f4a0208c88fff809abcb5637","modified":1722500710254},{"_id":"themes/butterfly/source/css/index.styl","hash":"b13d96924a5534bff91d75566b196ac87b4fac22","modified":1722500710313},{"_id":"themes/butterfly/scripts/helpers/inject_head_js.js","hash":"b55f71347d2ead097c7f98c0ec792b091433345c","modified":1722500710239},{"_id":"themes/butterfly/README.md","hash":"20a91bea7f7ada8b8195d2abff106a7ce21bba20","modified":1722500710085},{"_id":"themes/butterfly/source/css/var.styl","hash":"950250f66faeb611a67540e0fa6cedbcf5a7a321","modified":1722500710313},{"_id":"themes/butterfly/.github/workflows/stale.yml","hash":"4040c76547e270aaf184e9b219a44ca41bbb1b9f","modified":1722500710082},{"_id":"themes/butterfly/source/img/favicon.png","hash":"3cf89864b4f6c9b532522a4d260a2e887971c92d","modified":1722500710317},{"_id":"themes/butterfly/source/js/main.js","hash":"59cd756a94ecdf3ec7b18f50691a8a6305f7a65a","modified":1722500710321},{"_id":"themes/butterfly/.github/workflows/publish.yml","hash":"e320b40c051bae1549156cd5ea4a51383cf78598","modified":1722500710080},{"_id":"themes/butterfly/source/js/utils.js","hash":"7b871fe0c4456660cff4c7b9cc4ed089adac2caf","modified":1722500710327},{"_id":"themes/butterfly/layout/includes/head/analytics.pug","hash":"c7666a10448edd93f5ace37296051b7670495f1b","modified":1722500710106},{"_id":"themes/butterfly/source/img/avatar.jpg","hash":"cb0941101c6a6b8f762ce6ffc3c948641e7f642f","modified":1722500710316},{"_id":"themes/butterfly/layout/includes/head/config.pug","hash":"39e1ca0a54eb5fd3688a78737417a1aaa50914c9","modified":1722500710108},{"_id":"themes/butterfly/layout/includes/head/config_site.pug","hash":"bd5dd5452e28a4fe94c3241a758ec6f4fdb7a149","modified":1722500710109},{"_id":"themes/butterfly/layout/includes/head/google_adsense.pug","hash":"f29123e603cbbcc6ce277d4e8f600ba67498077c","modified":1722500710109},{"_id":"themes/butterfly/layout/includes/head/pwa.pug","hash":"6dc2c9b85df9a
b4f5b554305339fd80a90a6cf43","modified":1722500710113},{"_id":"themes/butterfly/layout/includes/head/site_verification.pug","hash":"5168caadc4cf541f5d6676a9c5e8ae47a948f9ad","modified":1722500710114},{"_id":"themes/butterfly/layout/includes/head/preconnect.pug","hash":"a7c929b90ae52b78b39b1728e3ab0e3db1cb7b9a","modified":1722500710111},{"_id":"themes/butterfly/layout/includes/header/index.pug","hash":"1bef867c799ba158c5417272fb137539951aa120","modified":1722500710117},{"_id":"themes/butterfly/layout/includes/header/nav.pug","hash":"962ee70a35e60a13c31eea47d16b9f98069fe417","modified":1722500710119},{"_id":"themes/butterfly/layout/includes/header/menu_item.pug","hash":"ca8bcd90ad9467819330bfe7c02b76322754bccf","modified":1722500710118},{"_id":"themes/butterfly/layout/includes/header/social.pug","hash":"7a641b5dd45b970e1dafd1433eb32ea149e55cf2","modified":1722500710121},{"_id":"themes/butterfly/layout/includes/header/post-info.pug","hash":"cc99b2dc5c6b1f74391b0da609853ebc11de9610","modified":1722500710120},{"_id":"themes/butterfly/layout/includes/loading/pace.pug","hash":"a6fde4835d6460ce7baf792fd5e1977fad73db25","modified":1722500710127},{"_id":"themes/butterfly/layout/includes/loading/index.pug","hash":"00ae419f527d8225a2dc03d4f977cec737248423","modified":1722500710126},{"_id":"themes/butterfly/layout/includes/loading/fullpage-loading.pug","hash":"766baca6ddce49d1724a02312387b292ff2d0bdc","modified":1722500710125},{"_id":"themes/butterfly/layout/includes/mixins/article-sort.pug","hash":"9155f01d4c644a2e19b2b13b2d3c6d5e34dd0abf","modified":1722500710128},{"_id":"themes/butterfly/layout/includes/page/categories.pug","hash":"5276a8d2835e05bd535fedc9f593a0ce8c3e8437","modified":1722500710131},{"_id":"themes/butterfly/layout/includes/mixins/post-ui.pug","hash":"90eb453b14f6b5c25bfd8d28aa67783603a1411d","modified":1722500710129},{"_id":"themes/butterfly/layout/includes/page/default-page.pug","hash":"e9459f122af7b733398578f9f0f8ab3c5e12a217","modified":1722500710131},{"_id":"themes/butterfly/layout/includes/page/tags.pug","hash":"12be059c536490af216a397e8f2a7abbf6d4610e","modified":1722500710134},{"_id":"themes/butterfly/layout/includes/page/flink.pug","hash":"e37681bc9c169d4220f26ecda2b3d5c02b6b9a0f","modified":1722500710133},{"_id":"themes/butterfly/layout/includes/post/reward.pug","hash":"912df10a053db3135968e92b6fd1a707ee94c968","modified":1722500710138},{"_id":"themes/butterfly/layout/includes/post/post-copyright.pug","hash":"0abad416b1974a17e5be7817931d5fe799180170","modified":1722500710136},{"_id":"themes/butterfly/layout/includes/third-party/aplayer.pug","hash":"e939344fd389aeb11864ee697d5fd9b036d8325f","modified":1722500710146},{"_id":"themes/butterfly/layout/includes/head/Open_Graph.pug","hash":"c8dbdfe6145a0bc6f7691c9551be8169a2698f0a","modified":1722500710105},{"_id":"themes/butterfly/layout/includes/third-party/pangu.pug","hash":"f0898509da70388b5c532f19e762756d74080200","modified":1722500710191},{"_id":"themes/butterfly/layout/includes/widget/card_ad.pug","hash":"a8312b527493dabbadbb1280760168d3bc909a3b","modified":1722500710204},{"_id":"themes/butterfly/layout/includes/third-party/effect.pug","hash":"43014bfc63583d3ee8808d526dd165848c0ed52f","modified":1722500710177},{"_id":"themes/butterfly/layout/includes/third-party/prismjs.pug","hash":"08979afbfecb4476a5ae8e360947b92624d285b8","modified":1722500710194},{"_id":"themes/butterfly/layout/includes/third-party/subtitle.pug","hash":"dfb5e16a7e7106bb20b2ac2d0df1251d0fc79609","modified":1722500710202},{"_id":"themes/butterfly/layout/incl
udes/widget/card_announcement.pug","hash":"21e019bdc3b1e796bb00976bb29af2d51f873624","modified":1722500710205},{"_id":"themes/butterfly/layout/includes/widget/card_bottom_self.pug","hash":"1dba77d250eeebfb6e293d504352c7e9ea31980b","modified":1722500710208},{"_id":"themes/butterfly/layout/includes/widget/card_archives.pug","hash":"73d33b6930e7944187a4b3403daf25d27077a2dd","modified":1722500710206},{"_id":"themes/butterfly/layout/includes/widget/card_author.pug","hash":"ab037bf5794638bd30da4cf7cf106e5d03b5f696","modified":1722500710207},{"_id":"themes/butterfly/layout/includes/widget/card_categories.pug","hash":"66e383b4ef374951eb87dd1bf4cdb7a667193fb5","modified":1722500710209},{"_id":"themes/butterfly/layout/includes/widget/card_newest_comment.pug","hash":"8e22f53886a57a68286970d8af8b4c950fd4a1d7","modified":1722500710210},{"_id":"themes/butterfly/layout/includes/widget/card_post_series.pug","hash":"e0bb72fa0ce15964b11b8fe421cae3432394e35f","modified":1722500710210},{"_id":"themes/butterfly/layout/includes/widget/card_post_toc.pug","hash":"d48d77af1670bd568d784794408bf524a448bfcc","modified":1722500710211},{"_id":"themes/butterfly/layout/includes/widget/card_recent_post.pug","hash":"bb842d2aa6469d65bf06af1372f0a19a9e4ef44c","modified":1722500710214},{"_id":"themes/butterfly/layout/includes/widget/card_tags.pug","hash":"842b772a387b576550fa127030e1c2e9bf65716d","modified":1722500710215},{"_id":"themes/butterfly/layout/includes/widget/index.pug","hash":"8df529f71e25f1c0a00e533de7944ed3d1ba7bd8","modified":1722500710217},{"_id":"themes/butterfly/layout/includes/widget/card_top_self.pug","hash":"7b5ae404a1205546b7de4be42291315cf918f2b3","modified":1722500710215},{"_id":"themes/butterfly/layout/includes/widget/card_webinfo.pug","hash":"12185713f9ca08984fc74e3b69d8cd6828d23da8","modified":1722500710216},{"_id":"themes/butterfly/source/css/_highlight/highlight.styl","hash":"41054740cfbd1357138785464f6859681ca58493","modified":1722500710260},{"_id":"themes/butterfly/source/css/_highlight/theme.styl","hash":"3c178608406c31d768af355ef1d7326da37cc75f","modified":1722500710268},{"_id":"themes/butterfly/source/css/_global/function.styl","hash":"e920dae9ce00177922468db49240f5aca0af4f64","modified":1722500710258},{"_id":"themes/butterfly/source/css/_global/index.styl","hash":"0421da07907b3d98df64239e073b23fbb3f04149","modified":1722500710259},{"_id":"themes/butterfly/source/css/_layout/comments.styl","hash":"fbfce4d67cacd1df22fb73d89d008693f59d9d91","modified":1722500710273},{"_id":"themes/butterfly/source/css/_layout/aside.styl","hash":"aae70ddd126b2e40158e45036abecbfa33cbfbba","modified":1722500710270},{"_id":"themes/butterfly/source/css/_layout/chat.styl","hash":"792a04d36de32f230ca3256ad87a90fe8392f333","modified":1722500710272},{"_id":"themes/butterfly/source/css/_layout/footer.styl","hash":"5e27f7842af82ff7498d4b59787ce9ca90fa9e6f","modified":1722500710275},{"_id":"themes/butterfly/source/css/_layout/loading.styl","hash":"f0b01bbf321c2c24fdccaee367dd9fd448031a72","modified":1722500710277},{"_id":"themes/butterfly/source/css/_layout/pagination.styl","hash":"bd099f7d3adef4b7edd24c0a25a07415b156e587","modified":1722500710278},{"_id":"themes/butterfly/source/css/_layout/head.styl","hash":"dd5d9a5631b682610ea699541b8246ceaa56fddb","modified":1723206391886},{"_id":"themes/butterfly/layout/includes/third-party/pjax.pug","hash":"9b734d99963f3e7f562597dcf60485ccbf6e961c","modified":1722500710192},{"_id":"themes/butterfly/source/css/_layout/post.styl","hash":"7ae27854a737a02eca89b0b92db94cb298fef59e","modifie
d":1722500710280},{"_id":"themes/butterfly/source/css/_layout/relatedposts.styl","hash":"6dcf19c0933c8828a439f801b0f4b256447dec07","modified":1722500710281},{"_id":"themes/butterfly/source/css/_layout/reward.styl","hash":"c0b11a1a5f52e3a6af4e312a8134c93eda18a7dd","modified":1722500710281},{"_id":"themes/butterfly/source/css/_layout/third-party.styl","hash":"15ea7564b2e3bf46bc91fb6e49c94d057b37caaf","modified":1722500710283},{"_id":"themes/butterfly/source/css/_layout/sidebar.styl","hash":"80ee9d0bfe5d38aac1f0cdcea5fc88b71d310041","modified":1722500710283},{"_id":"themes/butterfly/source/css/_mode/readmode.styl","hash":"a22fd15048d21452f0015d0765d295d730203308","modified":1723078297080},{"_id":"themes/butterfly/source/css/_mode/darkmode.styl","hash":"dbc855795a881f8c805bf5c9c5c4d5d542a648ec","modified":1722500710286},{"_id":"themes/butterfly/source/css/_page/404.styl","hash":"a7223a8fcc4fa7b81e552c9a2554be7df9de312e","modified":1722500710289},{"_id":"themes/butterfly/source/css/_page/categories.styl","hash":"68bc8cbea25dbb3cdc170f09f9b43ce130547717","modified":1722500710292},{"_id":"themes/butterfly/source/css/_page/archives.styl","hash":"5dd1ba997741d02894ff846eda939ad8051c0bb2","modified":1722500710290},{"_id":"themes/butterfly/source/css/_page/flink.styl","hash":"ecc2b2e28c179eb9406fc2c6f00e141078249cdd","modified":1722500710294},{"_id":"themes/butterfly/source/css/_page/common.styl","hash":"df7a51fcabbadab5aa31770e3202a47c9599bbb7","modified":1722500710293},{"_id":"themes/butterfly/source/css/_page/tags.styl","hash":"9e35f91847773b915c74a78b8aa66c7bdb950ad0","modified":1722500710296},{"_id":"themes/butterfly/source/css/_page/homepage.styl","hash":"a977cd8161ef4d6ddd5293e81403519076657430","modified":1722500710295},{"_id":"themes/butterfly/source/img/404.jpg","hash":"fb4489bc1d30c93d28f7332158c1c6c1416148de","modified":1722500710315},{"_id":"themes/butterfly/source/css/_search/index.styl","hash":"0b23010154e19f37f0c4af0110f9f834d6d41a13","modified":1722500710298},{"_id":"themes/butterfly/source/css/_search/local-search.styl","hash":"8a53d7ba5ca2f5eb4124b684e7845b648583f658","modified":1722500710301},{"_id":"themes/butterfly/source/css/_tags/button.styl","hash":"62da1de0d5b8453fcecbfacddb16985265638ba5","modified":1722500710302},{"_id":"themes/butterfly/source/css/_tags/gallery.styl","hash":"3e9355b76f87e2ee90f652855282b37ab5ae0b3e","modified":1722500710304},{"_id":"themes/butterfly/source/css/_tags/hexo.styl","hash":"985b183db7b7bfd8f9bdb60494549fb7f850348b","modified":1722500710305},{"_id":"themes/butterfly/source/css/_tags/hide.styl","hash":"b7cf7753479fcf2fe07287ffdb0e568adbba4c18","modified":1722500710306},{"_id":"themes/butterfly/source/css/_tags/note.styl","hash":"4929382bd60788d34752a66e2fe764ef797a72a0","modified":1722500710308},{"_id":"themes/butterfly/source/css/_tags/inlineImg.styl","hash":"5a873d01fabebcf7ddf7a6b1c2e2e5e2714097f4","modified":1722500710307},{"_id":"themes/butterfly/source/css/_tags/label.styl","hash":"2f83bd145b870d80d4b18b0ac603235229a5694e","modified":1722500710307},{"_id":"themes/butterfly/source/css/_tags/tabs.styl","hash":"353b95f9a6c2c1e777d978118cb61f909ccbf89c","modified":1722500710309},{"_id":"themes/butterfly/source/img/friend_404.gif","hash":"8d2d0ebef70a8eb07329f57e645889b0e420fa48","modified":1722500710319},{"_id":"themes/butterfly/source/css/_tags/timeline.styl","hash":"07ea7134db7a66c87658116f089fb1a2a6906563","modified":1722500710310},{"_id":"themes/butterfly/source/css/_third-party/normalize.min.css","hash":"8549829fb7d3c21cd9e119884962e8c463a4
a267","modified":1722500710312},{"_id":"themes/butterfly/source/js/search/local-search.js","hash":"ab3904451ae1d78903424b8b2ef815c8571e1749","modified":1722500710325},{"_id":"themes/butterfly/layout/includes/third-party/abcjs/abcjs.pug","hash":"8f95aca305b56ccd7c8c7367b03d26db816ebd5f","modified":1722500710143},{"_id":"themes/butterfly/layout/includes/third-party/card-post-count/disqus.pug","hash":"d6fff5a7f84c8b09f282f9ddc0020a68a8aac9ea","modified":1722500710148},{"_id":"themes/butterfly/layout/includes/third-party/abcjs/index.pug","hash":"58f37823f6cd9a194fb50f7ca7c2233e49939034","modified":1722500710144},{"_id":"themes/butterfly/source/css/_layout/rightside.styl","hash":"0322237e762db401d7b4aa33168d0b9334a9ec26","modified":1722500710282},{"_id":"themes/butterfly/layout/includes/third-party/card-post-count/index.pug","hash":"846cabae287ae31b3bbfac3da022475713dd5ecc","modified":1722500710151},{"_id":"themes/butterfly/layout/includes/third-party/card-post-count/artalk.pug","hash":"b03ee8625149191f9d5d057bbc9824b68d8dd0c4","modified":1722500710147},{"_id":"themes/butterfly/layout/includes/third-party/card-post-count/remark42.pug","hash":"716dc463fe4ef5112e7018ed60804125fdfa5cad","modified":1722500710151},{"_id":"themes/butterfly/layout/includes/third-party/card-post-count/valine.pug","hash":"cd4fc9c5a61608a5dedf645c1295430a1623040f","modified":1722500710153},{"_id":"themes/butterfly/layout/includes/third-party/card-post-count/fb.pug","hash":"4b98145d6584d586cabf033493282afc72ae816a","modified":1722500710149},{"_id":"themes/butterfly/layout/includes/third-party/card-post-count/twikoo.pug","hash":"7e233f872aea6fd6beccdc9efd86b1bf9ec9f12d","modified":1722500710152},{"_id":"themes/butterfly/layout/includes/third-party/card-post-count/waline.pug","hash":"fd2320ee25507bb8ef49f932c2d170586b44ea4d","modified":1722500710154},{"_id":"themes/butterfly/layout/includes/third-party/chat/chatra.pug","hash":"08a85e52fc800d3562df869e5e2613313e76fce6","modified":1722500710156},{"_id":"themes/butterfly/layout/includes/third-party/chat/crisp.pug","hash":"09d2ab2570b67e6f09244a898ccab5567cb82ace","modified":1722500710156},{"_id":"themes/butterfly/layout/includes/third-party/chat/index.pug","hash":"1157118db9f5d7c0c5a0fc7c346f6e934ca00d52","modified":1722500710159},{"_id":"themes/butterfly/layout/includes/third-party/chat/daovoice.pug","hash":"0d960849d5b05d27ec87627b983ca35f2411b9e8","modified":1722500710158},{"_id":"themes/butterfly/layout/includes/third-party/chat/messenger.pug","hash":"799da8f3015e6fe440681b21644bcb3810a5518c","modified":1722500710160},{"_id":"themes/butterfly/layout/includes/third-party/chat/tidio.pug","hash":"6d40b521eec4136f6742c548a4445ed593470b1b","modified":1722500710161},{"_id":"themes/butterfly/layout/includes/third-party/comments/artalk.pug","hash":"5373b822aa72ddb96f2f1f4baf6c058b40d705d6","modified":1722500710163},{"_id":"themes/butterfly/layout/includes/third-party/comments/disqus.pug","hash":"364d1fd655baca9132038ef1e312abde2c0bc7de","modified":1722500710164},{"_id":"themes/butterfly/layout/includes/third-party/comments/disqusjs.pug","hash":"f78c9c20c86d58c7cf099f6f8d6097103d7d43e5","modified":1722500710165},{"_id":"themes/butterfly/layout/includes/third-party/comments/giscus.pug","hash":"1eab7ca1cb16c6786f9c3ca0efef8cc15e444ab4","modified":1722500710167},{"_id":"themes/butterfly/layout/includes/third-party/comments/facebook_comments.pug","hash":"11f5dca1432e59f22955aaf4ac3e9de6b286d887","modified":1722500710166},{"_id":"themes/butterfly/layout/includes/third-party/comments/gita
lk.pug","hash":"1c86c8fc1a28514a02a1f6a25ca9ec05eb3955b7","modified":1722500710168},{"_id":"themes/butterfly/layout/includes/third-party/comments/index.pug","hash":"db6713d2b90eb8183f86ac92c26761a8501c0ddb","modified":1722500710169},{"_id":"themes/butterfly/layout/includes/third-party/comments/js.pug","hash":"3abbaaa4ea575c45b3cebffd40bad1acc6ffce84","modified":1722500710170},{"_id":"themes/butterfly/layout/includes/third-party/comments/livere.pug","hash":"09c2ef4bc6d005f96dfa48b1d9af1ec095c5266d","modified":1722500710171},{"_id":"themes/butterfly/layout/includes/third-party/comments/remark42.pug","hash":"7f450664e6323a076ae59c393b0f22167cfa82e5","modified":1722500710172},{"_id":"themes/butterfly/layout/includes/third-party/comments/twikoo.pug","hash":"9942a903227350960c1d0716e59516ae79ac24a8","modified":1722500710173},{"_id":"themes/butterfly/layout/includes/third-party/comments/utterances.pug","hash":"b65a42167df5fb07e2a63f312a58c321d3112a90","modified":1722500710174},{"_id":"themes/butterfly/layout/includes/third-party/comments/valine.pug","hash":"4ed7c74087e81c6fcaf4fca7dced58b4e19f4cb1","modified":1722500710175},{"_id":"themes/butterfly/layout/includes/third-party/math/index.pug","hash":"2afa4c21dd19890f47fb568cfb0d90efb676a253","modified":1722500710179},{"_id":"themes/butterfly/layout/includes/third-party/math/katex.pug","hash":"f0d3eddd2bed68e5517274b3530bfe0fa5057d8e","modified":1722500710180},{"_id":"themes/butterfly/layout/includes/third-party/comments/waline.pug","hash":"efb72547fc2d470a124f5636391128dc59627498","modified":1722500710176},{"_id":"themes/butterfly/layout/includes/third-party/math/mathjax.pug","hash":"bb944185f4bb9f9a9b9d70ee215f66ccd6d4c6cf","modified":1722500710181},{"_id":"themes/butterfly/layout/includes/third-party/math/mermaid.pug","hash":"c682e4d61017fb0dd2e837bfcc242371f1a13364","modified":1722500710182},{"_id":"themes/butterfly/layout/includes/third-party/newest-comments/artalk.pug","hash":"2e36fac4791e99844cd56676898be0dbf5eb4e99","modified":1722500710184},{"_id":"themes/butterfly/layout/includes/third-party/newest-comments/index.pug","hash":"f8b65460c399973090c1fb7ab81e3708c252e7cc","modified":1722500710187},{"_id":"themes/butterfly/layout/includes/third-party/newest-comments/disqus-comment.pug","hash":"d8898e427acd91ceb97d6a7ee3acb011ca86b9fc","modified":1722500710184},{"_id":"themes/butterfly/layout/includes/third-party/newest-comments/remark42.pug","hash":"a4e52188b6effeee1df2a01dcbf4105de76a61a8","modified":1722500710188},{"_id":"themes/butterfly/layout/includes/third-party/newest-comments/twikoo-comment.pug","hash":"17520a86de12ae585289463c066d3ac91b78a2ff","modified":1722500710188},{"_id":"themes/butterfly/layout/includes/third-party/newest-comments/valine.pug","hash":"ecfff55b2c7f6d87ce4d5028fdf9f8c0bf155c73","modified":1722500710189},{"_id":"themes/butterfly/layout/includes/third-party/newest-comments/waline.pug","hash":"0544d91c0bc9e26e0fe1b5ff490f4a8540ed1ee1","modified":1722500710191},{"_id":"themes/butterfly/layout/includes/third-party/newest-comments/github-issues.pug","hash":"fc8814bd016d039874ec2fc24dcb78587892e2a6","modified":1722500710186},{"_id":"themes/butterfly/layout/includes/third-party/search/algolia.pug","hash":"90338ac4cd114d324fe1caaaeea8be9ca05d6a46","modified":1722500710195},{"_id":"themes/butterfly/layout/includes/third-party/search/index.pug","hash":"3adcf28a8d205ea3ee19828eda0e668702fac07a","modified":1722500710197},{"_id":"themes/butterfly/layout/includes/third-party/search/docsearch.pug","hash":"52a06a2e039f44383085333cac6
9f3f4e7d0ad3a","modified":1722500710196},{"_id":"themes/butterfly/layout/includes/third-party/search/local-search.pug","hash":"420a86e73d0d748ac234fd00d06d9e433ca5e3f2","modified":1722500710198},{"_id":"themes/butterfly/layout/includes/third-party/share/addtoany.pug","hash":"1f02a26730e5f36cc2dfec7ff4d5c93a099ed5ba","modified":1722500710199},{"_id":"themes/butterfly/layout/includes/third-party/share/index.pug","hash":"c16ee69b5ca8db016db0508d014ae0867c4ce929","modified":1722500710201},{"_id":"themes/butterfly/layout/includes/third-party/share/share-js.pug","hash":"8106bd031586f075a994956ee4438eb13be25d7b","modified":1722500710202},{"_id":"themes/butterfly/source/css/_highlight/highlight/diff.styl","hash":"6e77f1ca0cfb0db6b028f5c0238780e66d344f3d","modified":1722500710263},{"_id":"themes/butterfly/source/css/_highlight/highlight/index.styl","hash":"fc702a4614d0562a381907b083f71ba63d301d86","modified":1722500710264},{"_id":"themes/butterfly/source/css/_search/algolia.styl","hash":"37db99299af380e9111dce2a78a5049b301b13e0","modified":1722500710298},{"_id":"themes/butterfly/source/js/tw_cn.js","hash":"d776c670e4076ad6049dbb64cdee7a734b51d37f","modified":1722500710326},{"_id":"themes/butterfly/source/css/_highlight/prismjs/diff.styl","hash":"1309292f1c8c53d96cd7333507b106bcc24ca8fc","modified":1722500710265},{"_id":"themes/butterfly/source/js/search/algolia.js","hash":"a7c2fe73cc05ad3525909b86ad0ede1a9f2d3b48","modified":1722500710323},{"_id":"themes/butterfly/source/css/_highlight/prismjs/index.styl","hash":"01ff9e77eb1bd454bec65a6ff5972c8e219bc708","modified":1722500710267},{"_id":"source/img/000001.png","hash":"ad86c3b72174364d462bdab1d09540bd79eb123c","modified":1725979378674},{"_id":"source/img/machinelearning/cros-valid.png","hash":"8f9b204b651f93c17ad0856daa628d6abe985d97","modified":1736768886866},{"_id":"themes/butterfly/source/css/_highlight/prismjs/line-number.styl","hash":"7c9cc43e1d2577f7151039d58e603c30860fd281","modified":1722500710267},{"_id":"source/img/yiyuan.png","hash":"817a89509a8ebcddff6b369979d53ecf44a30a9f","modified":1722998234119},{"_id":"source/img/peiqian.png","hash":"2f077f1fff014ee448cd58b57ff83901702e2d88","modified":1723000686874},{"_id":"source/img/dingyue.png","hash":"c6afcd1124d84f07caeefcb895be3f3a5b301678","modified":1723001642809},{"_id":"source/img/site01.jpg","hash":"d93084432feb123fd5d781210c3a2c4db43c10ec","modified":1722524985872},{"_id":"public/search.xml","hash":"c74441d8486d2c420c4a9ec910968232d6c9734e","modified":1736846566584},{"_id":"public/categories/index.html","hash":"4cdbd3cb944cfabfbed8e61e3a37b5c9b83b7396","modified":1736846566584},{"_id":"public/archives/page/2/index.html","hash":"63e4f26007a93f524053a1e0cb6bc51bd6d8a238","modified":1736846566584},{"_id":"public/archives/2024/page/2/index.html","hash":"cef75c1ad6ecc3546d9c3b3d85d5fa5abc4496e4","modified":1736846566584},{"_id":"public/archives/2024/08/index.html","hash":"3a1fe1a2e2f8c7b522abb2288f03b6c860fcd961","modified":1736846566584},{"_id":"public/archives/2024/09/index.html","hash":"57f8c6aece22366a588aa014929ce938fc817834","modified":1736846566584},{"_id":"public/archives/2025/index.html","hash":"9570ac1a063dceff1c65cbb86c23d45d5ba13c16","modified":1736846566584},{"_id":"public/archives/2025/01/index.html","hash":"4992dcc3b763a79da912785b626af031dd0b51d2","modified":1736846566584},{"_id":"public/categories/古文观止/index.html","hash":"38a7c71204d6250eeb64835da114d81d29a96bd3","modified":1736846566584},{"_id":"public/tags/machinelearning/index.html","hash":"b61b88237972d9339ea47985786d95b8
67621ccd","modified":1736846566584},{"_id":"public/tags/uniapp/index.html","hash":"38560f6232dc61e37896693dac7f6fd62c442e17","modified":1736846566584},{"_id":"public/tags/古文观止/index.html","hash":"ac2e51c1948e633ea366d285d32bacd49a084be9","modified":1736846566584},{"_id":"public/tags/网络代理/index.html","hash":"30586ee6155b79d03b974461ec191617423ee9e9","modified":1736846566584},{"_id":"public/tags/index.html","hash":"993995c0a7fbb2c2fdac52a97c3c5cbb192666e1","modified":1736846566584},{"_id":"public/about/index.html","hash":"c9c4d571d48afaa58272256a09013db6fba5e325","modified":1736846566584},{"_id":"public/music/index.html","hash":"f976d20fe5d54263abb781b080d605cf1b9bdf12","modified":1736846566584},{"_id":"public/link/index.html","hash":"2516665d0cddc0f91ff97869fcad08a82a591c69","modified":1736846566584},{"_id":"public/movies/index.html","hash":"d99e71fca8a463d766356d8977d83d1b5d9eaa8c","modified":1736846566584},{"_id":"public/posts/29139.html","hash":"536395be9bf8e4760b17d09c676f6a77fe1fd976","modified":1736846566584},{"_id":"public/posts/61253.html","hash":"f923eb1311298fb34787e401568b2f5e041c7677","modified":1736846566584},{"_id":"public/posts/61251.html","hash":"b172067125d2ea1d72a67b045f098fbcde1b38e1","modified":1736846566584},{"_id":"public/posts/61252.html","hash":"3cbe1345d3737f098d0bebee66ec05d3b4a9555e","modified":1736846566584},{"_id":"public/posts/16107.html","hash":"34386a72d3dfd4c6b091f8aac2072e39eaed6feb","modified":1736846566584},{"_id":"public/posts/34849.html","hash":"7b7588271de905a37a1fcdf73424779cf3373a66","modified":1736846566584},{"_id":"public/posts/58638.html","hash":"a7f6b8cb183cfce43486303b778533e11bf85e7f","modified":1736846566584},{"_id":"public/posts/41168.html","hash":"d40b9a14d1e4b6ed6f7fbcad5b14cce52cb70b51","modified":1736846566584},{"_id":"public/posts/14011.html","hash":"a1dc0b992ff4c4c477b38d3c88c21f97225a0987","modified":1736846566584},{"_id":"public/posts/58817.html","hash":"fa05234a4f30849f8c1307ae8d3ba122215061e5","modified":1736846566584},{"_id":"public/posts/33957.html","hash":"9a5a5c409a390417117b576f178da8caa98f9220","modified":1736846566584},{"_id":"public/posts/47807.html","hash":"092726e6acee2bacc8f0f31382f914aba7459dc7","modified":1736846566584},{"_id":"public/posts/1441.html","hash":"0292564cd4084af24d2b19cd872897d4039cb9a5","modified":1736846566584},{"_id":"public/archives/index.html","hash":"f75ae491b1e1816d291da92a8993e0f21111ae85","modified":1736846566584},{"_id":"public/archives/2024/index.html","hash":"2a435300c19f4c09ebedf0e9a684cf8c4cb63d15","modified":1736846566584},{"_id":"public/index.html","hash":"b5a4f632368315255b3f8394b71fbb27ac0d3bae","modified":1736846566584},{"_id":"public/page/2/index.html","hash":"bb806417c4a65025c66264aab257b801dd09843b","modified":1736846566584},{"_id":"public/img/avatar.jpg","hash":"cb0941101c6a6b8f762ce6ffc3c948641e7f642f","modified":1736846566584},{"_id":"public/img/404.jpg","hash":"fb4489bc1d30c93d28f7332158c1c6c1416148de","modified":1736846566584},{"_id":"public/img/favicon.png","hash":"3cf89864b4f6c9b532522a4d260a2e887971c92d","modified":1736846566584},{"_id":"public/img/friend_404.gif","hash":"8d2d0ebef70a8eb07329f57e645889b0e420fa48","modified":1736846566584},{"_id":"public/img/machinelearning/kfold-skfold.png","hash":"ab841f5412b7ea773468a3facff643df68a88c01","modified":1736846566584},{"_id":"public/img/machinelearning/knn-01.png","hash":"2a931981d1d5f72d6b42b39b8ef313569eab853e","modified":1736846566584},{"_id":"public/css/var.css","hash":"da39a3ee5e6b4b0d3255bfef95601890afd80709","modified":17368
46566584},{"_id":"public/js/search/local-search.js","hash":"e1f60ebac53a3f596fd0a4769b4f9275c48c6542","modified":1736846566584},{"_id":"public/js/utils.js","hash":"8e6b48d294e7aeaba8ff6348c43b2271cf865547","modified":1736846566584},{"_id":"public/js/search/algolia.js","hash":"108988d046da9a4716148df43b3975217c8ceaae","modified":1736846566584},{"_id":"public/css/index.css","hash":"a86592daf1fcadb01092b449a0eb5100fc2351fb","modified":1736846566584},{"_id":"public/js/main.js","hash":"0dac585446445e0c419b86eec5580bc9b0657dc6","modified":1736846566584},{"_id":"public/js/tw_cn.js","hash":"f8d2e3f31468991a7f5171cbfdb157dfb86d3372","modified":1736846566584},{"_id":"public/img/machinelearning/cros-valid.png","hash":"8f9b204b651f93c17ad0856daa628d6abe985d97","modified":1736846566584},{"_id":"public/img/000001.png","hash":"ad86c3b72174364d462bdab1d09540bd79eb123c","modified":1736846566584},{"_id":"public/img/yiyuan.png","hash":"817a89509a8ebcddff6b369979d53ecf44a30a9f","modified":1736846566584},{"_id":"public/img/peiqian.png","hash":"2f077f1fff014ee448cd58b57ff83901702e2d88","modified":1736846566584},{"_id":"public/img/dingyue.png","hash":"c6afcd1124d84f07caeefcb895be3f3a5b301678","modified":1736846566584},{"_id":"public/img/site01.jpg","hash":"d93084432feb123fd5d781210c3a2c4db43c10ec","modified":1736846566584}],"Category":[{"name":"古文观止","_id":"cm5w9lrdp000htgahcgif26p4"}],"Data":[{"_id":"link","data":[{"class_name":"友情鏈接","class_desc":"那些人,那些事","link_list":[{"name":"Hexo","link":"https://hexo.io/zh-tw/","avatar":"https://d33wubrfki0l68.cloudfront.net/6657ba50e702d84afb32fe846bed54fba1a77add/827ae/logo.svg","descr":"快速、簡單且強大的網誌框架"}]},{"class_name":"網站","class_desc":"值得推薦的網站","link_list":[{"name":"Youtube","link":"https://www.youtube.com/","avatar":"https://i.loli.net/2020/05/14/9ZkGg8v3azHJfM1.png","descr":"視頻網站"},{"name":"Weibo","link":"https://www.weibo.com/","avatar":"https://i.loli.net/2020/05/14/TLJBum386vcnI1P.png","descr":"中國最大社交分享平台"},{"name":"Twitter","link":"https://twitter.com/","avatar":"https://i.loli.net/2020/05/14/5VyHPQqR6LWF39a.png","descr":"社交分享平台"}]}]}],"Page":[{"title":"About me","date":"2024-08-10T02:35:41.000Z","_content":"\n落花飞舞,翩若惊鸿。\n","source":"about/index.md","raw":"---\ntitle: About me\ndate: 2024-08-10 10:35:41\n---\n\n落花飞舞,翩若惊鸿。\n","updated":"2024-08-10T02:38:14.626Z","path":"about/index.html","comments":1,"layout":"page","_id":"cm5w9lrd90000tgah6s394dm1","content":"

Falling petals dance on the wind, graceful as a startled swan.

\n","cover":false,"excerpt":"","more":"

Falling petals dance on the wind, graceful as a startled swan.

\n"},{"title":"categories","date":"2024-07-31T00:33:49.000Z","aside":false,"top_img":false,"type":"categories","_content":"\n### category","source":"categories/index.md","raw":"---\ntitle: categories\ndate: 2024-07-31 08:33:49\naside: false\ntop_img: false\ntype: \"categories\"\n---\n\n### category","updated":"2024-07-31T06:37:23.090Z","path":"categories/index.html","comments":1,"layout":"page","_id":"cm5w9lrdg0002tgahhfj95eri","content":"

category

","cover":false,"excerpt":"","more":"

category

"},{"title":"tags","date":"2024-07-31T00:32:38.000Z","type":"tags","comments":0,"top_img":false,"_content":"","source":"tags/index.md","raw":"---\ntitle: tags\ndate: 2024-07-31 08:32:38\ntype: \"tags\"\ncomments: false\ntop_img: false\n---\n","updated":"2024-07-31T00:33:00.076Z","path":"tags/index.html","layout":"page","_id":"cm5w9lrdh0004tgah1cwhcbiv","content":"","cover":false,"excerpt":"","more":""},{"title":"Music","date":"2024-08-10T02:40:19.000Z","_content":"","source":"music/index.md","raw":"---\ntitle: Music\ndate: 2024-08-10 10:40:19\n---\n","updated":"2024-08-10T02:40:19.897Z","path":"music/index.html","comments":1,"layout":"page","_id":"cm5w9lrdj0006tgaha0vu4yha","content":"","cover":false,"excerpt":"","more":""},{"title":"link","date":"2024-08-10T02:42:35.000Z","type":"link","_content":"","source":"link/index.md","raw":"---\ntitle: link\ndate: 2024-08-10 10:42:35\ntype: \"link\"\n---\n","updated":"2024-08-10T02:42:48.927Z","path":"link/index.html","comments":1,"layout":"page","_id":"cm5w9lrdl0009tgah5j9mfltg","content":"","cover":false,"excerpt":"","more":""},{"title":"Movies","date":"2024-08-10T02:40:33.000Z","_content":"","source":"movies/index.md","raw":"---\ntitle: Movies\ndate: 2024-08-10 10:40:33\n---\n","updated":"2024-08-10T02:40:33.715Z","path":"movies/index.html","comments":1,"layout":"page","_id":"cm5w9lrdm000btgah0epg77s3","content":"","cover":false,"excerpt":"","more":""}],"Post":[{"title":"script","abbrlink":34849,"date":"2024-08-17T03:09:24.000Z","_content":"\n### 查看CPU、内存使用率\n```bash\n#!/bin/bash\n\n# 定义颜色\nRED='\\033[0;31m'\nGREEN='\\033[0;32m'\nYELLOW='\\033[0;33m'\nBLUE='\\033[0;34m'\nNC='\\033[0m' # 无颜色\n\nwhile true; do\n # 获取所有进程的CPU使用率和内存使用率\n cpu_usage=$(ps aux | awk '{sum_cpu += $3} END {print sum_cpu}')\n mem_usage=$(ps aux | awk '{sum_mem += $4} END {print sum_mem}')\n \n # 打印结果,带有时间戳、分隔线和颜色高亮\n echo -e \"${BLUE}==============================${NC}\"\n echo -e \"${YELLOW}Timestamp: $(date)${NC}\"\n echo -e \"${BLUE}==============================${NC}\"\n echo -e \"${GREEN}Total CPU usage: ${RED}$cpu_usage%${NC}\"\n echo -e \"${GREEN}Total Memory usage: ${RED}$mem_usage%${NC}\"\n echo -e \"${BLUE}==============================${NC}\"\n \n # 等待5秒后再次执行\n sleep 5\ndone\n\n```\n**保存脚本到/usr/local/bin目录下**\n```bash\n mv usage.sh /usr/local/bin/usage\n```\n\n### Shell脚本编写的基本信息\n\n```bash\n#! 
/bin/bash\n# -------------------------------------------------\n# Filename: test.sh\n# Version: 1.0\n# Date: 2024/05/02\n# Author: shenjianZ\n# Email: shenjianZLT@gmail.com\n# Website: https://blog.shenjianl.cn\n# Description: this is a test shell\n# CopyRight: 2024 All rights reserved shenjianZ\n# License GPL\n# ------------------------------------------------\n\n\n# Your script logic goes here\n```","source":"_posts/linux/script.md","raw":"---\ntitle: script\nabbrlink: 34849\ndate: 2024-08-17 11:09:24\ntags:\n---\n\n### 查看CPU、内存使用率\n```bash\n#!/bin/bash\n\n# 定义颜色\nRED='\\033[0;31m'\nGREEN='\\033[0;32m'\nYELLOW='\\033[0;33m'\nBLUE='\\033[0;34m'\nNC='\\033[0m' # 无颜色\n\nwhile true; do\n # 获取所有进程的CPU使用率和内存使用率\n cpu_usage=$(ps aux | awk '{sum_cpu += $3} END {print sum_cpu}')\n mem_usage=$(ps aux | awk '{sum_mem += $4} END {print sum_mem}')\n \n # 打印结果,带有时间戳、分隔线和颜色高亮\n echo -e \"${BLUE}==============================${NC}\"\n echo -e \"${YELLOW}Timestamp: $(date)${NC}\"\n echo -e \"${BLUE}==============================${NC}\"\n echo -e \"${GREEN}Total CPU usage: ${RED}$cpu_usage%${NC}\"\n echo -e \"${GREEN}Total Memory usage: ${RED}$mem_usage%${NC}\"\n echo -e \"${BLUE}==============================${NC}\"\n \n # 等待5秒后再次执行\n sleep 5\ndone\n\n```\n**保存脚本到/usr/local/bin目录下**\n```bash\n mv usage.sh /usr/local/bin/usage\n```\n\n### Shell脚本编写的基本信息\n\n```bash\n#! /bin/bash\n# -------------------------------------------------\n# Filename: test.sh\n# Version: 1.0\n# Date: 2024/05/02\n# Author: shenjianZ\n# Email: shenjianZLT@gmail.com\n# Website: https://blog.shenjianl.cn\n# Description: this is a test shell\n# CopyRight: 2024 All rights reserved shenjianZ\n# License GPL\n# ------------------------------------------------\n\n\n# Your script logic goes here\n```","slug":"linux/script","published":1,"updated":"2024-08-18T16:05:45.731Z","comments":1,"layout":"post","photos":[],"_id":"cm5w9lrdc0001tgah0jou53lf","content":"

### View CPU and memory usage

```bash
#!/bin/bash

# Define colors
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[0;33m'
BLUE='\033[0;34m'
NC='\033[0m' # no color

while true; do
    # Sum the CPU and memory usage of all processes
    cpu_usage=$(ps aux | awk '{sum_cpu += $3} END {print sum_cpu}')
    mem_usage=$(ps aux | awk '{sum_mem += $4} END {print sum_mem}')

    # Print the results with a timestamp, separators and color highlighting
    echo -e "${BLUE}==============================${NC}"
    echo -e "${YELLOW}Timestamp: $(date)${NC}"
    echo -e "${BLUE}==============================${NC}"
    echo -e "${GREEN}Total CPU usage: ${RED}$cpu_usage%${NC}"
    echo -e "${GREEN}Total Memory usage: ${RED}$mem_usage%${NC}"
    echo -e "${BLUE}==============================${NC}"

    # Wait 5 seconds before the next sample
    sleep 5
done
```

**Save the script to /usr/local/bin**

```bash
mv usage.sh /usr/local/bin/usage
```

### Basic shell script header information

```bash
#!/bin/bash
# -------------------------------------------------
# Filename: test.sh
# Version: 1.0
# Date: 2024/05/02
# Author: shenjianZ
# Email: shenjianZLT@gmail.com
# Website: https://blog.shenjianl.cn
# Description: this is a test shell
# CopyRight: 2024 All rights reserved shenjianZ
# License GPL
# ------------------------------------------------


# Your script logic goes here
```
","cover":false,"excerpt":"","more":"

"},{"title":"Hello World","abbrlink":16107,"_content":"Welcome to [Hexo](https://hexo.io/)! This is your very first post. Check [documentation](https://hexo.io/docs/) for more info. If you get any problems when using Hexo, you can find the answer in [troubleshooting](https://hexo.io/docs/troubleshooting.html) or you can ask me on [GitHub](https://github.com/hexojs/hexo/issues).\n\n## Quick Start\n\n### Create a new post\n\n``` bash\n$ hexo new \"My New Post\"\n```\n\nMore info: [Writing](https://hexo.io/docs/writing.html)\n\n### Run server\n\n``` bash\n$ hexo server\n```\n\nMore info: [Server](https://hexo.io/docs/server.html)\n\n### Generate static files\n\n``` bash\n$ hexo generate\n```\n\nMore info: [Generating](https://hexo.io/docs/generating.html)\n\n### Deploy to remote sites\n\n``` bash\n$ hexo deploy\n```\n\nMore info: [Deployment](https://hexo.io/docs/one-command-deployment.html)\n","source":"_posts/hello-world.md","raw":"---\ntitle: Hello World\nabbrlink: 16107\n---\nWelcome to [Hexo](https://hexo.io/)! This is your very first post. Check [documentation](https://hexo.io/docs/) for more info. If you get any problems when using Hexo, you can find the answer in [troubleshooting](https://hexo.io/docs/troubleshooting.html) or you can ask me on [GitHub](https://github.com/hexojs/hexo/issues).\n\n## Quick Start\n\n### Create a new post\n\n``` bash\n$ hexo new \"My New Post\"\n```\n\nMore info: [Writing](https://hexo.io/docs/writing.html)\n\n### Run server\n\n``` bash\n$ hexo server\n```\n\nMore info: [Server](https://hexo.io/docs/server.html)\n\n### Generate static files\n\n``` bash\n$ hexo generate\n```\n\nMore info: [Generating](https://hexo.io/docs/generating.html)\n\n### Deploy to remote sites\n\n``` bash\n$ hexo deploy\n```\n\nMore info: [Deployment](https://hexo.io/docs/one-command-deployment.html)\n","slug":"hello-world","published":1,"date":"2024-09-11T00:01:10.419Z","updated":"2024-08-09T12:21:50.026Z","comments":1,"layout":"post","photos":[],"_id":"cm5w9lrdg0003tgah9d6e9nmn","content":"

Welcome to [Hexo](https://hexo.io/)! This is your very first post. Check [documentation](https://hexo.io/docs/) for more info. If you get any problems when using Hexo, you can find the answer in [troubleshooting](https://hexo.io/docs/troubleshooting.html) or you can ask me on [GitHub](https://github.com/hexojs/hexo/issues).

## Quick Start

### Create a new post

```bash
$ hexo new "My New Post"
```

More info: [Writing](https://hexo.io/docs/writing.html)

### Run server

```bash
$ hexo server
```

More info: [Server](https://hexo.io/docs/server.html)

### Generate static files

```bash
$ hexo generate
```

More info: [Generating](https://hexo.io/docs/generating.html)

### Deploy to remote sites

```bash
$ hexo deploy
```

More info: [Deployment](https://hexo.io/docs/one-command-deployment.html)
\n","cover":false,"excerpt":"","more":"

\n"},{"title":"k近邻算法(K-Nearest Neighbors)KNN","abbrlink":29139,"mathjax":true,"date":"2025-01-13T09:20:59.000Z","_content":"## **k近邻算法(K-Nearest Neighbors)KNN**\n将当前样本的类别归类于距离最近的**k**个样本的类别\n\n#### **距离公式(2维)**\n\n- 欧式距离\n$$\nd = \\sqrt{(x_1-y_1)^2 + (x_2 - y_2)^2}\n$$\n- 曼哈顿距离\n$$\nd = |x_1 - x_2| + |y_1 - y_2|\n$$\n- 切比雪夫距离\n$$\nd = \\max\\left(|x_1 - x_2|, |y_1 - y_2|\\right)\n$$\n#### k值选择问题\n\n| k值 | 影响 |\n| --- | ------------------ |\n| 越大 | 模型过拟合,准确率波动较大 |\n| 越小 | 模型欠拟合,准确率趋于稳定但可能较低 |\n### 特征预处理\n> 通过一些转换函数将特征数据转换成更加适合算法模型的特征数据过程 \n- 归一化\n 将数据变换到指定区间(默认是\\[0,1\\])\n $$ x' = \\frac{x- x_{\\text {min}}}{x_{\\text{max}} - x_{\\text{min}}} $$\n 若需要缩放到任意区间 \\(\\[a, b\\]\\),公式为: $$ x' = a + \\frac{(x - x_{\\text{min}}) \\cdot (b - a)}{x_{\\text{max}} - x_{\\text{min}}} $$\n 其中:\\( \\[a, b\\] \\):目标区间的范围\n 归一化受到数据集的异常值的影响,需要进行标准化处理(更加合理)\n ``` python\n\t from sklearn.preprocessing import MinMaxScaler # 归一化\n\t```\n- 标准化\n 将数据调整为均值为 0,标准差为 1 的标准正态分布\n $$ z = \\frac{x - \\mu}{\\sigma} $$\n \\( z \\):标准化后的值 \\( x \\):原始数据值 \\( $\\mu$ \\):数据的均值 \\( $\\sigma$\\):数据的标准差\n \n ``` python\n \t from sklearn.preprocessing import StandardScaler # 标准化\n ```\n\n### KNN代码实现\n```python\nimport seaborn as sns\nimport matplotlib.pyplot as plt \nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.datasets import load_iris\nfrom sklearn.preprocessing import MinMaxScaler,StandardScaler\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.metrics import accuracy_score\n\n# 1 数据集获取\niris = load_iris()\n# print(iris.feature_names)\niris_data = pd.DataFrame(iris.data,columns=['Sepal_Length', 'Sepal_Width', 'Petal_Length', 'Petal_Width'])\niris_data['target'] = iris.target\n\ndef iris_plot(data,col1,col2):\n sns.lmplot(x=col1,y=col2,data=data,hue=\"target\",fit_reg=False)\n plt.show()\n# 2 数据集可视化\n# iris_plot(iris_data, 'Sepal_Width', 'Petal_Length')\n\n# 3 数据集的划分\nX_train,X_test,y_train,y_test = train_test_split(iris.data,iris.target,test_size=0.2,random_state=44)\n# print(\"训练集的特征值:\\n\",X_train)\n# print(\"训练集的目标值:\\n\",y_train)\n# print(\"测试集的特征值:\\n\",X_test)\n# print(\"测试集的特征值:\\n\",y_test)\n\n# 4 归一化\ntransfer = StandardScaler()\nX_train = transfer.fit_transform(X_train)\nX_test = transfer.transform(X_test)\n# print(\"归一化的,X_train:\\n\",X_train)\n# print(\"归一化的X_test:\\n\",X_test)\n\n# 5 机器学习 KNN\n# 5.1 实例化估计器\nestimator = KNeighborsClassifier(n_neighbors=9)\n# 5.2 进行训练\nestimator.fit(X_train,y_train)\n\n# 6 模型评估\ny_pred = estimator.predict(X_test)\nprint(\"预测值:\\n\",y_pre)\nprint(\"预测值与真实值是否相等:\\n\",y_pred==y_test)\naccuracy = accuracy_score(y_test, y_pred)\nprint(f\"\\nKNN 模型的准确率: {accuracy:.4f}\")\n```\n\n![](/img/machinelearning/knn-01.png)\n### 交叉验证与网格搜索\n```python\nimport seaborn as sns\nimport matplotlib.pyplot as plt \nimport pandas as pd\nfrom sklearn.model_selection import train_test_split,GridSearchCV\nfrom sklearn.datasets import load_iris\nfrom sklearn.preprocessing import MinMaxScaler,StandardScaler\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.metrics import accuracy_score\n\n# 1 数据集获取\niris = load_iris()\niris_data = pd.DataFrame(iris.data,columns=['Sepal_Length', 'Sepal_Width', 'Petal_Length', 'Petal_Width'])\niris_data['target'] = iris.target\n\n# 3 数据集的划分\nX_train,X_test,y_train,y_test = train_test_split(iris.data,iris.target,test_size=0.2)\n\n# 4 归一化\ntransfer = StandardScaler()\nX_train = transfer.fit_transform(X_train)\nX_test = transfer.transform(X_test)\n\n# 5 机器学习 KNN\n# 5.1 实例化估计器\n#\n#不指定 n_neighbors 
,使用网格搜索进行循环训练\nestimator = KNeighborsClassifier()\n# 5.2 模型调优 -- 交叉验证,网格搜素\nestimator = GridSearchCV(estimator,param_grid={\"n_neighbors\":[1,3,5,7]},cv=5) # 5 折\n# 5.2 进行训练\nestimator.fit(X_train,y_train)\n \n# 6 模型评估\ny_pred = estimator.predict(X_test)\nprint(\"预测值:\\n\",y_pred)\nprint(\"预测值与真实值是否相等:\\n\",y_pred==y_test)\naccuracy = accuracy_score(y_test, y_pred)\nprint(f\"\\nKNN 模型的准确率: {accuracy:.4f}\")\n\n# 交叉验证的相关参数\nprint(f\"最好结果:{estimator.best_score_}\")\nprint(f\"最好模型:{estimator.best_estimator_}\")\nprint(f\"最好模型结果:{estimator.cv_results_}\")\n```\n![](/img/machinelearning/cros-valid.png)\n\n### 机器学习的基本步骤\n- 获取数据集\n- 数据集基本处理\n - 去重去空、填充等操作 \n - 确定特征值和目标值\n - 分割数据集\n- 特征工程(特征预处理 标准化等)\n- 机器学习\n- 模型评估\n\n### 数据分割的方法\n- 留出法\n 训练/测试集的划分要尽可能保持数据分布的一致性,避免因数据划分过程引入额外的偏差而对最终结果产生影响。\n 单次使用留出法得到的估计结果往往不够稳定可靠,在使用留出法时,一般要采用若干次随机划分、重复进行实验评估后取平均值作为留出法的评估结果。\n ``` python\n\tfrom sklearn.model_selection import KFold,StratifiedKFold\n\timport pandas as pd\n\tX = np.array([\n\t[1,2,3,4],\n\t[11,12,13,14],\n\t[21,22,23,24],\n\t[31,32,33,34],\n\t[41,42,43,44],\n\t[51,52,53,54],\n\t[61,62,63,64],\n\t[71,72,73,74]\n\t])\n\ty=np.array([1,1,0,0,1,1,0,0])\n\tfolder = KFold(n_splits=4)\n\tsfloder = StratifiedKFold(n_splits=4)\n\tprint(\"KFOLD:\")\n\tfor train,test in folder.split(X,y):\n\t print(f\"train:{train},test:{test}\")\n\tprint(\"SKFOLD:\")\n\tfor train,test in sfloder.split(X,y):\n print(f\"train:{train},test:{test}\")\n\t```\n\t![](/img/machinelearning/kfold-skfold.png)\n- 自助法\n- 交叉验证法","source":"_posts/machinelearning/knn.md","raw":"---\ntitle: k近邻算法(K-Nearest Neighbors)KNN\ntags: machinelearning\nabbrlink: 29139\nmathjax: true\ndate: 2025-01-13 17:20:59\n---\n## **k近邻算法(K-Nearest Neighbors)KNN**\n将当前样本的类别归类于距离最近的**k**个样本的类别\n\n#### **距离公式(2维)**\n\n- 欧式距离\n$$\nd = \\sqrt{(x_1-y_1)^2 + (x_2 - y_2)^2}\n$$\n- 曼哈顿距离\n$$\nd = |x_1 - x_2| + |y_1 - y_2|\n$$\n- 切比雪夫距离\n$$\nd = \\max\\left(|x_1 - x_2|, |y_1 - y_2|\\right)\n$$\n#### k值选择问题\n\n| k值 | 影响 |\n| --- | ------------------ |\n| 越大 | 模型过拟合,准确率波动较大 |\n| 越小 | 模型欠拟合,准确率趋于稳定但可能较低 |\n### 特征预处理\n> 通过一些转换函数将特征数据转换成更加适合算法模型的特征数据过程 \n- 归一化\n 将数据变换到指定区间(默认是\\[0,1\\])\n $$ x' = \\frac{x- x_{\\text {min}}}{x_{\\text{max}} - x_{\\text{min}}} $$\n 若需要缩放到任意区间 \\(\\[a, b\\]\\),公式为: $$ x' = a + \\frac{(x - x_{\\text{min}}) \\cdot (b - a)}{x_{\\text{max}} - x_{\\text{min}}} $$\n 其中:\\( \\[a, b\\] \\):目标区间的范围\n 归一化受到数据集的异常值的影响,需要进行标准化处理(更加合理)\n ``` python\n\t from sklearn.preprocessing import MinMaxScaler # 归一化\n\t```\n- 标准化\n 将数据调整为均值为 0,标准差为 1 的标准正态分布\n $$ z = \\frac{x - \\mu}{\\sigma} $$\n \\( z \\):标准化后的值 \\( x \\):原始数据值 \\( $\\mu$ \\):数据的均值 \\( $\\sigma$\\):数据的标准差\n \n ``` python\n \t from sklearn.preprocessing import StandardScaler # 标准化\n ```\n\n### KNN代码实现\n```python\nimport seaborn as sns\nimport matplotlib.pyplot as plt \nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.datasets import load_iris\nfrom sklearn.preprocessing import MinMaxScaler,StandardScaler\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.metrics import accuracy_score\n\n# 1 数据集获取\niris = load_iris()\n# print(iris.feature_names)\niris_data = pd.DataFrame(iris.data,columns=['Sepal_Length', 'Sepal_Width', 'Petal_Length', 'Petal_Width'])\niris_data['target'] = iris.target\n\ndef iris_plot(data,col1,col2):\n sns.lmplot(x=col1,y=col2,data=data,hue=\"target\",fit_reg=False)\n plt.show()\n# 2 数据集可视化\n# iris_plot(iris_data, 'Sepal_Width', 'Petal_Length')\n\n# 3 数据集的划分\nX_train,X_test,y_train,y_test = 
train_test_split(iris.data,iris.target,test_size=0.2,random_state=44)\n# print(\"训练集的特征值:\\n\",X_train)\n# print(\"训练集的目标值:\\n\",y_train)\n# print(\"测试集的特征值:\\n\",X_test)\n# print(\"测试集的特征值:\\n\",y_test)\n\n# 4 归一化\ntransfer = StandardScaler()\nX_train = transfer.fit_transform(X_train)\nX_test = transfer.transform(X_test)\n# print(\"归一化的,X_train:\\n\",X_train)\n# print(\"归一化的X_test:\\n\",X_test)\n\n# 5 机器学习 KNN\n# 5.1 实例化估计器\nestimator = KNeighborsClassifier(n_neighbors=9)\n# 5.2 进行训练\nestimator.fit(X_train,y_train)\n\n# 6 模型评估\ny_pred = estimator.predict(X_test)\nprint(\"预测值:\\n\",y_pre)\nprint(\"预测值与真实值是否相等:\\n\",y_pred==y_test)\naccuracy = accuracy_score(y_test, y_pred)\nprint(f\"\\nKNN 模型的准确率: {accuracy:.4f}\")\n```\n\n![](/img/machinelearning/knn-01.png)\n### 交叉验证与网格搜索\n```python\nimport seaborn as sns\nimport matplotlib.pyplot as plt \nimport pandas as pd\nfrom sklearn.model_selection import train_test_split,GridSearchCV\nfrom sklearn.datasets import load_iris\nfrom sklearn.preprocessing import MinMaxScaler,StandardScaler\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.metrics import accuracy_score\n\n# 1 数据集获取\niris = load_iris()\niris_data = pd.DataFrame(iris.data,columns=['Sepal_Length', 'Sepal_Width', 'Petal_Length', 'Petal_Width'])\niris_data['target'] = iris.target\n\n# 3 数据集的划分\nX_train,X_test,y_train,y_test = train_test_split(iris.data,iris.target,test_size=0.2)\n\n# 4 归一化\ntransfer = StandardScaler()\nX_train = transfer.fit_transform(X_train)\nX_test = transfer.transform(X_test)\n\n# 5 机器学习 KNN\n# 5.1 实例化估计器\n#\n#不指定 n_neighbors ,使用网格搜索进行循环训练\nestimator = KNeighborsClassifier()\n# 5.2 模型调优 -- 交叉验证,网格搜素\nestimator = GridSearchCV(estimator,param_grid={\"n_neighbors\":[1,3,5,7]},cv=5) # 5 折\n# 5.2 进行训练\nestimator.fit(X_train,y_train)\n \n# 6 模型评估\ny_pred = estimator.predict(X_test)\nprint(\"预测值:\\n\",y_pred)\nprint(\"预测值与真实值是否相等:\\n\",y_pred==y_test)\naccuracy = accuracy_score(y_test, y_pred)\nprint(f\"\\nKNN 模型的准确率: {accuracy:.4f}\")\n\n# 交叉验证的相关参数\nprint(f\"最好结果:{estimator.best_score_}\")\nprint(f\"最好模型:{estimator.best_estimator_}\")\nprint(f\"最好模型结果:{estimator.cv_results_}\")\n```\n![](/img/machinelearning/cros-valid.png)\n\n### 机器学习的基本步骤\n- 获取数据集\n- 数据集基本处理\n - 去重去空、填充等操作 \n - 确定特征值和目标值\n - 分割数据集\n- 特征工程(特征预处理 标准化等)\n- 机器学习\n- 模型评估\n\n### 数据分割的方法\n- 留出法\n 训练/测试集的划分要尽可能保持数据分布的一致性,避免因数据划分过程引入额外的偏差而对最终结果产生影响。\n 单次使用留出法得到的估计结果往往不够稳定可靠,在使用留出法时,一般要采用若干次随机划分、重复进行实验评估后取平均值作为留出法的评估结果。\n ``` python\n\tfrom sklearn.model_selection import KFold,StratifiedKFold\n\timport pandas as pd\n\tX = np.array([\n\t[1,2,3,4],\n\t[11,12,13,14],\n\t[21,22,23,24],\n\t[31,32,33,34],\n\t[41,42,43,44],\n\t[51,52,53,54],\n\t[61,62,63,64],\n\t[71,72,73,74]\n\t])\n\ty=np.array([1,1,0,0,1,1,0,0])\n\tfolder = KFold(n_splits=4)\n\tsfloder = StratifiedKFold(n_splits=4)\n\tprint(\"KFOLD:\")\n\tfor train,test in folder.split(X,y):\n\t print(f\"train:{train},test:{test}\")\n\tprint(\"SKFOLD:\")\n\tfor train,test in sfloder.split(X,y):\n print(f\"train:{train},test:{test}\")\n\t```\n\t![](/img/machinelearning/kfold-skfold.png)\n- 自助法\n- 交叉验证法","slug":"machinelearning/knn","published":1,"updated":"2025-01-14T09:21:29.069Z","comments":1,"layout":"post","photos":[],"_id":"cm5w9lrdi0005tgah797v3otl","content":"

k近邻算法(K-Nearest Neighbors)KNN

将当前样本归入与它距离最近的 k 个训练样本中占多数的那个类别。

距离公式(2维)

对平面上两点 $(x_1, y_1)$ 与 $(x_2, y_2)$:

- 欧式距离
$$ d = \sqrt{(x_1 - x_2)^2 + (y_1 - y_2)^2} $$
- 曼哈顿距离
$$ d = |x_1 - x_2| + |y_1 - y_2| $$
- 切比雪夫距离
$$ d = \max\left(|x_1 - x_2|, |y_1 - y_2|\right) $$

k值选择问题

| k值 | 影响 |
| --- | --- |
| 越小 | 模型越复杂,容易过拟合,对训练集中的噪声敏感,准确率波动较大 |
| 越大 | 模型越简单,容易欠拟合,决策边界趋于平滑,准确率可能偏低 |
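为了直观感受 k 取值的影响,下面给出一个最小示意:用 5 折交叉验证在鸢尾花数据集上比较不同 k 的平均准确率(k 的取值列表为自拟,仅作演示):

```python
from sklearn.datasets import load_iris
from sklearn.model_selection import cross_val_score
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import StandardScaler

iris = load_iris()
# 演示用:直接对全部数据做标准化;严格做法应把标准化放进交叉验证流程
X = StandardScaler().fit_transform(iris.data)
y = iris.target

# 对不同的 k 做 5 折交叉验证,观察平均准确率的变化:
# k 很小时对噪声敏感,k 很大时决策过于平滑
for k in [1, 3, 5, 9, 15, 30, 60]:
    scores = cross_val_score(KNeighborsClassifier(n_neighbors=k), X, y, cv=5)
    print(f"k={k:>2}  平均准确率={scores.mean():.4f}")
```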

特征预处理

通过一些转换函数,把特征数据转换成更加适合算法模型的特征数据的过程。

- 归一化:将数据变换到指定区间(默认是 [0, 1])
  $$ x' = \frac{x - x_{\text{min}}}{x_{\text{max}} - x_{\text{min}}} $$
  归一化容易受数据集中异常值的影响,此时做标准化处理更加合理。
- 标准化:将数据调整为均值为 0、标准差为 1 的分布
  $$ z = \frac{x - \mu}{\sigma} $$
  其中 $\mu$ 为数据的均值,$\sigma$ 为数据的标准差。
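下面用一个很小的自拟数组对比两种预处理的输出(仅作示意):

```python
import numpy as np
from sklearn.preprocessing import MinMaxScaler, StandardScaler

X = np.array([[1.0, 10.0],
              [2.0, 20.0],
              [3.0, 300.0]])  # 第二列含一个偏大的值,模拟异常值的影响

print(MinMaxScaler().fit_transform(X))    # 每列线性缩放到 [0, 1]
print(StandardScaler().fit_transform(X))  # 每列变换为均值 0、标准差 1
# 注意:scaler 只在训练集上 fit_transform,测试集用同一个 scaler 的 transform
```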

KNN代码实现

import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_iris
from sklearn.preprocessing import MinMaxScaler,StandardScaler
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score

# 1 数据集获取
iris = load_iris()
# print(iris.feature_names)
iris_data = pd.DataFrame(iris.data,columns=['Sepal_Length', 'Sepal_Width', 'Petal_Length', 'Petal_Width'])
iris_data['target'] = iris.target

def iris_plot(data,col1,col2):
    sns.lmplot(x=col1,y=col2,data=data,hue="target",fit_reg=False)
    plt.show()
# 2 数据集可视化
# iris_plot(iris_data, 'Sepal_Width', 'Petal_Length')

# 3 数据集的划分
X_train,X_test,y_train,y_test = train_test_split(iris.data,iris.target,test_size=0.2,random_state=44)
# print("训练集的特征值:\\n",X_train)
# print("训练集的目标值:\\n",y_train)
# print("测试集的特征值:\\n",X_test)
# print("测试集的特征值:\\n",y_test)

# 4 特征预处理:标准化
transfer = StandardScaler()
X_train = transfer.fit_transform(X_train)
X_test = transfer.transform(X_test)
# print("标准化后的X_train:\\n",X_train)
# print("标准化后的X_test:\\n",X_test)

# 5 机器学习 KNN
# 5.1 实例化估计器
estimator = KNeighborsClassifier(n_neighbors=9)
# 5.2 进行训练
estimator.fit(X_train,y_train)

# 6 模型评估
y_pred = estimator.predict(X_test)
print("预测值:\\n",y_pred)
print("预测值与真实值是否相等:\\n",y_pred==y_test)
accuracy = accuracy_score(y_test, y_pred)
print(f"\\nKNN 模型的准确率: {accuracy:.4f}")
![](/img/machinelearning/knn-01.png)
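在准确率之外,还可以输出更细的分类指标。下面是一个简单示意(假设沿用上面代码中的 y_test 与 y_pred):

```python
from sklearn.metrics import classification_report, confusion_matrix

print(confusion_matrix(y_test, y_pred))       # 混淆矩阵
print(classification_report(y_test, y_pred))  # 每一类的精确率、召回率和 F1
```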

交叉验证与网格搜索

import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.model_selection import train_test_split,GridSearchCV
from sklearn.datasets import load_iris
from sklearn.preprocessing import MinMaxScaler,StandardScaler
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score

# 1 数据集获取
iris = load_iris()
iris_data = pd.DataFrame(iris.data,columns=['Sepal_Length', 'Sepal_Width', 'Petal_Length', 'Petal_Width'])
iris_data['target'] = iris.target

# 3 数据集的划分
X_train,X_test,y_train,y_test = train_test_split(iris.data,iris.target,test_size=0.2)

# 4 归一化
transfer = StandardScaler()
X_train = transfer.fit_transform(X_train)
X_test = transfer.transform(X_test)

# 5 机器学习 KNN
# 5.1 实例化估计器
# 不指定 n_neighbors,由网格搜索循环尝试各个候选取值
estimator = KNeighborsClassifier()
# 5.2 模型调优 -- 交叉验证、网格搜索
estimator = GridSearchCV(estimator,param_grid={"n_neighbors":[1,3,5,7]},cv=5) # 5 折交叉验证
# 5.3 进行训练
estimator.fit(X_train,y_train)

# 6 模型评估
y_pred = estimator.predict(X_test)
print("预测值:\\n",y_pred)
print("预测值与真实值是否相等:\\n",y_pred==y_test)
accuracy = accuracy_score(y_test, y_pred)
print(f"\\nKNN 模型的准确率: {accuracy:.4f}")

# 交叉验证的相关参数
print(f"最佳交叉验证得分:{estimator.best_score_}")
print(f"最佳模型:{estimator.best_estimator_}")
print(f"交叉验证详细结果:{estimator.cv_results_}")
![](/img/machinelearning/cros-valid.png)
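网格搜索结束后,除了上面打印的属性,还可以直接取出最优参数,并复用已经在全部训练数据上重新拟合好的最优模型。下面是一个简单示意(假设沿用上面的 estimator、X_test、y_test):

```python
# 最优参数组合,例如 {'n_neighbors': 5}(具体取值随数据划分而变)
print("最优参数:", estimator.best_params_)

# refit=True(默认)时,best_estimator_ 已在全部训练数据上重新训练,可直接用于预测
best_knn = estimator.best_estimator_
print("测试集准确率:", best_knn.score(X_test, y_test))
```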

机器学习的基本步骤

- 获取数据集
- 数据集基本处理:去重去空、填充等操作;确定特征值和目标值;分割数据集
- 特征工程(特征预处理、标准化等)
- 机器学习(模型训练)
- 模型评估

(下面给出一个把这些步骤串起来的最小示意。)
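以下示意基于 scikit-learn 的 Pipeline,数据仍以鸢尾花为例,n_neighbors=5 等参数取值为演示性假设:

```python
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler

# 1 获取数据集
X, y = load_iris(return_X_y=True)
# 2 数据集基本处理:划分训练集和测试集
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

# 3/4 特征工程 + 机器学习:把标准化和 KNN 串成一个流水线,避免测试集信息泄漏
pipe = Pipeline([("scaler", StandardScaler()),
                 ("knn", KNeighborsClassifier(n_neighbors=5))])
pipe.fit(X_train, y_train)

# 5 模型评估
print("测试集准确率:", pipe.score(X_test, y_test))
```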

数据分割的方法

- 留出法:训练/测试集的划分要尽可能保持数据分布的一致性,避免因划分过程引入额外偏差而影响最终结果;单次留出法得到的估计往往不够稳定可靠,一般要做若干次随机划分、重复实验评估后取平均值作为评估结果(K 折划分的代码示例见下方)。
- 自助法
- 交叉验证法
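下面是原文此处给出的 KFold 与 StratifiedKFold 划分示例,补上了缺失的 numpy 导入,并修正了缩进与变量名拼写:

```python
import numpy as np
from sklearn.model_selection import KFold, StratifiedKFold

X = np.array([
    [1, 2, 3, 4],
    [11, 12, 13, 14],
    [21, 22, 23, 24],
    [31, 32, 33, 34],
    [41, 42, 43, 44],
    [51, 52, 53, 54],
    [61, 62, 63, 64],
    [71, 72, 73, 74],
])
y = np.array([1, 1, 0, 0, 1, 1, 0, 0])

folder = KFold(n_splits=4)
sfolder = StratifiedKFold(n_splits=4)

print("KFOLD:")
for train, test in folder.split(X, y):
    print(f"train:{train}, test:{test}")
print("SKFOLD:")
for train, test in sfolder.split(X, y):
    print(f"train:{train}, test:{test}")
```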

\n","cover":false,"excerpt":"","more":"

\n"},{"title":"page","abbrlink":1441,"date":"2024-08-01T01:00:10.000Z","_content":"\n- [deploy](./deploy)\n- ","source":"_posts/frontend/frontend.md","raw":"---\ntitle: page\nabbrlink: 1441\ndate: 2024-08-01 09:00:10\ntags:\n---\n\n- [deploy](./deploy)\n- ","slug":"frontend/frontend","published":1,"updated":"2024-08-09T12:21:50.023Z","comments":1,"layout":"post","photos":[],"_id":"cm5w9lrdj0007tgah975l4qm5","content":"\n","cover":false,"excerpt":"","more":"\n"},{"title":"网络相关","abbrlink":41168,"date":"2024-08-07T02:06:08.000Z","_content":"\n","source":"_posts/net/index.md","raw":"---\ntitle: 网络相关\nabbrlink: 41168\ndate: 2024-08-07 10:06:08\ntags:\n---\n\n","slug":"net/index","published":1,"updated":"2024-08-09T12:21:50.028Z","comments":1,"layout":"post","photos":[],"_id":"cm5w9lrdm000atgahetofhexy","content":"","cover":false,"excerpt":"","more":""},{"title":"uniapp 开发","abbrlink":58817,"date":"2024-08-05T06:07:01.000Z","_content":"- [uniapp component](../component1)","source":"_posts/frontend/uniapp/uniapp.md","raw":"---\ntitle: uniapp 开发\ntags: uniapp\nabbrlink: 58817\ndate: 2024-08-05 14:07:01\n---\n- [uniapp component](../component1)","slug":"frontend/uniapp/uniapp","published":1,"updated":"2024-08-09T12:21:50.039Z","comments":1,"layout":"post","photos":[],"_id":"cm5w9lrdn000ctgah8ll3dor6","content":"\n","cover":false,"excerpt":"","more":"\n"},{"title":"郑伯克段于鄢","abbrlink":58638,"date":"2024-08-09T12:00:13.000Z","_content":"原文如下:\n\n      初,郑武公娶于申【申国】,曰武姜【武为武公谥号,姜为其宗族之性】。生庄公及共叔段【共表示其曾出逃到共,叔为老三,段为名】。庄公寤生【出生时头先出,难产】,惊姜氏,故名曰“寤生”, 遂恶之,爱【喜爱】共叔段,欲立【立为储君】之,亟(qì)【多次】请于武公,及庄公即位,为之【共叔段】请制【一个叫制的封地,虎牢关所在】。公曰:“制,岩邑【险要的城邑】也,虢叔死焉,佗【通“他”,其他】邑唯命(是听)。”请京,使居之,谓之“京城大叔”。","source":"_posts/ancient/guwenguanzhi/1.md","raw":"---\ntitle: 郑伯克段于鄢\ntags: 古文观止\ncategories:\n - 古文观止\nabbrlink: 58638\ndate: 2024-08-09 20:00:13\n---\n原文如下:\n\n      初,郑武公娶于申【申国】,曰武姜【武为武公谥号,姜为其宗族之性】。生庄公及共叔段【共表示其曾出逃到共,叔为老三,段为名】。庄公寤生【出生时头先出,难产】,惊姜氏,故名曰“寤生”, 遂恶之,爱【喜爱】共叔段,欲立【立为储君】之,亟(qì)【多次】请于武公,及庄公即位,为之【共叔段】请制【一个叫制的封地,虎牢关所在】。公曰:“制,岩邑【险要的城邑】也,虢叔死焉,佗【通“他”,其他】邑唯命(是听)。”请京,使居之,谓之“京城大叔”。","slug":"ancient/guwenguanzhi/1","published":1,"updated":"2024-08-10T02:31:03.678Z","comments":1,"layout":"post","photos":[],"_id":"cm5w9lrdn000dtgah04audthw","content":"

原文如下:

\n

      初,郑武公娶于申【申国】,曰武姜【武为武公谥号,姜为其宗族之姓】。生庄公及共叔段【共表示其曾出逃到共,叔为老三,段为名】。庄公寤生【出生时头先出,难产】,惊姜氏,故名曰“寤生”, 遂恶之,爱【喜爱】共叔段,欲立【立为储君】之,亟(qì)【多次】请于武公,及庄公即位,为之【共叔段】请制【一个叫制的封地,虎牢关所在】。公曰:“制,岩邑【险要的城邑】也,虢叔死焉,佗【通“他”,其他】邑唯命(是听)。”请京,使居之,谓之“京城大叔”。

\n","cover":false,"excerpt":"","more":"

\n"},{"title":"组件使用","abbrlink":33957,"date":"2024-08-05T06:07:01.000Z","_content":"\n### 组件自动导入\n```json\n\t\"easycom\":{\n\t \"autoscan\": true,\n\t \"custom\": {\n\t \"^tui-(.*)\": \"@/components/thorui/tui-$1/tui-$1.vue\" // 匹配components目录内的vue文件\n\t }\n\t}\n```\n\n### `tui-sticky 吸顶容器` \n\n> 包含 以下 `tui` 组件 :\n> - tui-sticky\n> - tui-list-view\n> - tui-list-cell\n> \n\n```html\n\n \n \n \n \n \n \n\n\n\n```\n","source":"_posts/frontend/uniapp/component1.md","raw":"---\ntitle: 组件使用\ntags: uniapp\nabbrlink: 33957\ndate: 2024-08-05 14:07:01\n---\n\n### 组件自动导入\n```json\n\t\"easycom\":{\n\t \"autoscan\": true,\n\t \"custom\": {\n\t \"^tui-(.*)\": \"@/components/thorui/tui-$1/tui-$1.vue\" // 匹配components目录内的vue文件\n\t }\n\t}\n```\n\n### `tui-sticky 吸顶容器` \n\n> 包含 以下 `tui` 组件 :\n> - tui-sticky\n> - tui-list-view\n> - tui-list-cell\n> \n\n```html\n\n \n \n \n \n \n \n\n\n\n```\n","slug":"frontend/uniapp/component1","published":1,"updated":"2024-08-09T12:21:50.042Z","comments":1,"layout":"post","photos":[],"_id":"cm5w9lrdo000gtgah38tl6q9j","content":"

组件自动导入

"easycom": {
  "autoscan": true,
  "custom": {
    "^tui-(.*)": "@/components/thorui/tui-$1/tui-$1.vue" // 匹配 components 目录内的 vue 文件
  }
}

tui-sticky 吸顶容器

包含以下 tui 组件:

- tui-sticky
- tui-list-view
- tui-list-cell
<tui-sticky :scrollTop="scrollTop" stickyHeight="104rpx" container>
  <!-- header start -->
  <template v-slot:header>
    <view class="sticky-item">
      <view class="setting">设置</view>
    </view>
  </template>
  <!-- header end -->
  <!--内容 start-->
  <template v-slot:content>
    <tui-list-view class="content">
      <tui-list-cell :arrow="false">
        <switch class='switch' checked color="#FFCC33" />
      </tui-list-cell>
    </tui-list-view>
  </template>
  <!--内容 end-->
</tui-sticky>

<script setup>
import { ref } from 'vue'
import { onPageScroll } from '@dcloudio/uni-app'

// 定义 scrollTop 响应式变量
const scrollTop = ref(0)
// 监听页面滚动事件
onPageScroll((e) => {
  scrollTop.value = e.scrollTop
})
</script>
\n","cover":false,"excerpt":"","more":"

\n"},{"title":"Docker被墙,如何继续使用?","top_img":"/img/site01.jpg","top_img_height":"800px","abbrlink":47807,"date":"2024-08-01T01:10:40.000Z","_content":"\n## Docker Download\n> 自从docker官方仓库在中国大陆被墙后,docker的部署方式也发生了改变。\n> 解决docker安装问题:https://github.com/shenjianZ/docker_installer\n\n1. 安装docker \n ```shell\n sudo curl -fsSL https://gitee.com/tech-shrimp/docker_installer/releases/download/latest/linux.sh| bash -s docker --mirror Aliyun\n ```\n \n2. 启动docker\n ```shell\n sudo systemctl start docker\n ```\n \n3. 设置开机自启\n ```shell\n sudo systemctl enable docker\n ```\n \n4. Docker pull images\n > 将image下载到阿里云镜像仓库中\n > 解决docker pull 镜像问题:https://github.com/shenjianZ/docker_image_pusher\n \n 1. **登录阿里云镜像服务** https://cr.console.aliyun.com/,\n\n 启用个人实例,创建一个命名空间(`ALIYUN_NAME_SPACE`)\n\n 2. 在**访问凭证** 可以看到账号 用户名(`ALIYUN_REGISTRY_USER`)\n\n 密码(`ALIYUN_REGISTRY_PASSWORD`)\n\n 仓库地址(`ALIYUN_REGISTRY`)\n\n 3. **启动Action**\n 进入您自己的项目,点击`Action`,启用`Github Action`功能\n \n 4. **配置环境变量**\n 进入Settings->Secret and variables->Actions->New Repository secret\n 将上一步的四个值\n `ALIYUN_NAME_SPACE`,\n \n ` ALIYUN_REGISTRY_USER`,\n \n `ALIYUN_REGISTRY_PASSWORD`,\n \n `ALIYUN_REGISTRY`\n 配置成环境变量\n \n 5. **添加镜像**\n 打开`images.txt`文件,添加你想要的镜像 可以加tag\n \n 6. 使用镜像\n 回到阿里云,镜像仓库,点击任意镜像,可查看镜像状态。(可以改成公开,拉取镜像免登录)","source":"_posts/frontend/deploy/deploy.md","raw":"---\ntitle: Docker被墙,如何继续使用?\ntop_img: /img/site01.jpg\ntop_img_height: 800px\nabbrlink: 47807\ndate: 2024-08-01 09:10:40\ntags:\n---\n\n## Docker Download\n> 自从docker官方仓库在中国大陆被墙后,docker的部署方式也发生了改变。\n> 解决docker安装问题:https://github.com/shenjianZ/docker_installer\n\n1. 安装docker \n ```shell\n sudo curl -fsSL https://gitee.com/tech-shrimp/docker_installer/releases/download/latest/linux.sh| bash -s docker --mirror Aliyun\n ```\n \n2. 启动docker\n ```shell\n sudo systemctl start docker\n ```\n \n3. 设置开机自启\n ```shell\n sudo systemctl enable docker\n ```\n \n4. Docker pull images\n > 将image下载到阿里云镜像仓库中\n > 解决docker pull 镜像问题:https://github.com/shenjianZ/docker_image_pusher\n \n 1. **登录阿里云镜像服务** https://cr.console.aliyun.com/,\n\n 启用个人实例,创建一个命名空间(`ALIYUN_NAME_SPACE`)\n\n 2. 在**访问凭证** 可以看到账号 用户名(`ALIYUN_REGISTRY_USER`)\n\n 密码(`ALIYUN_REGISTRY_PASSWORD`)\n\n 仓库地址(`ALIYUN_REGISTRY`)\n\n 3. **启动Action**\n 进入您自己的项目,点击`Action`,启用`Github Action`功能\n \n 4. **配置环境变量**\n 进入Settings->Secret and variables->Actions->New Repository secret\n 将上一步的四个值\n `ALIYUN_NAME_SPACE`,\n \n ` ALIYUN_REGISTRY_USER`,\n \n `ALIYUN_REGISTRY_PASSWORD`,\n \n `ALIYUN_REGISTRY`\n 配置成环境变量\n \n 5. **添加镜像**\n 打开`images.txt`文件,添加你想要的镜像 可以加tag\n \n 6. 使用镜像\n 回到阿里云,镜像仓库,点击任意镜像,可查看镜像状态。(可以改成公开,拉取镜像免登录)","slug":"frontend/deploy/deploy","published":1,"updated":"2024-08-09T12:21:50.045Z","comments":1,"layout":"post","photos":[],"_id":"cm5w9lrdt000otgah0ogb4vdp","content":"

Docker Download

自从 docker 官方仓库在中国大陆被墙后,docker 的部署方式也发生了改变。
解决 docker 安装问题:https://github.com/shenjianZ/docker_installer

1. 安装 docker
   sudo curl -fsSL https://gitee.com/tech-shrimp/docker_installer/releases/download/latest/linux.sh | bash -s docker --mirror Aliyun
2. 启动 docker
   sudo systemctl start docker
3. 设置开机自启
   sudo systemctl enable docker
4. Docker pull images
   将 image 下载到阿里云镜像仓库中,解决 docker pull 镜像的问题:https://github.com/shenjianZ/docker_image_pusher
   1. 登录阿里云镜像服务 https://cr.console.aliyun.com/,启用个人实例,创建一个命名空间(ALIYUN_NAME_SPACE)
   2. 在访问凭证页面可以看到用户名(ALIYUN_REGISTRY_USER)、密码(ALIYUN_REGISTRY_PASSWORD)和仓库地址(ALIYUN_REGISTRY)
   3. 启动 Action:进入您自己的项目,点击 Action,启用 GitHub Action 功能
   4. 配置环境变量:进入 Settings -> Secrets and variables -> Actions -> New repository secret,将上一步的四个值 ALIYUN_NAME_SPACE、ALIYUN_REGISTRY_USER、ALIYUN_REGISTRY_PASSWORD、ALIYUN_REGISTRY 配置成环境变量
   5. 添加镜像:打开 images.txt 文件,添加你想要的镜像,可以加 tag
   6. 使用镜像:回到阿里云镜像仓库,点击任意镜像,可查看镜像状态(可以改成公开,拉取镜像免登录)
\n","cover":false,"excerpt":"","more":"

\n"},{"title":"Hadoop集群搭建基础环境","top_img":"/img/site01.jpg","top_img_height":"800px","abbrlink":61253,"date":"2024-09-11T14:45:40.000Z","_content":"\n### 防火墙关闭\n```bash\n# 在 6 台主机执行\nsystemctl stop firewalld\nsystemctl disable firewalld\n```\n### 配置yum源\n- 下载 repo 文件:\n [Centos-7.repo](http://mirrors.aliyun.com/repo/Centos-7.repo)\n 并上传到`/tmp`,进入到`/tmp`\n- 备份并且替换系统的repo文件\n ``` bash\n \tcp Centos-7.repo /etc/yum.repos.d/ \n\tcd /etc/yum.repos.d/ \n\tmv CentOS-Base.repo CentOS-Base.repo.bak \n\tmv Centos-7.repo CentOS-Base.repo\n\t ```\n- 将`nn1`上的`CentOS-Base.repo`拷贝到其他主机\n ```bash\n scp /etc/yum.repos.d/CentOS-Base.repo root@nn2:/etc/yum.repos.d\n scp /etc/yum.repos.d/CentOS-Base.repo root@nn3:/etc/yum.repos.d\n scp /etc/yum.repos.d/CentOS-Base.repo root@s1:/etc/yum.repos.d\n scp /etc/yum.repos.d/CentOS-Base.repo root@s2:/etc/yum.repos.d\n scp /etc/yum.repos.d/CentOS-Base.repo root@s3:/etc/yum.repos.d\n ```\n- 执行yum源更新命令\n ```bash\n\t yum clean all\n\t yum makecache \n\t yum update -y \n\t```\n- 安装常用软件\n ```bash\n yum install -y openssh-server vim gcc gcc-c++ glibc-headers bzip2-devel lzo-devel curl wget openssh-clients zlib-devel autoconf automake cmake libtool openssl-devel fuse-devel snappy-devel telnet unzip zip net-tools.x86_64 firewalld systemd ntp unrar bzip2\n ```\n### JDK安装\n>注意需要在六台机器依次执行\n- 上传到`/tmp`目录下,安装\n ```bash\n cd /tmp\n rpm -ivh jdk-8u144-linux-x64.rpm\n ```\n- 配置环境变量\n ```bash\n ln -s /usr/java/jdk1.8.0_144/ /usr/java/jdk1.8\n echo 'export JAVA_HOME=/usr/java/jdk1.8' >> /etc/profile.d/myEnv.sh \n echo 'export PATH=$PATH:$JAVA_HOME/bin' >> /etc/profile.d/myEnv.sh \n source /etc/profile \n java -version\n ```\n### 修改主机名和主机名映射\n\n```bash\nvim /etc/hostname\n```\n6台机器分别为nn1、nn2、nn3、s1、s2、s3\n\n```bash\nvim /etc/hosts\n```\n\n修改为\n```text\n192.168.1.30 nn1\n192.168.1.31 nn2\n192.168.1.32 nn3\n192.168.1.33 s1\n192.168.1.34 s2\n192.168.1.35 s3\n```\n### 创建hadoop用户\n```bash\n#创建hadoop用户 \nuseradd hadoop \n#给hadoop用户设置密码: 12345678 \npasswd hadoop\n```\n### 禁止非 wheel 组用户切换到root,配置免密切换root\n- 修改/etc/pam.d/su配置\n ```bash\n sed -i 's/#auth\\t\\trequired\\tpam_wheel.so/auth\\t\\trequired\\tpam_wheel.so/g' '/etc/pam.d/su' \n sed -i 's/#auth\\t\\tsufficient\\tpam_wheel.so/auth\\t\\tsufficient\\tpam_wheel.so/g' '/etc/pam.d/su'\n ```\n- 修改/etc/login.defs文件\n ```bash\n echo \"SU_WHEEL_ONLY yes\" >> /etc/login.defs\n ```\n- 添加用户到管理员,禁止普通用户su 到 root\n ```bash\n #把hadoop用户加到wheel组里\n gpasswd -a hadoop wheel\n #查看wheel组里是否有hadoop用户\n cat /etc/group | grep wheel\n ```\n### 给hadoop用户,配置SSH密钥\n#### 配置hadoop用户ssh免密码登录到hadoop\n- 仅在`nn1`执行这段脚本命令即可\n 但是 `su - hadoop ` ,` mkdir ~/.ssh` 需要在其他主机执行一下\n ```bash\n #切换到hadoop用户 \n su - hadoop\n #生成ssh公私钥 \n ssh-keygen -t rsa -f ~/.ssh/id_rsa -P ''\n ssh-copy-id nn1\n ssh-copy-id nn2\n ssh-copy-id nn3\n ssh-copy-id s1\n ssh-copy-id s2\n ssh-copy-id s3\n scp /home/hadoop/.ssh/id_rsa hadoop@nn2:/home/hadoop/.ssh\n scp /home/hadoop/.ssh/id_rsa hadoop@nn3:/home/hadoop/.ssh\n scp /home/hadoop/.ssh/id_rsa hadoop@s1:/home/hadoop/.ssh\n scp /home/hadoop/.ssh/id_rsa hadoop@s2:/home/hadoop/.ssh\n scp /home/hadoop/.ssh/id_rsa hadoop@s3:/home/hadoop/.ssh\n ```\n#### 配置hadoop用户ssh免密码登录到root\n- 同上\n ```bash\n ssh-copy-id root@nn1\n ssh-copy-id root@ nn2\n ssh-copy-id root@nn3\n ssh-copy-id root@s1\n ssh-copy-id root@s2\n ssh-copy-id root@s3\n scp /home/hadoop/.ssh/id_rsa root@nn2:/root/.ssh\n scp /home/hadoop/.ssh/id_rsa root@nn3:/root/.ssh\n scp /home/hadoop/.ssh/id_rsa root@s1:/root/.ssh\n scp /home/hadoop/.ssh/id_rsa root@s2:/root/.ssh\n scp /home/hadoop/.ssh/id_rsa 
root@s3:/root/.ssh\n ```\n### 脚本配置\n- **ips**\n ```bash\n vim /home/hadoop/bin/ips\n ```\n \n ```bash\n nn1 \n nn2\n nn3\n s1 \n s2 \n s3\n ```\n- **ssh_all.sh**\n ```bash\n vim /home/hadoop/bin/ssh_all.sh\n ```\n\n ```bash\n #! /bin/bash\n # 进入到当前脚本所在目录\n cd `dirname $0`\n # 获取当前脚本所在目录\n dir_path=`pwd`\n #echo $dir_path\n # 读ips文件得到数组(里面是一堆主机名)\n ip_arr=(`cat $dir_path/ips`)\n # 遍历数组里的主机名\n for ip in ${ip_arr[*]}\n do\n # 拼接ssh命令: ssh hadoop@nn1.hadoop ls\n cmd_=\"ssh hadoop@${ip} \\\"$*\\\" \"\n echo $cmd_\n # 通过eval命令 执行 拼接的ssh 命令\n if eval ${cmd_} ; then\n echo \"OK\"\n else\n echo \"FAIL\"\n fi\n done\n ```\n- **ssh_root.sh**\n ```bash\n #! /bin/bash\n # 进入到当前脚本所在目录\n cd `dirname $0`\n # 获取当前脚本所在目录\n dir_path=`pwd`\n #echo $dir_path\n # 读ips文件得到数组(里面是一堆主机名)\n ip_arr=(`cat $dir_path/ips`)\n # 遍历数组里的主机名\n for ip in ${ip_arr[*]}\n do\n # 拼接ssh命令: ssh hadoop@nn1.hadoop ls\n cmd_=\"ssh hadoop@${ip} ~/bin/exe.sh \\\"$*\\\"\"\n echo $cmd_\n # 通过eval命令 执行 拼接的ssh 命令\n if eval ${cmd_} ; then\n echo \"OK\"\n else\n echo \"FAIL\"\n fi\n done\n ```\n- **scp_all.sh**\n ```bash\n #! /bin/bash\n # 进入到当前脚本所在目录\n cd `dirname $0`\n # 获取当前脚本所在目录\n dir_path=`pwd`\n #echo $dir_path\n # 读ips文件得到数组(里面是一堆主机名)\n ip_arr=(`cat $dir_path/ips`)\n # 源\n source_=$1\n # 目标\n target=$2\n # 遍历数组里的主机名\n for ip in ${ip_arr[*]}\n do\n # 拼接scp命令: scp 源 hadoop@nn1.hadoop:目标\n cmd_=\"scp -r ${source_} hadoop@${ip}:${target}\"\n echo $cmd_\n # 通过eval命令 执行 拼接的scp 命令\n if eval ${cmd_} ; then\n echo \"OK\"\n else\n echo \"FAIL\"\n fi\n done\n ```\n- **exe.sh**\n ```bash\n #切换到root用户执行cmd命令\n cmd=$*\n su - << EOF\n $cmd\n EOF\n ```\n- 赋予执行权限\n ```bash\n chmod +x ssh_all.sh \n chmod +x scp_all.sh\n chmod +x ssh_root.sh\n chmod +x exe.sh\n ```\n- 分发到其他主机\n ```bash\n ./ssh_all.sh mkdir /home/hadoop/bin\n ./scp_all.sh /home/hadoop/bin/ips /home/hadoop/bin/\n ./scp_all.sh /home/hadoop/bin/exe.sh /home/hadoop/bin/\n ./scp_all.sh /home/hadoop/bin/ssh_all.sh /home/hadoop/bin/\n ./scp_all.sh /home/hadoop/bin/scp_all.sh /home/hadoop/bin/\n ./scp_all.sh /home/hadoop/bin/ssh_root.sh /home/hadoop/bin/\n ```\n\n- 将 `/home/hadoop/bin`添加到hadoop用户的环境变量,需要切换到`hadoop`用户\n\n ```bash\n echo 'export PATH=$PATH:/home/hadoop/bin' >> ~/.bashrc && source ~/.bashrc\n scp_all.sh /home/hadoop/.bashrc /home/hadoop/\n ssh_all.sh source ~/.bashrc \n ```","source":"_posts/bigdata/hadoop/env.md","raw":"---\ntitle: Hadoop集群搭建基础环境\ntop_img: /img/site01.jpg\ntop_img_height: 800px\nabbrlink: 61253\ndate: 2024-09-011 22:45:40\n---\n\n### 防火墙关闭\n```bash\n# 在 6 台主机执行\nsystemctl stop firewalld\nsystemctl disable firewalld\n```\n### 配置yum源\n- 下载 repo 文件:\n [Centos-7.repo](http://mirrors.aliyun.com/repo/Centos-7.repo)\n 并上传到`/tmp`,进入到`/tmp`\n- 备份并且替换系统的repo文件\n ``` bash\n \tcp Centos-7.repo /etc/yum.repos.d/ \n\tcd /etc/yum.repos.d/ \n\tmv CentOS-Base.repo CentOS-Base.repo.bak \n\tmv Centos-7.repo CentOS-Base.repo\n\t ```\n- 将`nn1`上的`CentOS-Base.repo`拷贝到其他主机\n ```bash\n scp /etc/yum.repos.d/CentOS-Base.repo root@nn2:/etc/yum.repos.d\n scp /etc/yum.repos.d/CentOS-Base.repo root@nn3:/etc/yum.repos.d\n scp /etc/yum.repos.d/CentOS-Base.repo root@s1:/etc/yum.repos.d\n scp /etc/yum.repos.d/CentOS-Base.repo root@s2:/etc/yum.repos.d\n scp /etc/yum.repos.d/CentOS-Base.repo root@s3:/etc/yum.repos.d\n ```\n- 执行yum源更新命令\n ```bash\n\t yum clean all\n\t yum makecache \n\t yum update -y \n\t```\n- 安装常用软件\n ```bash\n yum install -y openssh-server vim gcc gcc-c++ glibc-headers bzip2-devel lzo-devel curl wget openssh-clients zlib-devel autoconf automake cmake libtool openssl-devel 
fuse-devel snappy-devel telnet unzip zip net-tools.x86_64 firewalld systemd ntp unrar bzip2\n ```\n### JDK安装\n>注意需要在六台机器依次执行\n- 上传到`/tmp`目录下,安装\n ```bash\n cd /tmp\n rpm -ivh jdk-8u144-linux-x64.rpm\n ```\n- 配置环境变量\n ```bash\n ln -s /usr/java/jdk1.8.0_144/ /usr/java/jdk1.8\n echo 'export JAVA_HOME=/usr/java/jdk1.8' >> /etc/profile.d/myEnv.sh \n echo 'export PATH=$PATH:$JAVA_HOME/bin' >> /etc/profile.d/myEnv.sh \n source /etc/profile \n java -version\n ```\n### 修改主机名和主机名映射\n\n```bash\nvim /etc/hostname\n```\n6台机器分别为nn1、nn2、nn3、s1、s2、s3\n\n```bash\nvim /etc/hosts\n```\n\n修改为\n```text\n192.168.1.30 nn1\n192.168.1.31 nn2\n192.168.1.32 nn3\n192.168.1.33 s1\n192.168.1.34 s2\n192.168.1.35 s3\n```\n### 创建hadoop用户\n```bash\n#创建hadoop用户 \nuseradd hadoop \n#给hadoop用户设置密码: 12345678 \npasswd hadoop\n```\n### 禁止非 wheel 组用户切换到root,配置免密切换root\n- 修改/etc/pam.d/su配置\n ```bash\n sed -i 's/#auth\\t\\trequired\\tpam_wheel.so/auth\\t\\trequired\\tpam_wheel.so/g' '/etc/pam.d/su' \n sed -i 's/#auth\\t\\tsufficient\\tpam_wheel.so/auth\\t\\tsufficient\\tpam_wheel.so/g' '/etc/pam.d/su'\n ```\n- 修改/etc/login.defs文件\n ```bash\n echo \"SU_WHEEL_ONLY yes\" >> /etc/login.defs\n ```\n- 添加用户到管理员,禁止普通用户su 到 root\n ```bash\n #把hadoop用户加到wheel组里\n gpasswd -a hadoop wheel\n #查看wheel组里是否有hadoop用户\n cat /etc/group | grep wheel\n ```\n### 给hadoop用户,配置SSH密钥\n#### 配置hadoop用户ssh免密码登录到hadoop\n- 仅在`nn1`执行这段脚本命令即可\n 但是 `su - hadoop ` ,` mkdir ~/.ssh` 需要在其他主机执行一下\n ```bash\n #切换到hadoop用户 \n su - hadoop\n #生成ssh公私钥 \n ssh-keygen -t rsa -f ~/.ssh/id_rsa -P ''\n ssh-copy-id nn1\n ssh-copy-id nn2\n ssh-copy-id nn3\n ssh-copy-id s1\n ssh-copy-id s2\n ssh-copy-id s3\n scp /home/hadoop/.ssh/id_rsa hadoop@nn2:/home/hadoop/.ssh\n scp /home/hadoop/.ssh/id_rsa hadoop@nn3:/home/hadoop/.ssh\n scp /home/hadoop/.ssh/id_rsa hadoop@s1:/home/hadoop/.ssh\n scp /home/hadoop/.ssh/id_rsa hadoop@s2:/home/hadoop/.ssh\n scp /home/hadoop/.ssh/id_rsa hadoop@s3:/home/hadoop/.ssh\n ```\n#### 配置hadoop用户ssh免密码登录到root\n- 同上\n ```bash\n ssh-copy-id root@nn1\n ssh-copy-id root@ nn2\n ssh-copy-id root@nn3\n ssh-copy-id root@s1\n ssh-copy-id root@s2\n ssh-copy-id root@s3\n scp /home/hadoop/.ssh/id_rsa root@nn2:/root/.ssh\n scp /home/hadoop/.ssh/id_rsa root@nn3:/root/.ssh\n scp /home/hadoop/.ssh/id_rsa root@s1:/root/.ssh\n scp /home/hadoop/.ssh/id_rsa root@s2:/root/.ssh\n scp /home/hadoop/.ssh/id_rsa root@s3:/root/.ssh\n ```\n### 脚本配置\n- **ips**\n ```bash\n vim /home/hadoop/bin/ips\n ```\n \n ```bash\n nn1 \n nn2\n nn3\n s1 \n s2 \n s3\n ```\n- **ssh_all.sh**\n ```bash\n vim /home/hadoop/bin/ssh_all.sh\n ```\n\n ```bash\n #! /bin/bash\n # 进入到当前脚本所在目录\n cd `dirname $0`\n # 获取当前脚本所在目录\n dir_path=`pwd`\n #echo $dir_path\n # 读ips文件得到数组(里面是一堆主机名)\n ip_arr=(`cat $dir_path/ips`)\n # 遍历数组里的主机名\n for ip in ${ip_arr[*]}\n do\n # 拼接ssh命令: ssh hadoop@nn1.hadoop ls\n cmd_=\"ssh hadoop@${ip} \\\"$*\\\" \"\n echo $cmd_\n # 通过eval命令 执行 拼接的ssh 命令\n if eval ${cmd_} ; then\n echo \"OK\"\n else\n echo \"FAIL\"\n fi\n done\n ```\n- **ssh_root.sh**\n ```bash\n #! /bin/bash\n # 进入到当前脚本所在目录\n cd `dirname $0`\n # 获取当前脚本所在目录\n dir_path=`pwd`\n #echo $dir_path\n # 读ips文件得到数组(里面是一堆主机名)\n ip_arr=(`cat $dir_path/ips`)\n # 遍历数组里的主机名\n for ip in ${ip_arr[*]}\n do\n # 拼接ssh命令: ssh hadoop@nn1.hadoop ls\n cmd_=\"ssh hadoop@${ip} ~/bin/exe.sh \\\"$*\\\"\"\n echo $cmd_\n # 通过eval命令 执行 拼接的ssh 命令\n if eval ${cmd_} ; then\n echo \"OK\"\n else\n echo \"FAIL\"\n fi\n done\n ```\n- **scp_all.sh**\n ```bash\n #! 
/bin/bash\n # 进入到当前脚本所在目录\n cd `dirname $0`\n # 获取当前脚本所在目录\n dir_path=`pwd`\n #echo $dir_path\n # 读ips文件得到数组(里面是一堆主机名)\n ip_arr=(`cat $dir_path/ips`)\n # 源\n source_=$1\n # 目标\n target=$2\n # 遍历数组里的主机名\n for ip in ${ip_arr[*]}\n do\n # 拼接scp命令: scp 源 hadoop@nn1.hadoop:目标\n cmd_=\"scp -r ${source_} hadoop@${ip}:${target}\"\n echo $cmd_\n # 通过eval命令 执行 拼接的scp 命令\n if eval ${cmd_} ; then\n echo \"OK\"\n else\n echo \"FAIL\"\n fi\n done\n ```\n- **exe.sh**\n ```bash\n #切换到root用户执行cmd命令\n cmd=$*\n su - << EOF\n $cmd\n EOF\n ```\n- 赋予执行权限\n ```bash\n chmod +x ssh_all.sh \n chmod +x scp_all.sh\n chmod +x ssh_root.sh\n chmod +x exe.sh\n ```\n- 分发到其他主机\n ```bash\n ./ssh_all.sh mkdir /home/hadoop/bin\n ./scp_all.sh /home/hadoop/bin/ips /home/hadoop/bin/\n ./scp_all.sh /home/hadoop/bin/exe.sh /home/hadoop/bin/\n ./scp_all.sh /home/hadoop/bin/ssh_all.sh /home/hadoop/bin/\n ./scp_all.sh /home/hadoop/bin/scp_all.sh /home/hadoop/bin/\n ./scp_all.sh /home/hadoop/bin/ssh_root.sh /home/hadoop/bin/\n ```\n\n- 将 `/home/hadoop/bin`添加到hadoop用户的环境变量,需要切换到`hadoop`用户\n\n ```bash\n echo 'export PATH=$PATH:/home/hadoop/bin' >> ~/.bashrc && source ~/.bashrc\n scp_all.sh /home/hadoop/.bashrc /home/hadoop/\n ssh_all.sh source ~/.bashrc \n ```","slug":"bigdata/hadoop/env","published":1,"updated":"2024-09-11T14:45:28.095Z","comments":1,"layout":"post","photos":[],"_id":"cm5w9lrdu000ptgah6cdk1e7e","content":"

防火墙关闭

# 在 6 台主机执行
systemctl stop firewalld
systemctl disable firewalld
\n

配置yum源

\n

JDK安装

\n

注意需要在六台机器依次执行

\n
\n\n

修改主机名和主机名映射

vim /etc/hostname
\n

6台机器分别为nn1、nn2、nn3、s1、s2、s3

\n
vim /etc/hosts
\n\n

修改为

\n
192.168.1.30 nn1
192.168.1.31 nn2
192.168.1.32 nn3
192.168.1.33 s1
192.168.1.34 s2
192.168.1.35 s3
\n

创建hadoop用户

#创建hadoop用户 
useradd hadoop
#给hadoop用户设置密码: 12345678
passwd hadoop
\n

禁止非 wheel 组用户切换到root,配置免密切换root

\n

给hadoop用户,配置SSH密钥

配置hadoop用户ssh免密码登录到hadoop

\n

配置hadoop用户ssh免密码登录到root

\n

脚本配置

\n","cover":false,"excerpt":"","more":"

\n"},{"title":"Hadoop集群HDFS配置","top_img":"/img/site01.jpg","top_img_height":"800px","abbrlink":61252,"date":"2024-09-11T14:45:40.000Z","_content":"\n### 上传`hadoop-3.1.4.tar.gz`到`/tmp`,解压\n>注意在六台机器均上传到`/tmp`\n```bash\n# 在6台机器执行\nsudo tar -zxvf /tmp/hadoop-3.1.4.tar.gz -C /usr/local/\n# 分发到其他主机\nssh_root.sh chown -R hadoop:hadoop /usr/local/hadoop-3.1.4\nssh_root.sh ln -s /usr/local/hadoop-3.1.4/ /usr/local/hadoop\n```\n### 配置环境变量\n```bash\necho 'export HADOOP_HOME=/usr/local/hadoop' >> /etc/profile.d/myEnv.sh\necho 'export PATH=$PATH:$HADOOP_HOME/bin' >> /etc/profile.d/myEnv.sh\necho 'export PATH=$PATH:$HADOOP_HOME/sbin' >> /etc/profile.d/myEnv.sh\n```\n\n```bash\n# 分发到nn2、nn3、s1、s2、s3\nscp_all.sh /etc/profile.d/myEnv.sh /etc/profile.d/\n# source 环境变量\nssh_root.sh source /etc/profile\n```\n>还需要创建 `/data`这个目录,由于nn1、nn2、nn3已经创建`/data`,其他三台需要创建一下\n```bash\n### 在s1、s2、s3执行\nsudo mkdir /data\nsudo chown -R hadoop:hadoop /data\n```\n\n### 修改core-site.xml\n```bash\nvim /usr/local/hadoop/etc/hadoop/core-site.xml \n```\n\n```xml\n\n\t\n\t fs.defaultFS\n\t hdfs://ns1\n\t 默认文件服务的协议和NS逻辑名称,和hdfs-site.xml里的对应此配置替代了1.0里的fs.default.name\n\t\n\t\n\t\n\t hadoop.tmp.dir\n\t /data/tmp\n\t 数据存储目录\n\t\n\t\n\t\n\t hadoop.proxyuser.root.groups\n\t hadoop\n\t \n\t hdfs dfsadmin –refreshSuperUserGroupsConfiguration,\n\t yarn rmadmin –refreshSuperUserGroupsConfiguration\n\t 使用这两个命令不用重启就能刷新\n\t \n\t\n\t\n\t\n\t hadoop.proxyuser.root.hosts\n\t localhost\n\t 本地代理\n\t\n\t\n\t\n\t \n\t\tha.zookeeper.quorum \n\t\tnn1:2181,nn2:2181,nn3:2181 \n\t\tHA使用的zookeeper地址 \n\t\n\n```\n### 修改`hdfs-site.xml`\n```bash\nvim /usr/local/hadoop/etc/hadoop/hdfs-site.xml \n```\n\n```xml\n\n \n dfs.namenode.name.dir\n /data/namenode\n namenode本地文件存放地址\n \n \n \n dfs.nameservices\n ns1\n 提供服务的NS逻辑名称,与core-site.xml里的对应\n \n \n \n \n \n dfs.ha.namenodes.ns1\n nn1,nn2,nn3\n 列出该逻辑名称下的NameNode逻辑名称\n \n \n \n dfs.namenode.rpc-address.ns1.nn1\n nn1:9000\n 指定NameNode的RPC位置\n \n \n \n dfs.namenode.http-address.ns1.nn1\n nn1:50070\n 指定NameNode的Web Server位置\n \n \n \n dfs.namenode.rpc-address.ns1.nn2\n nn2:9000\n 指定NameNode的RPC位置\n \n \n \n dfs.namenode.http-address.ns1.nn2\n nn2:50070\n 指定NameNode的Web Server位置\n \n \n \n dfs.namenode.rpc-address.ns1.nn3\n nn3:9000\n 指定NameNode的RPC位置\n \n \n \n dfs.namenode.http-address.ns1.nn3\n nn3:50070\n 指定NameNode的Web Server位置\n \n \n \n dfs.namenode.handler.count\n 77\n namenode的工作线程数\n \n\n \n \n dfs.namenode.shared.edits.dir\n qjournal://nn1:8485;nn2:8485;nn3:8485/ns1\n 指定用于HA存放edits的共享存储,通常是namenode的所在机器\n \n \n \n dfs.journalnode.edits.dir\n /data/journaldata/\n journaldata服务存放文件的地址\n \n \n \n ipc.client.connect.max.retries\n 10\n namenode和journalnode的链接重试次数10次\n \n \n \n ipc.client.connect.retry.interval\n 10000\n 重试的间隔时间10s\n \n\n \n \n dfs.ha.fencing.methods\n sshfence\n 指定HA做隔离的方法,缺省是ssh,可设为shell,稍后详述\n \n \n \n dfs.ha.fencing.ssh.private-key-files\n /home/hadoop/.ssh/id_rsa\n 杀死命令脚本的免密配置秘钥\n \n \n \n dfs.client.failover.proxy.provider.ns1\n org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider\n 指定客户端用于HA切换的代理类,不同的NS可以用不同的代理类以上示例为Hadoop 2.0自带的缺省代理类\n \n \n \n dfs.client.failover.proxy.provider.auto-ha\n org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider\n \n \n \n dfs.ha.automatic-failover.enabled\n true\n \n\t\n\t\n\t dfs.datanode.data.dir\n\t /data/datanode\n\t datanode本地文件存放地址\n\t\n\t\n\t dfs.replication\n\t 3\n\t 文件复本数\n\t\n\t\n\t dfs.namenode.datanode.registration.ip-hostname-check\n\t false\n\t\n\t\n\t dfs.client.use.datanode.hostname\n\t true\n\t\n\t\n\t 
dfs.datanode.use.datanode.hostname\n\t true\n\t\n\n```\n### 修改`hadoop-env.sh`\n```bash\nvim /usr/local/hadoop/etc/hadoop/hadoop-env.sh\n```\n\n```bash\n# 添加这两行\nsource /etc/profile \nexport HADOOP_HEAPSIZE_MAX=512\n```\n### 分发这些配置文件\n```bash\nscp_all.sh /usr/local/hadoop/etc/hadoop/core-site.xml /usr/local/hadoop/etc/hadoop/\nscp_all.sh /usr/local/hadoop/etc/hadoop/hdfs-site.xml /usr/local/hadoop/etc/hadoop/\nscp_all.sh /usr/local/hadoop/etc/hadoop/hadoop-env.sh /usr/local/hadoop/etc/hadoop/\n```\n### 集群初始化\n- 需要先启动zookeeper集群\n ```bash\n ssh_all_zookeeper.sh /usr/local/zookeeper/bin/zkServer.sh start\n ```\n\n ```bash\n # 第一次启动先启动journalnode,便于3个namenode的元数据同步\n ssh_all_zookeeper.sh hadoop-daemon.sh start journalnode\n ```\n- `zkfc`搭建\n ```bash\n #在nn1节点执行 \n hdfs zkfc -formatZK\n #nn1 nn2 nn3启动zkfc \n hadoop-daemon.sh start zkfc\n ```\n- 初始化nn1的namenode,在nn1执行\n ```bash\n hdfs namenode -format \n hadoop-daemon.sh start namenode\n ```\n- 格式化第二台和第三台namenode,并且启动namenode,在nn2、nn3执行\n ```bash\n hdfs namenode -bootstrapStandby \n hadoop-daemon.sh start namenode\n ```\n- 修改**workers**\n ```bash\n vim /usr/local/hadoop/etc/hadoop/workers\n ```\n 修改为\n ```text\n s1\n s2\n s3\n ```\n 分发给其他机器\n ```bash\n scp_all.sh /usr/local/hadoop/etc/hadoop/workers /usr/local/hadoop/etc/hadoop\n ```\n- 启动datanode节点,在s1、s2、s3执行\n\n ```bash\n #启动各个节点的datanode\n hadoop-daemons.sh start datanode\n ```\n### 集群启动\n```bash\nstart-dfs.sh\n```","source":"_posts/bigdata/hadoop/hdfs.md","raw":"---\ntitle: Hadoop集群HDFS配置\ntop_img: /img/site01.jpg\ntop_img_height: 800px\nabbrlink: 61252\ndate: 2024-09-011 22:45:40\n---\n\n### 上传`hadoop-3.1.4.tar.gz`到`/tmp`,解压\n>注意在六台机器均上传到`/tmp`\n```bash\n# 在6台机器执行\nsudo tar -zxvf /tmp/hadoop-3.1.4.tar.gz -C /usr/local/\n# 分发到其他主机\nssh_root.sh chown -R hadoop:hadoop /usr/local/hadoop-3.1.4\nssh_root.sh ln -s /usr/local/hadoop-3.1.4/ /usr/local/hadoop\n```\n### 配置环境变量\n```bash\necho 'export HADOOP_HOME=/usr/local/hadoop' >> /etc/profile.d/myEnv.sh\necho 'export PATH=$PATH:$HADOOP_HOME/bin' >> /etc/profile.d/myEnv.sh\necho 'export PATH=$PATH:$HADOOP_HOME/sbin' >> /etc/profile.d/myEnv.sh\n```\n\n```bash\n# 分发到nn2、nn3、s1、s2、s3\nscp_all.sh /etc/profile.d/myEnv.sh /etc/profile.d/\n# source 环境变量\nssh_root.sh source /etc/profile\n```\n>还需要创建 `/data`这个目录,由于nn1、nn2、nn3已经创建`/data`,其他三台需要创建一下\n```bash\n### 在s1、s2、s3执行\nsudo mkdir /data\nsudo chown -R hadoop:hadoop /data\n```\n\n### 修改core-site.xml\n```bash\nvim /usr/local/hadoop/etc/hadoop/core-site.xml \n```\n\n```xml\n\n\t\n\t fs.defaultFS\n\t hdfs://ns1\n\t 默认文件服务的协议和NS逻辑名称,和hdfs-site.xml里的对应此配置替代了1.0里的fs.default.name\n\t\n\t\n\t\n\t hadoop.tmp.dir\n\t /data/tmp\n\t 数据存储目录\n\t\n\t\n\t\n\t hadoop.proxyuser.root.groups\n\t hadoop\n\t \n\t hdfs dfsadmin –refreshSuperUserGroupsConfiguration,\n\t yarn rmadmin –refreshSuperUserGroupsConfiguration\n\t 使用这两个命令不用重启就能刷新\n\t \n\t\n\t\n\t\n\t hadoop.proxyuser.root.hosts\n\t localhost\n\t 本地代理\n\t\n\t\n\t\n\t \n\t\tha.zookeeper.quorum \n\t\tnn1:2181,nn2:2181,nn3:2181 \n\t\tHA使用的zookeeper地址 \n\t\n\n```\n### 修改`hdfs-site.xml`\n```bash\nvim /usr/local/hadoop/etc/hadoop/hdfs-site.xml \n```\n\n```xml\n\n \n dfs.namenode.name.dir\n /data/namenode\n namenode本地文件存放地址\n \n \n \n dfs.nameservices\n ns1\n 提供服务的NS逻辑名称,与core-site.xml里的对应\n \n \n \n \n \n dfs.ha.namenodes.ns1\n nn1,nn2,nn3\n 列出该逻辑名称下的NameNode逻辑名称\n \n \n \n dfs.namenode.rpc-address.ns1.nn1\n nn1:9000\n 指定NameNode的RPC位置\n \n \n \n dfs.namenode.http-address.ns1.nn1\n nn1:50070\n 指定NameNode的Web Server位置\n \n \n \n dfs.namenode.rpc-address.ns1.nn2\n nn2:9000\n 
指定NameNode的RPC位置\n \n \n \n dfs.namenode.http-address.ns1.nn2\n nn2:50070\n 指定NameNode的Web Server位置\n \n \n \n dfs.namenode.rpc-address.ns1.nn3\n nn3:9000\n 指定NameNode的RPC位置\n \n \n \n dfs.namenode.http-address.ns1.nn3\n nn3:50070\n 指定NameNode的Web Server位置\n \n \n \n dfs.namenode.handler.count\n 77\n namenode的工作线程数\n \n\n \n \n dfs.namenode.shared.edits.dir\n qjournal://nn1:8485;nn2:8485;nn3:8485/ns1\n 指定用于HA存放edits的共享存储,通常是namenode的所在机器\n \n \n \n dfs.journalnode.edits.dir\n /data/journaldata/\n journaldata服务存放文件的地址\n \n \n \n ipc.client.connect.max.retries\n 10\n namenode和journalnode的链接重试次数10次\n \n \n \n ipc.client.connect.retry.interval\n 10000\n 重试的间隔时间10s\n \n\n \n \n dfs.ha.fencing.methods\n sshfence\n 指定HA做隔离的方法,缺省是ssh,可设为shell,稍后详述\n \n \n \n dfs.ha.fencing.ssh.private-key-files\n /home/hadoop/.ssh/id_rsa\n 杀死命令脚本的免密配置秘钥\n \n \n \n dfs.client.failover.proxy.provider.ns1\n org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider\n 指定客户端用于HA切换的代理类,不同的NS可以用不同的代理类以上示例为Hadoop 2.0自带的缺省代理类\n \n \n \n dfs.client.failover.proxy.provider.auto-ha\n org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider\n \n \n \n dfs.ha.automatic-failover.enabled\n true\n \n\t\n\t\n\t dfs.datanode.data.dir\n\t /data/datanode\n\t datanode本地文件存放地址\n\t\n\t\n\t dfs.replication\n\t 3\n\t 文件复本数\n\t\n\t\n\t dfs.namenode.datanode.registration.ip-hostname-check\n\t false\n\t\n\t\n\t dfs.client.use.datanode.hostname\n\t true\n\t\n\t\n\t dfs.datanode.use.datanode.hostname\n\t true\n\t\n\n```\n### 修改`hadoop-env.sh`\n```bash\nvim /usr/local/hadoop/etc/hadoop/hadoop-env.sh\n```\n\n```bash\n# 添加这两行\nsource /etc/profile \nexport HADOOP_HEAPSIZE_MAX=512\n```\n### 分发这些配置文件\n```bash\nscp_all.sh /usr/local/hadoop/etc/hadoop/core-site.xml /usr/local/hadoop/etc/hadoop/\nscp_all.sh /usr/local/hadoop/etc/hadoop/hdfs-site.xml /usr/local/hadoop/etc/hadoop/\nscp_all.sh /usr/local/hadoop/etc/hadoop/hadoop-env.sh /usr/local/hadoop/etc/hadoop/\n```\n### 集群初始化\n- 需要先启动zookeeper集群\n ```bash\n ssh_all_zookeeper.sh /usr/local/zookeeper/bin/zkServer.sh start\n ```\n\n ```bash\n # 第一次启动先启动journalnode,便于3个namenode的元数据同步\n ssh_all_zookeeper.sh hadoop-daemon.sh start journalnode\n ```\n- `zkfc`搭建\n ```bash\n #在nn1节点执行 \n hdfs zkfc -formatZK\n #nn1 nn2 nn3启动zkfc \n hadoop-daemon.sh start zkfc\n ```\n- 初始化nn1的namenode,在nn1执行\n ```bash\n hdfs namenode -format \n hadoop-daemon.sh start namenode\n ```\n- 格式化第二台和第三台namenode,并且启动namenode,在nn2、nn3执行\n ```bash\n hdfs namenode -bootstrapStandby \n hadoop-daemon.sh start namenode\n ```\n- 修改**workers**\n ```bash\n vim /usr/local/hadoop/etc/hadoop/workers\n ```\n 修改为\n ```text\n s1\n s2\n s3\n ```\n 分发给其他机器\n ```bash\n scp_all.sh /usr/local/hadoop/etc/hadoop/workers /usr/local/hadoop/etc/hadoop\n ```\n- 启动datanode节点,在s1、s2、s3执行\n\n ```bash\n #启动各个节点的datanode\n hadoop-daemons.sh start datanode\n ```\n### 集群启动\n```bash\nstart-dfs.sh\n```","slug":"bigdata/hadoop/hdfs","published":1,"updated":"2024-09-11T14:51:42.712Z","comments":1,"layout":"post","photos":[],"_id":"cm5w9lrdv000qtgah29015x3b","content":"

上传 hadoop-3.1.4.tar.gz 到 /tmp,解压

\n

注意在六台机器均上传到/tmp

\n
\n
# 在6台机器执行
sudo tar -zxvf /tmp/hadoop-3.1.4.tar.gz -C /usr/local/
# 分发到其他主机
ssh_root.sh chown -R hadoop:hadoop /usr/local/hadoop-3.1.4
ssh_root.sh ln -s /usr/local/hadoop-3.1.4/ /usr/local/hadoop
\n

配置环境变量

echo 'export HADOOP_HOME=/usr/local/hadoop' >> /etc/profile.d/myEnv.sh
echo 'export PATH=$PATH:$HADOOP_HOME/bin' >> /etc/profile.d/myEnv.sh
echo 'export PATH=$PATH:$HADOOP_HOME/sbin' >> /etc/profile.d/myEnv.sh
\n\n
# 分发到nn2、nn3、s1、s2、s3
scp_all.sh /etc/profile.d/myEnv.sh /etc/profile.d/
# source 环境变量
ssh_root.sh source /etc/profile
\n
\n

还需要创建 /data这个目录,由于nn1、nn2、nn3已经创建/data,其他三台需要创建一下

\n
\n
### 在s1、s2、s3执行
sudo mkdir /data
sudo chown -R hadoop:hadoop /data
\n\n

修改core-site.xml

vim /usr/local/hadoop/etc/hadoop/core-site.xml 
\n\n
<configuration>
  <property>
    <name>fs.defaultFS</name>
    <value>hdfs://ns1</value>
    <description>默认文件服务的协议和NS逻辑名称,和hdfs-site.xml里的对应,此配置替代了1.0里的fs.default.name</description>
  </property>

  <property>
    <name>hadoop.tmp.dir</name>
    <value>/data/tmp</value>
    <description>数据存储目录</description>
  </property>

  <property>
    <name>hadoop.proxyuser.root.groups</name>
    <value>hadoop</value>
    <description>
      hdfs dfsadmin -refreshSuperUserGroupsConfiguration,
      yarn rmadmin -refreshSuperUserGroupsConfiguration
      使用这两个命令不用重启就能刷新
    </description>
  </property>

  <property>
    <name>hadoop.proxyuser.root.hosts</name>
    <value>localhost</value>
    <description>本地代理</description>
  </property>

  <!-- zkfc的配置 -->
  <property>
    <name>ha.zookeeper.quorum</name>
    <value>nn1:2181,nn2:2181,nn3:2181</value>
    <description>HA使用的zookeeper地址</description>
  </property>
</configuration>
\n

修改hdfs-site.xml

vim /usr/local/hadoop/etc/hadoop/hdfs-site.xml 
\n\n
<configuration>
<property>
<name>dfs.namenode.name.dir</name>
<value>/data/namenode</value>
<description>namenode本地文件存放地址</description>
</property>

<property>
<name>dfs.nameservices</name>
<value>ns1</value>
<description>提供服务的NS逻辑名称,与core-site.xml里的对应</description>
</property>

<!-- namenode的配置 -->
<!-- 主要的 -->
<property>
<name>dfs.ha.namenodes.ns1</name>
<value>nn1,nn2,nn3</value>
<description>列出该逻辑名称下的NameNode逻辑名称</description>
</property>

<property>
<name>dfs.namenode.rpc-address.ns1.nn1</name>
<value>nn1:9000</value>
<description>指定NameNode的RPC位置</description>
</property>

<property>
<name>dfs.namenode.http-address.ns1.nn1</name>
<value>nn1:50070</value>
<description>指定NameNode的Web Server位置</description>
</property>

<property>
<name>dfs.namenode.rpc-address.ns1.nn2</name>
<value>nn2:9000</value>
<description>指定NameNode的RPC位置</description>
</property>

<property>
<name>dfs.namenode.http-address.ns1.nn2</name>
<value>nn2:50070</value>
<description>指定NameNode的Web Server位置</description>
</property>

<property>
<name>dfs.namenode.rpc-address.ns1.nn3</name>
<value>nn3:9000</value>
<description>指定NameNode的RPC位置</description>
</property>

<property>
<name>dfs.namenode.http-address.ns1.nn3</name>
<value>nn3:50070</value>
<description>指定NameNode的Web Server位置</description>
</property>

<property>
<name>dfs.namenode.handler.count</name>
<value>77</value>
<description>namenode的工作线程数</description>
</property>

<!-- journaldata配置,使得其他两个namenode同步第一个namenode数据 -->
<property>
<name>dfs.namenode.shared.edits.dir</name>
<value>qjournal://nn1:8485;nn2:8485;nn3:8485/ns1</value>
<description>指定用于HA存放edits的共享存储,通常是namenode的所在机器</description>
</property>

<property>
<name>dfs.journalnode.edits.dir</name>
<value>/data/journaldata/</value>
<description>journaldata服务存放文件的地址</description>
</property>

<property>
<name>ipc.client.connect.max.retries</name>
<value>10</value>
<description>namenode和journalnode的链接重试次数10次</description>
</property>

<property>
<name>ipc.client.connect.retry.interval</name>
<value>10000</value>
<description>重试的间隔时间10s</description>
</property>

<!-- zkfc的配置 -->
<property>
<name>dfs.ha.fencing.methods</name>
<value>sshfence</value>
<description>指定HA做隔离的方法,缺省是ssh,可设为shell,稍后详述</description>
</property>

<property>
<name>dfs.ha.fencing.ssh.private-key-files</name>
<value>/home/hadoop/.ssh/id_rsa</value>
<description>杀死命令脚本的免密配置秘钥</description>
</property>

<property>
<name>dfs.client.failover.proxy.provider.ns1</name>
<value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
<description>指定客户端用于HA切换的代理类,不同的NS可以用不同的代理类以上示例为Hadoop 2.0自带的缺省代理类</description>
</property>

<property>
<name>dfs.client.failover.proxy.provider.auto-ha</name>
<value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
</property>

<property>
<name>dfs.ha.automatic-failover.enabled</name>
<value>true</value>
</property>
<!-- datanode配置 -->
<property>
 <name>dfs.datanode.data.dir</name>
 <value>/data/datanode</value>
 <description>datanode本地文件存放地址</description>
</property>
<property>
 <name>dfs.replication</name>
 <value>3</value>
 <description>文件复本数</description>
</property>
<property>
 <name>dfs.namenode.datanode.registration.ip-hostname-check</name>
 <value>false</value>
</property>
<property>
 <name>dfs.client.use.datanode.hostname</name>
 <value>true</value>
</property>
<property>
 <name>dfs.datanode.use.datanode.hostname</name>
 <value>true</value>
</property>
</configuration>
\n

修改hadoop-env.sh

vim /usr/local/hadoop/etc/hadoop/hadoop-env.sh
\n\n
# 添加这两行
source /etc/profile
export HADOOP_HEAPSIZE_MAX=512
\n

分发这些配置文件

scp_all.sh /usr/local/hadoop/etc/hadoop/core-site.xml /usr/local/hadoop/etc/hadoop/
scp_all.sh /usr/local/hadoop/etc/hadoop/hdfs-site.xml /usr/local/hadoop/etc/hadoop/
scp_all.sh /usr/local/hadoop/etc/hadoop/hadoop-env.sh /usr/local/hadoop/etc/hadoop/
\n

集群初始化

\n

集群启动

start-dfs.sh
","cover":false,"excerpt":"","more":"

"},{"title":"Hadoop集群Zookeeper配置","top_img":"/img/site01.jpg","top_img_height":"800px","abbrlink":61251,"date":"2024-09-11T14:45:40.000Z","_content":"\n### Zookeeper脚本配置\n- 拷贝ips\n ```bash\n cp ips ips_zookeeper\n ```\n 修改为\n ```bash\n nn1\n nn2\n nn3\n ```\n- 拷贝三个脚本\n ```bash\n cp scp_all.sh scp_all_zookeeper.sh \n cp ssh_all.sh ssh_all_zookeeper.sh \n cp ssh_root.sh ssh_root_zookeeper.sh\n ```\n- 修改脚本\n ```shell\n vim scp_all_zookeeper.sh \n vim ssh_all_zookeeper.sh \n vim ssh_root_zookeeper.sh \n ```\n\n 将三个脚本中的ips改为ips_zookeeper\n### Zookeeper安装\n- 上传到`/tmp`目录下,解压\n ```bash\n sudo tar -zxvf /tmp/zookeeper-3.4.8.tar.gz -C /usr/local/\n scp -r /usr/local/zookeeper-3.4.8/ root@nn2:/usr/local/\n scp -r /usr/local/zookeeper-3.4.8/ root@nn3:/usr/local/\n ssh_root_zookeeper.sh chown -R hadoop:hadoop /usr/local/zookeeper-3.4.8\n ssh_root_zookeeper.s ln -s /usr/local/zookeeper-3.4.8/ /usr/local/zookeeper\n ```\n### Zookeeper配置\n - zoo.cfg配置\n ```bash\n cd /usr/local/zookeeper/conf/\n cp zoo_sample.cfg zoo.cfg\n ```\n 然后`vim zoo.cfg`,修改如下:\n ```properties\n # 修改dataDir\n dataDir=/data/zookeeper\n # 添加一下内容\n server.1=nn1:2888:3888 \n server.2=nn2:2888:3888 \n server.3=nn3:2888:3888\n ```\n 分发给nn2、nn3\n `scp_all_zookeeper.sh /usr/local/zookeeper/conf/zoo.cfg /usr/local/zookeeper/conf/`\n\n- `zkEnv.sh`配置\n `vim /usr/local/zookeeper/bin/zkEnv.sh`\n ![000001.png](..%2F..%2F..%2Fimg%2F000001.png)\n 分发到nn2、nn3\n```bash\nscp_all_zookeeper.sh /usr/local/zookeeper/bin/zkEnv.sh /usr/local/zookeeper/bin/\n```\n- 创建zookeeper数据目录\n```bash\nssh_root_zookeeper.sh mkdir -p /data/zookeeper\nssh_root_zookeeper.sh chown -R hadoop:hadoop /data\n```\n- 创建myid文件\n ```bash\n ssh nn1 'echo \"1\" > /data/zookeeper/myid'\n ssh nn2 'echo \"2\" > /data/zookeeper/myid'\n ssh nn3 'echo \"3\" > /data/zookeeper/myid'\n ```\n- 配置Zookeeper环境变量\n ```bash\n # 在其他所有主机也执行\n sudo chown -R hadoop:hadoop /etc/profile.d/myEnv.sh\n ```\n\n ```bash\n echo 'export ZOOKEEPER_HOME=/usr/local/zookeeper' >> /etc/profile.d/myEnv.sh\n echo 'export PATH=$PATH:$ZOOKEEPER_HOME/bin' >> /etc/profile.d/myEnv.sh\n ```\n\n ```bash\n # 分发到nn2、nn3\n scp_all_zookeeper.sh /etc/profile.d/myEnv.sh /etc/profile.d/\n # source 环境变量\n ssh_all_zookeeper.sh source /etc/profile\n ```\n### Zookeeper的命令\n```bash\n#启动zk服务\nssh_all_zookeeper.sh /usr/local/zookeeper/bin/zkServer.sh start\n#查看每个机器ZK运行的状态\nssh_all_zookeeper.sh /usr/local/zookeeper/bin/zkServer.sh status\n#整体停止服务\nssh_all_zookeeper.sh /usr/local/zookeeper/bin/zkServer.sh stop \n#重启zk服务\nssh_all_zookeeper.sh /usr/local/zookeeper/bin/zkServer.sh restart\n```\n\n```shell\n#启动zookeeper客户端,并连接zookeeper集群\n/usr/local/zookeeper/bin/zkCli.sh -server nn1:2181\n# 可以简化为:\nzkCli.sh\n```\n\n","source":"_posts/bigdata/hadoop/zookeper.md","raw":"---\ntitle: Hadoop集群Zookeeper配置\ntop_img: /img/site01.jpg\ntop_img_height: 800px\nabbrlink: 61251\ndate: 2024-09-011 22:45:40\n---\n\n### Zookeeper脚本配置\n- 拷贝ips\n ```bash\n cp ips ips_zookeeper\n ```\n 修改为\n ```bash\n nn1\n nn2\n nn3\n ```\n- 拷贝三个脚本\n ```bash\n cp scp_all.sh scp_all_zookeeper.sh \n cp ssh_all.sh ssh_all_zookeeper.sh \n cp ssh_root.sh ssh_root_zookeeper.sh\n ```\n- 修改脚本\n ```shell\n vim scp_all_zookeeper.sh \n vim ssh_all_zookeeper.sh \n vim ssh_root_zookeeper.sh \n ```\n\n 将三个脚本中的ips改为ips_zookeeper\n### Zookeeper安装\n- 上传到`/tmp`目录下,解压\n ```bash\n sudo tar -zxvf /tmp/zookeeper-3.4.8.tar.gz -C /usr/local/\n scp -r /usr/local/zookeeper-3.4.8/ root@nn2:/usr/local/\n scp -r /usr/local/zookeeper-3.4.8/ root@nn3:/usr/local/\n ssh_root_zookeeper.sh chown -R hadoop:hadoop 
/usr/local/zookeeper-3.4.8\n ssh_root_zookeeper.s ln -s /usr/local/zookeeper-3.4.8/ /usr/local/zookeeper\n ```\n### Zookeeper配置\n - zoo.cfg配置\n ```bash\n cd /usr/local/zookeeper/conf/\n cp zoo_sample.cfg zoo.cfg\n ```\n 然后`vim zoo.cfg`,修改如下:\n ```properties\n # 修改dataDir\n dataDir=/data/zookeeper\n # 添加一下内容\n server.1=nn1:2888:3888 \n server.2=nn2:2888:3888 \n server.3=nn3:2888:3888\n ```\n 分发给nn2、nn3\n `scp_all_zookeeper.sh /usr/local/zookeeper/conf/zoo.cfg /usr/local/zookeeper/conf/`\n\n- `zkEnv.sh`配置\n `vim /usr/local/zookeeper/bin/zkEnv.sh`\n ![000001.png](..%2F..%2F..%2Fimg%2F000001.png)\n 分发到nn2、nn3\n```bash\nscp_all_zookeeper.sh /usr/local/zookeeper/bin/zkEnv.sh /usr/local/zookeeper/bin/\n```\n- 创建zookeeper数据目录\n```bash\nssh_root_zookeeper.sh mkdir -p /data/zookeeper\nssh_root_zookeeper.sh chown -R hadoop:hadoop /data\n```\n- 创建myid文件\n ```bash\n ssh nn1 'echo \"1\" > /data/zookeeper/myid'\n ssh nn2 'echo \"2\" > /data/zookeeper/myid'\n ssh nn3 'echo \"3\" > /data/zookeeper/myid'\n ```\n- 配置Zookeeper环境变量\n ```bash\n # 在其他所有主机也执行\n sudo chown -R hadoop:hadoop /etc/profile.d/myEnv.sh\n ```\n\n ```bash\n echo 'export ZOOKEEPER_HOME=/usr/local/zookeeper' >> /etc/profile.d/myEnv.sh\n echo 'export PATH=$PATH:$ZOOKEEPER_HOME/bin' >> /etc/profile.d/myEnv.sh\n ```\n\n ```bash\n # 分发到nn2、nn3\n scp_all_zookeeper.sh /etc/profile.d/myEnv.sh /etc/profile.d/\n # source 环境变量\n ssh_all_zookeeper.sh source /etc/profile\n ```\n### Zookeeper的命令\n```bash\n#启动zk服务\nssh_all_zookeeper.sh /usr/local/zookeeper/bin/zkServer.sh start\n#查看每个机器ZK运行的状态\nssh_all_zookeeper.sh /usr/local/zookeeper/bin/zkServer.sh status\n#整体停止服务\nssh_all_zookeeper.sh /usr/local/zookeeper/bin/zkServer.sh stop \n#重启zk服务\nssh_all_zookeeper.sh /usr/local/zookeeper/bin/zkServer.sh restart\n```\n\n```shell\n#启动zookeeper客户端,并连接zookeeper集群\n/usr/local/zookeeper/bin/zkCli.sh -server nn1:2181\n# 可以简化为:\nzkCli.sh\n```\n\n","slug":"bigdata/hadoop/zookeper","published":1,"updated":"2024-09-11T14:51:42.706Z","comments":1,"layout":"post","photos":[],"_id":"cm5w9lrdv000rtgah4ojt276r","content":"

Zookeeper Script Configuration

Zookeeper Installation

Zookeeper Configuration

Zookeeper Commands

# Start the ZK service on every node
ssh_all_zookeeper.sh /usr/local/zookeeper/bin/zkServer.sh start
# Check the running ZK status on each machine
ssh_all_zookeeper.sh /usr/local/zookeeper/bin/zkServer.sh status
# Stop the whole ensemble
ssh_all_zookeeper.sh /usr/local/zookeeper/bin/zkServer.sh stop
# Restart the ZK service
ssh_all_zookeeper.sh /usr/local/zookeeper/bin/zkServer.sh restart

# Start the ZooKeeper client and connect to the cluster
/usr/local/zookeeper/bin/zkCli.sh -server nn1:2181
# With ZOOKEEPER_HOME/bin on PATH, this can be shortened to:
zkCli.sh
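
Once all three servers report a healthy status, a quick smoke test from the client confirms that the quorum is actually serving requests. A minimal sketch of such a check, run inside the zkCli.sh shell started above (the znode name /zk_smoke_test is only an illustrative choice):

ls /
create /zk_smoke_test ok
get /zk_smoke_test
delete /zk_smoke_test
quit

ls / lists the root znodes (/zookeeper should always be present); create writes a test znode holding the data "ok"; get reads it back; delete cleans it up. If these commands succeed against any one server, the ensemble has an elected leader and writes are being acknowledged by a quorum.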
\n\n","cover":false,"excerpt":"","more":"

\n\n"},{"title":"无法访问外网?需要订阅代理服务?","abbrlink":14011,"date":"2024-08-07T02:06:08.000Z","_content":"\n{% note info %}\n**由于中国大陆的GFW(防火墙)限制,无法访问外网网络,因此需要访问像GitHub、YouTube这样的\n的网站将被屏蔽拦截,接下来我将给出一种使用`VPN`服务的可行的方案来保证服务的可靠性。**\n{% endnote %}\n\n### 介绍\n> 根据测试,许多提供服务的提供商所在的网站需要使用`外部网络`才能打开,仅有少部分的网站(**比较贵**)可以直接使用\n> 国内网络环境打开直接购买订阅服务。\n>\n\n那么你现在可以有两个选择:\n1. **方案一**:使用无需`外部网络`便能开通订阅服务的VPN,费用高,如果你选择此方案,那么你可自行搜索解决,此处仅仅讨论方案二。\n2. **方案二**:如果使用此方案,详见下方。\n\n\n### 解决方案\n> 采用**方案二**方式\n> \n> 这是一些订阅服务推广的链接: https://9.234456.xyz/abc.html?t=638586217737356738 (此链接打开无需使用VPN,但进入对应的机场页面却仍无法打开)\n> \n> 此教程中我们使用的机场是 \n> 1. `一元机场`: https://xn--4gq62f52gdss.com/\n> 2. `赔钱机场`:https://xn--mes358aby2apfg.com/\n\n### 机场选择的建议:\n\n- `一元机场`\n ![](/img/yiyuan.png)\n 可以看到\n - `12元/年`,每月50GB的可用额度,下个月重置流量额度\n - `15元/季`,即为`60元/年`,每月有4000GB的可用额度,下个月重置流量额度\n - `7元/月`,即为`84元/年`,每个月8000GB的可用额度,下个月重置流量额度\n 根据我个人的使用情况,大多数情况下我每月的流量使用未超过50GB,如果没有频繁的流量使用,\n 建议选择`12元/年`,否则可以选择`15元/季`,这意味着每月将有4000GB的可用额度\n\n- `赔钱机场`\n\n ![](/img/peiqian.png)\n `赔钱机场`的订阅共有9种方案,这里我仅显示自己正在使用的,个人认为十分优惠:\n - `34.99元/年`,每月500GB的可用额度,根据我观察和使用,这个订阅方案比`一元机场`的性价比更高,且流量使用额度也不用担心\n \n### 如何订阅?\n{% note success %}\n由于需要外部网络才能完成订阅服务的购买,你可以向我的邮箱`15202078626@163.com`发送你的订阅计划方案,\n扫描付款二维码,我将为你开通订阅(您只需要付款对应的订阅金额即可)\n{% endnote %}\n\n\n### 完成订阅后如何使用?\n> 你可以在 `Windows`、`Mac`、`Android`等平台使用此服务\n> 使用订阅的对应链接: https://flowus.cn/shenjian/22f76d4f-e7b3-4b8a-8a89-561566f6eb60\n\n\n ","source":"_posts/net/jichang/jichang.md","raw":"---\ntitle: 无法访问外网?需要订阅代理服务?\ntags: 网络代理\nabbrlink: 14011\ndate: 2024-08-07 10:06:08\n---\n\n{% note info %}\n**由于中国大陆的GFW(防火墙)限制,无法访问外网网络,因此需要访问像GitHub、YouTube这样的\n的网站将被屏蔽拦截,接下来我将给出一种使用`VPN`服务的可行的方案来保证服务的可靠性。**\n{% endnote %}\n\n### 介绍\n> 根据测试,许多提供服务的提供商所在的网站需要使用`外部网络`才能打开,仅有少部分的网站(**比较贵**)可以直接使用\n> 国内网络环境打开直接购买订阅服务。\n>\n\n那么你现在可以有两个选择:\n1. **方案一**:使用无需`外部网络`便能开通订阅服务的VPN,费用高,如果你选择此方案,那么你可自行搜索解决,此处仅仅讨论方案二。\n2. **方案二**:如果使用此方案,详见下方。\n\n\n### 解决方案\n> 采用**方案二**方式\n> \n> 这是一些订阅服务推广的链接: https://9.234456.xyz/abc.html?t=638586217737356738 (此链接打开无需使用VPN,但进入对应的机场页面却仍无法打开)\n> \n> 此教程中我们使用的机场是 \n> 1. `一元机场`: https://xn--4gq62f52gdss.com/\n> 2. `赔钱机场`:https://xn--mes358aby2apfg.com/\n\n### 机场选择的建议:\n\n- `一元机场`\n ![](/img/yiyuan.png)\n 可以看到\n - `12元/年`,每月50GB的可用额度,下个月重置流量额度\n - `15元/季`,即为`60元/年`,每月有4000GB的可用额度,下个月重置流量额度\n - `7元/月`,即为`84元/年`,每个月8000GB的可用额度,下个月重置流量额度\n 根据我个人的使用情况,大多数情况下我每月的流量使用未超过50GB,如果没有频繁的流量使用,\n 建议选择`12元/年`,否则可以选择`15元/季`,这意味着每月将有4000GB的可用额度\n\n- `赔钱机场`\n\n ![](/img/peiqian.png)\n `赔钱机场`的订阅共有9种方案,这里我仅显示自己正在使用的,个人认为十分优惠:\n - `34.99元/年`,每月500GB的可用额度,根据我观察和使用,这个订阅方案比`一元机场`的性价比更高,且流量使用额度也不用担心\n \n### 如何订阅?\n{% note success %}\n由于需要外部网络才能完成订阅服务的购买,你可以向我的邮箱`15202078626@163.com`发送你的订阅计划方案,\n扫描付款二维码,我将为你开通订阅(您只需要付款对应的订阅金额即可)\n{% endnote %}\n\n\n### 完成订阅后如何使用?\n> 你可以在 `Windows`、`Mac`、`Android`等平台使用此服务\n> 使用订阅的对应链接: https://flowus.cn/shenjian/22f76d4f-e7b3-4b8a-8a89-561566f6eb60\n\n\n ","slug":"net/jichang/jichang","published":1,"updated":"2024-08-09T12:21:50.036Z","comments":1,"layout":"post","photos":[],"_id":"cm5w9lrdv000stgah26ty73mb","content":"

Because of the GFW (Great Firewall) restrictions in mainland China, outside networks cannot be reached directly, so sites such as GitHub and YouTube are blocked. Below is a workable approach, based on a VPN/proxy subscription service, to keep access reliable.

Introduction

In my testing, most providers' own websites need an outside connection before they will even open; only a small number of (more expensive) sites can be opened from a domestic network and the subscription purchased directly.

You therefore have two options:

1. Option 1: use a VPN whose subscription can be activated without an outside connection. It costs more; if you choose this, search for it yourself, since only Option 2 is discussed here.
2. Option 2: see below.

Solution

This guide uses Option 2.

Referral link for some subscription services: https://9.234456.xyz/abc.html?t=638586217737356738 (the link itself opens without a VPN, but the provider pages it leads to may still fail to open)

The providers ("airports") used in this guide are:

1. 一元机场: https://xn--4gq62f52gdss.com/
2. 赔钱机场: https://xn--mes358aby2apfg.com/

Recommendations for choosing a provider:

How to subscribe?

Because completing the purchase requires an outside connection, you can send your chosen plan to my email 15202078626@163.com and scan the payment QR code; I will then activate the subscription for you (you only pay the subscription amount itself).

How to use it after subscribing?

The service can be used on Windows, Mac, Android, and other platforms.
Link with instructions for using the subscription: https://flowus.cn/shenjian/22f76d4f-e7b3-4b8a-8a89-561566f6eb60
\n","cover":false,"excerpt":"","more":"

\n"}],"PostAsset":[],"PostCategory":[{"post_id":"cm5w9lrdn000dtgah04audthw","category_id":"cm5w9lrdp000htgahcgif26p4","_id":"cm5w9lrds000ltgahfnvufs59"}],"PostTag":[{"post_id":"cm5w9lrdi0005tgah797v3otl","tag_id":"cm5w9lrdk0008tgahcjy0gsmw","_id":"cm5w9lrdo000ftgahcer4h9z8"},{"post_id":"cm5w9lrdn000ctgah8ll3dor6","tag_id":"cm5w9lrdo000etgahbm4uhrtq","_id":"cm5w9lrdq000jtgah8bewbri1"},{"post_id":"cm5w9lrdn000dtgah04audthw","tag_id":"cm5w9lrdq000itgah0ue9cysm","_id":"cm5w9lrds000mtgahhjn02wyq"},{"post_id":"cm5w9lrdo000gtgah38tl6q9j","tag_id":"cm5w9lrdo000etgahbm4uhrtq","_id":"cm5w9lrds000ntgah8fy86a3s"},{"post_id":"cm5w9lrdv000stgah26ty73mb","tag_id":"cm5w9lrdw000ttgah4ix0bjqs","_id":"cm5w9lrdw000utgah31j22idn"}],"Tag":[{"name":"machinelearning","_id":"cm5w9lrdk0008tgahcjy0gsmw"},{"name":"uniapp","_id":"cm5w9lrdo000etgahbm4uhrtq"},{"name":"古文观止","_id":"cm5w9lrdq000itgah0ue9cysm"},{"name":"网络代理","_id":"cm5w9lrdw000ttgah4ix0bjqs"}]}} \ No newline at end of file diff --git a/public/about/index.html b/public/about/index.html index af754d8..ba43eb0 100644 --- a/public/about/index.html +++ b/public/about/index.html @@ -154,14 +154,14 @@ } detectApple() })(window)


k-Nearest Neighbors (KNN)

Assign the current sample to the class of its k nearest samples (a majority vote among the k closest training samples).

Distance formulas (2-D), for two points $(x_1, y_1)$ and $(x_2, y_2)$:

  • Euclidean distance
    $$
    d = \sqrt{(x_1 - x_2)^2 + (y_1 - y_2)^2}
    $$
  • Manhattan distance
    $$
    d = |x_1 - x_2| + |y_1 - y_2|
    $$
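
As a quick check of the two formulas, here is a worked example with the illustrative points (1, 2) and (4, 6):

$$
d_{\text{Euclidean}} = \sqrt{(1-4)^2 + (2-6)^2} = \sqrt{9 + 16} = 5, \qquad
d_{\text{Manhattan}} = |1-4| + |2-6| = 3 + 4 = 7
$$

The Manhattan distance is never smaller than the Euclidean distance; the two are equal only when the points differ in a single coordinate.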
  • Standardization
    Rescale the data so that it has mean 0 and standard deviation 1 (the scale of a standard normal distribution):
    $$ z = \frac{x - \mu}{\sigma} $$
    where $z$ is the standardized value, $x$ the raw value, $\mu$ the mean of the data, and $\sigma$ its standard deviation.

    from sklearn.preprocessing import StandardScaler  # standardization
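
To make the formula concrete, here is a worked example with assumed values: a feature with mean $\mu = 165$ and standard deviation $\sigma = 5$, evaluated at the raw value $x = 170$:

$$ z = \frac{170 - 165}{5} = 1 $$

so this observation lies exactly one standard deviation above the mean, regardless of the feature's original units.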

郑伯克段于鄢

The original text, with inline annotations, follows:

      初,郑武公娶于申【申国】,曰武姜【武为武公谥号,姜为其宗族之性】。生庄公及共叔段【共表示其曾出逃到共,叔为老三,段为名】。庄公寤生【出生时头先出,难产】,惊姜氏,故名曰“寤生”, 遂恶之,爱【喜爱】共叔段,欲立【立为储君】之,亟(qì)【多次】请于武公,及庄公即位,为之【共叔段】请制【一个叫制的封地,虎牢关所在】。公曰:“制,岩邑【险要的城邑】也,虢叔死焉,佗【通“他”,其他】邑唯命(是听)。”请京,使居之,谓之“京城大叔”。
