author     V3n3RiX <venerix@koprulu.sector>  2022-03-20 00:40:44 +0000
committer  V3n3RiX <venerix@koprulu.sector>  2022-03-20 00:40:44 +0000
commit     4cbcc855382a06088e2f016f62cafdbcb7e40665 (patch)
tree       356496503d52354aa6d9f2d36126302fed5f3a73 /app-i18n/mozc
parent     fcc5224904648a8e6eb528d7603154160a20022f (diff)
gentoo resync : 20.03.2022
Diffstat (limited to 'app-i18n/mozc')
-rw-r--r--  app-i18n/mozc/Manifest | 19
-rw-r--r--  app-i18n/mozc/files/mozc-2.20.2673.102-tests_build.patch | 27
-rw-r--r--  app-i18n/mozc/files/mozc-2.20.2673.102-tests_skipping.patch | 70
-rw-r--r--  app-i18n/mozc/files/mozc-2.23.2815.102-environmental_variables.patch | 132
-rw-r--r--  app-i18n/mozc/files/mozc-2.23.2815.102-gcc-8.patch | 22
-rw-r--r--  app-i18n/mozc/files/mozc-2.23.2815.102-protobuf-3.18.patch | 13
-rw-r--r--  app-i18n/mozc/files/mozc-2.23.2815.102-python-3_1.patch | 621
-rw-r--r--  app-i18n/mozc/files/mozc-2.23.2815.102-python-3_2.patch | 600
-rw-r--r--  app-i18n/mozc/files/mozc-2.23.2815.102-python-3_3.patch | 583
-rw-r--r--  app-i18n/mozc/files/mozc-2.23.2815.102-python-3_4.patch | 537
-rw-r--r--  app-i18n/mozc/files/mozc-2.23.2815.102-reiwa.patch | 35
-rw-r--r--  app-i18n/mozc/files/mozc-2.23.2815.102-server_path_check.patch | 95
-rw-r--r--  app-i18n/mozc/files/mozc-2.23.2815.102-system_libraries.patch | 274
-rw-r--r--  app-i18n/mozc/metadata.xml | 2
-rw-r--r--  app-i18n/mozc/mozc-2.23.2815.102.ebuild | 369
15 files changed, 1 insertion, 3398 deletions
diff --git a/app-i18n/mozc/Manifest b/app-i18n/mozc/Manifest
index 4b073e661da7..240b26a26ac2 100644
--- a/app-i18n/mozc/Manifest
+++ b/app-i18n/mozc/Manifest
@@ -1,29 +1,12 @@
AUX 50mozc-gentoo.el 121 BLAKE2B 0cad49e4a5a04d1cbdcd063cfd5a06c14bf4bab47890bbc089d8bba9889abb5197bea2cfb912f638052f912d7aca51a7e71b743b4742fd90b9d5ab5a60779fe5 SHA512 2d67a6723374872717ab7132f02f4030f00ddecabb8a1fc20c9d69d64406dc2ba2a1109734378fae7b4718a8ae9601beff4b6f27cb43a280fa2e0f946da70685
-AUX mozc-2.20.2673.102-tests_build.patch 1037 BLAKE2B 8f3c0b3361371a994957a89f5b86020902262de65aef75f82e58fcfcea211b8a8d3c8418898781ac1de2a2c9bdc73ffc4cf68102122a22691d9592d5a02e6c62 SHA512 d99ee7edbb406e123b26ec186933608b42d36193b7bfe67232810c0fbd7a92bd8cfff26dc45eae4f020e82e9a8370cf2033c750250455585b6b3d88cc8c973ef
-AUX mozc-2.20.2673.102-tests_skipping.patch 2086 BLAKE2B a104d6a83b02b49e1208be1b39699b90ad419d8befa29ed0366f7cbc454fe9ce27361f21897a5c74dadfc2d9eb31a30d7237e20db8c1fb205b8e71b40f4a0e57 SHA512 4a2e5a35c5e65ebeb737f656d6efb313c660c2c7cf62abd5d9295fd16e2dffde21d069cd74fd0cfd2454cd7f7c9d136dfbadbf79832db8441c619ff37c7e4b0e
-AUX mozc-2.23.2815.102-environmental_variables.patch 4636 BLAKE2B 2c1d952899b50d0205127fe5f708c8cc8ad115db35f1ebfe5b589550203ee64fe06b0d66b10989c12063feff96f679ebd6ee4562651ac81681019634e6e9c462 SHA512 40e87a52d96794a91f5cf77f387d341b7d58a4b27e3d1455c4230fbe75107c09c3bd4784394437265548ee2704a4d1838cc0965f0333e554484dafe8b106cb7b
-AUX mozc-2.23.2815.102-gcc-8.patch 496 BLAKE2B 318fcda19cf4f7e95920b5df4f5638621bcae992891941fa6f220d4dac1b2eac0faeda7a857a587baed41b361608f20c4bbda0d9a69a60b823572482c6789f46 SHA512 9c2b0e9695f4cd050f0d5f35d0e23f22715b9c6e5607629c7dc023560a5191bd755497fe6fe5306789c00e68d5bd9b7179d5515c982967788fca432134b71cad
-AUX mozc-2.23.2815.102-protobuf-3.18.patch 610 BLAKE2B 7df20afc96279c6419e7941f11f1c71451b23d46efcbb6b57b17472dbaf3c5070e4672b417ffe8ae47774aa586e4fe4e6f792f99eea174a9e765f46dc90e409e SHA512 53e4db422c39a6c31ab2f31bc1ec6f4ca8cb9f025fa140068755201f9c224c40f0191269561b8c6fad479519a7165fd543b0074a49ec9d3985e5b4265bfa7a00
-AUX mozc-2.23.2815.102-python-3_1.patch 19991 BLAKE2B e553f5b3beb7a82f9f91f585a17004f96d9bb3883b432af2f4232211de3d8c4e348cf0d1327fe2e49410112540a01533068ca76464a7deb79429bfa7c49f58c8 SHA512 7d51f282fa3132d279b979ae96d2d7a1a3009c2ad96544033084deb0b739638ae69263b0067172a890d5ca3dd7e47f412af05b8f57ee64505a040cbdb77af388
-AUX mozc-2.23.2815.102-python-3_2.patch 17794 BLAKE2B 53849d003e3821a6d07e5019d0defb9b4558f91938da2367e82cf5327d2c69e13107eb91b7c05b731e1693ca02ca1e61771b81c29d391e412a43fd0fe64973b8 SHA512 a8d017d9b5aa7c89638fcb464a016b8e98b2e20dacc0c68c4362824cad315e0c76c15cabce84059de4a3d2184c1388289f253ebd22f1c640a3946a1189955d72
-AUX mozc-2.23.2815.102-python-3_3.patch 17011 BLAKE2B b7a40ec699da304130ab8b0e149d57ebc1b31c608c03fb35104918e0d33289eb5b40211a18f2083a2addcaed68b691ef2d029c106e2f2fec861f416a2e5f2134 SHA512 52b5cd4ee5e61582f2b9172a927e9e54bf07aea90462448fd63385c7be12c37b19cbdd784a21db3950ec4269249221f4f3bb3570ce0068d5a2448db63a33463c
-AUX mozc-2.23.2815.102-python-3_4.patch 19042 BLAKE2B 18ee638acdb1d086b01034b62e40c1bbd1ac47c43447bf4c3189f0427fb559c8b442c5828304378b607850faae9b5cc571270248c21db76dfdde60ff73f6aa93 SHA512 97cd4e2e10d7fdf3806a0750f90e537649d7eb29b893c5ec1d6e32abea0e1a1f4a7e94788733486aa27948e48d3362a1a76318595463edaa7bcd1c9d9c47a194
-AUX mozc-2.23.2815.102-reiwa.patch 924 BLAKE2B 3893f975d43ce29a962c5e712503442b178847acebd92e797040b6b699a37cb051f88a37965e0f194f73bca2e2397a8b070eb6d14bbe92adbba3e17a864e8c8b SHA512 df3f98ab93d7662b5ab038c732d3342b3f5860774559242eca1d0f1cd67275f315c6e4ffad83c6990ef5eb23fc19c0379ed7d3bdd0a377fcb080c066aecd16cc
-AUX mozc-2.23.2815.102-server_path_check.patch 3447 BLAKE2B 8e18cf2f7100cdfeb8e6b1420d773e955994cc7bd5e4bf56e0ffe78cd9a96b044c726c1045c2cd2c326ca151c8bf527b6447b2f509a20e4a912b535f5180ec80 SHA512 106c3170112bde2c6b9eb9ad5d5d460be53bb9162eb5613445170c2ce00f88385946360d13514167a6279c610744784079f8969b8f901f22e51e6397db22b0d3
-AUX mozc-2.23.2815.102-system_libraries.patch 9064 BLAKE2B 0cdf732a1bbf16d4af1d6dee81aacf3f3cb1f1c00c6aeb0fc12dac9dcd8611124e388e5fc721eb9b9472e073515d7457b182ee7cfe466b83bf319d572ae55240 SHA512 2d5b06e855f8c1889367b9b672e3ec81a037bc592872e28319e0180a0dcd177cdff817775a1a0f74ebf48e0b7558cf3b67953120411be753c662c43f409b05ce
AUX mozc-2.26.4220-environmental_variables.patch 3039 BLAKE2B 474d5f47a79f62db398a1b2a8e661640e2bcd3fb0271ec070a12973eb507d890ef2e8dd2033886e6b46723c07127304c9f736a639a0421d3c607fa126649a41f SHA512 978cd425e4c9ac183fd3c27720510889835c0f07ece2f80bd2c6038662a5384aff58dec636c78ce64f6573c1493b04d4a0b374ca2da85da39e76b81c6bcfb515
AUX mozc-2.26.4220-server_path_check.patch 3453 BLAKE2B 18dc82bfd4f05e8c54216dd43ff6c1d763ae3f7999193854e226e7425d848d43e3d11c2f1a70c19133731ad2e809ad38fa7113fbd623866467317a649ae5f56e SHA512 44d4647ede6759810355122b4ea972312587f5b520ffa59ba8dc5a04455c3f20b72fd91a3bc7f55f8b2e7ad88a0ce40e660b262825ec330e5f9280b03fee6cec
AUX mozc-2.26.4220-system_abseil-cpp.patch 14328 BLAKE2B 3900b3826cc46fdd2e9d74b3129033fb3ff31184f73f7f31806291f8e4db0fd175d139407e7cca3d3362fe47f546843c19056c90d483429c0b46d28a132e54cc SHA512 d59789fbeabf4309dbd5f99754b7c57e483f752ffe7fae2ba269ae75b64a31e6f4652af1c205462a40ae86fe0333b4178a37e9ca9ff839e630d50e447e96e6ec
AUX mozc-2.26.4220-system_gtest.patch 4751 BLAKE2B 3a5f0f5c2bb6d5a503d0078576b72baced4cc88f2b3ffed8d75c42d64047d48003565f29deafd1675e5efae2cf7584e414fe2a9e7a87fdceef5986edc88557ef SHA512 f5fe8cd973b3a5c45ab95ec44b0cd3e5ee64dbb9f1094d0ac1d5f7fbee43900aa64936006238ba1405cb6b7cd67cbe75a7040ef9b3744358c5bffe3ae8dcc4a5
AUX mozc-2.26.4220-system_jsoncpp.patch 3469 BLAKE2B 0fc4e66efd03bb966c8071fc35b9c9d51fa8b6588ed7cebe4e21ba750d0fb5e2ac3bcf834991e669fba613ef01c983d4d493945daa865418159cf5bb3b110d78 SHA512 c6236dff4ff44de028017b34ade7091a38ae21e237e39c50cd1125cfccd55ae0952fa0c8d7b19fe6dd863633fffa07843b7e0a2510e03a5230fbfbc02d90b545
-DIST fcitx-mozc-2.23.2815.102.1.patch 295112 BLAKE2B 709b84f6eaed16da38a173f40ae7cccff362fd167e6deb4090ae8a9ec522ac8e11ccff3c9ef6433907249af8c9eb4b7be12d2c05564cabd45c25e26764286ed3 SHA512 e0d4645df919838f0fe31a97bf6dd759c222a696f68133f7584d2c771f70734ea634a25bebb03a756221000d3552423207ee5163d75778dbf480b6e267ba4cd0
DIST fcitx-mozc-2.26.4220-20201219202429.tar.gz 37174759 BLAKE2B 3f320523103ee7a35a763f6613889e282e8a654db8ef11b5a1168db12611387e300621f015977875ffec3c10c055e36b3cb525a1e8559013f3ac42bc5b0cc296 SHA512 823e64267aa69e4d30dd7408f946a54072994f9a2d68691a3d393b216a15e70fd28bcb24705f9d9f483a2291517c48c5a1aeda893d2315f9a1d9cd352fb3feb2
-DIST japanese-usage-dictionary-20120416091336.tar.gz 71051 BLAKE2B 08eecf0aa021e27a2813f58c2d37f1cec760448f6ae086ae7468b8a11575c6ef9f72f656cb4d53e0179b8a7b00f2d91ff51a0ad7825e078dcbac0810f1d8b3e1 SHA512 b7e997a979b6d50e49b9af5dc830ea4df0532f6ab1321b9ef14983f65bb54f1b2967375c82e07957ae7693ebbf43c9b56ecea6bfea8dd1fdaee444bd549d83a7
DIST japanese-usage-dictionary-20180701040110.tar.gz 71285 BLAKE2B dfad056a1d5061b6764f583da15b9ad60a3c4421cee0430c4665d1c2779a64f9b31473c1746a3e2b9bda5167349432e51dcf7d4d48f75fde9543e9c16ff74c0d SHA512 68b4d3f52dd6cd4f00a8012a870b4f5929519cd69815b1729f3881d1f964802308f4aa101e236824b4c0f832183a9e8097437ed620403f2a652f126e7cdc1eb3
-DIST mozc-2.23.2815.102-protobuf_generated_classes_no_inheritance.patch 40296 BLAKE2B 982f43fa68031eb0f779ec4d034fef838a4fce7834291db889c9edffba3df4acd5bfdf187dd4a52ee8fd0714de943f9d5112070cd69502e1449dab4dbf8543b2 SHA512 6e05b2f22663ddbfb24a8344928ec24c0b4cf523728c1623367970b8f263142af95c056c82d0827414833d5b9f26c3a024a04a688851021601a5cbcc1474e754
-DIST mozc-2.23.2815.102.tar.gz 47739041 BLAKE2B 045a8a4a07e09cf923b67824111cdf672febc30256a6aef951ae779a3f582b3860042750d766120e376898c63be5b4baea870798a192cee34b0d48647e1ec5e6 SHA512 a3face616ca89990bca52371dcc8003604ebe0e9633116a64550add070152b1bc4d9b21e9f102c5afa6f8b1aa11d8dbc4bafbcebfaf4a12a934f085f245d548f
DIST mozc-2.26.4220-20201212102434.tar.gz 37106063 BLAKE2B 7d3c236809c8feb017f35e3f7a9b024ac34204f483c69913a2d1ae6b771054548f7f81afde35ed3a6887c9f7503584cee0fc646653fc7cde6fd015158de9c3d3 SHA512 9d87947b9b9256a3cc66cb23ab6caf4b6974142090b0d315c101bdc700fd289c259d09cb7f02f5f9e7462f48d652cd2d5b4822a645751fdcaed88b939520c429
-EBUILD mozc-2.23.2815.102.ebuild 11311 BLAKE2B b72bbc79a438821b1304d0b03411e71e6622d737da822ab4ea7a9f714d8dd4dd80438ce60f71adefec6a8561e28f93b672d8c8eb92e59e31bc498c8ecb356102 SHA512 e1a85a72689e4c012270a5e730fc3a519274419782dcfd977890f2b6609a68963f3a9733ab8227885d59ee71e6729d27417517410657ecc5b865af1ab444b725
EBUILD mozc-2.26.4220_p20201212102434_p20201219202429.ebuild 10406 BLAKE2B f38f270a88bd55d859d18b3f024bc3f1e4fdf16a2b8d05e325cf2e7e3537f7a0ffa571cb93e6952323b9e14c783318db64efa0324d8031a1e5a1f4e823410372 SHA512 3343db4d9e58608265dc688fe2d2d3fe8256a563d7dd78b14e1b7fbd267e792ef7764ea41d049160484f5e63b7995580e48d6dce2998c36a10c40ab11605b18f
EBUILD mozc-9999.ebuild 10241 BLAKE2B ed2748f5a233f05bfa1aa502405adde852b5ac1113da79396a98db3a7a70dcf05bc632d81b0f83d98d15bf47f238ca1174d69441548e61a68999f404003a99ab SHA512 36e9989834338051fa10cd06dbcdc533f45534f9c579c574f720fe86960a7f0552febf2ef009dfe5986e8dd46db71d75b71123192f570364083708ebb5c94ca9
-MISC metadata.xml 1016 BLAKE2B f038acf8faac7a92025ac8ab40384152bcdf7c18c31bbe48118b76e719c967633c9eb63b4c8b190607bf5dd448866a3fb180461e0d954a62671196cda2259516 SHA512 63bf48d503b5feddd94393f1a0ff1958122f271880099115332b2849cbe6454cdf3e67899e4363ae35df0ba9447acdeecb7682714f092c3dda6c752aafbd7483
+MISC metadata.xml 759 BLAKE2B 4a5a13fcee5848044c4fb7f47435c75e82ec5fc31d3666b101356a7ec14743eb4a997629066fbffc216caba1aff743653d92e491cd3ce2f54ee22019eff959b1 SHA512 a97788b46f15e7e7dd578dbc229334c694dda9cde84093a1fde53cb4a877773ed0a336247e3cb80dcc5962b27fc7946242634f2cad87c602c569262670f31ffa
diff --git a/app-i18n/mozc/files/mozc-2.20.2673.102-tests_build.patch b/app-i18n/mozc/files/mozc-2.20.2673.102-tests_build.patch
deleted file mode 100644
index b8b4477437f8..000000000000
--- a/app-i18n/mozc/files/mozc-2.20.2673.102-tests_build.patch
+++ /dev/null
@@ -1,27 +0,0 @@
---- /src/net/json_util_test.cc
-+++ /src/net/json_util_test.cc
-@@ -784,13 +784,13 @@
- for (size_t i = 0; i < arraysize(kNumS32ValueKeys); ++i) {
- {
- Json::Value json_value;
-- json_value[kNumS32ValueKeys[i]] = -2147483649ll;
-+ json_value[kNumS32ValueKeys[i]] = static_cast<Json::Value::Int64>(-2147483649ll);
- TestMsg msg;
- EXPECT_FALSE(JsonUtil::JsonValueToProtobufMessage(json_value, &msg));
- }
- {
- Json::Value json_value;
-- json_value[kNumS32ValueKeys[i]] = 2147483648ull;
-+ json_value[kNumS32ValueKeys[i]] = static_cast<Json::Value::UInt64>(2147483648ull);
- TestMsg msg;
- EXPECT_FALSE(JsonUtil::JsonValueToProtobufMessage(json_value, &msg));
- }
-@@ -805,7 +805,7 @@
- }
- {
- Json::Value json_value;
-- json_value[kNumU32ValueKeys[i]] = 4294967296ull;
-+ json_value[kNumU32ValueKeys[i]] = static_cast<Json::Value::UInt64>(4294967296ull);
- TestMsg msg;
- EXPECT_FALSE(JsonUtil::JsonValueToProtobufMessage(json_value, &msg));
- }
diff --git a/app-i18n/mozc/files/mozc-2.20.2673.102-tests_skipping.patch b/app-i18n/mozc/files/mozc-2.20.2673.102-tests_skipping.patch
deleted file mode 100644
index 28c5486de50d..000000000000
--- a/app-i18n/mozc/files/mozc-2.20.2673.102-tests_skipping.patch
+++ /dev/null
@@ -1,70 +0,0 @@
-Disable test leaving mozc_server orphan process.
-
---- /src/unix/ibus/mozc_engine_test.cc
-+++ /src/unix/ibus/mozc_engine_test.cc
-@@ -41,65 +41,5 @@
- namespace mozc {
- namespace ibus {
-
--class LaunchToolTest : public testing::Test {
-- public:
-- LaunchToolTest() {
-- g_type_init();
-- }
--
-- protected:
-- virtual void SetUp() {
-- mozc_engine_.reset(new MozcEngine());
--
-- mock_ = new client::ClientMock();
-- mock_->ClearFunctionCounter();
-- mozc_engine_->client_.reset(mock_);
-- }
--
-- virtual void TearDown() {
-- mozc_engine_.reset();
-- }
--
-- client::ClientMock* mock_;
-- unique_ptr<MozcEngine> mozc_engine_;
--
-- private:
-- DISALLOW_COPY_AND_ASSIGN(LaunchToolTest);
--};
--
--TEST_F(LaunchToolTest, LaunchToolTest) {
-- commands::Output output;
--
-- // Launch config dialog
-- mock_->ClearFunctionCounter();
-- mock_->SetBoolFunctionReturn("LaunchToolWithProtoBuf", true);
-- output.set_launch_tool_mode(commands::Output::CONFIG_DIALOG);
-- EXPECT_TRUE(mozc_engine_->LaunchTool(output));
--
-- // Launch dictionary tool
-- mock_->ClearFunctionCounter();
-- mock_->SetBoolFunctionReturn("LaunchToolWithProtoBuf", true);
-- output.set_launch_tool_mode(commands::Output::DICTIONARY_TOOL);
-- EXPECT_TRUE(mozc_engine_->LaunchTool(output));
--
-- // Launch word register dialog
-- mock_->ClearFunctionCounter();
-- mock_->SetBoolFunctionReturn("LaunchToolWithProtoBuf", true);
-- output.set_launch_tool_mode(commands::Output::WORD_REGISTER_DIALOG);
-- EXPECT_TRUE(mozc_engine_->LaunchTool(output));
--
-- // Launch no tool(means do nothing)
-- mock_->ClearFunctionCounter();
-- mock_->SetBoolFunctionReturn("LaunchToolWithProtoBuf", false);
-- output.set_launch_tool_mode(commands::Output::NO_TOOL);
-- EXPECT_FALSE(mozc_engine_->LaunchTool(output));
--
-- // Something occurring in client::Client::LaunchTool
-- mock_->ClearFunctionCounter();
-- mock_->SetBoolFunctionReturn("LaunchToolWithProtoBuf", false);
-- output.set_launch_tool_mode(commands::Output::CONFIG_DIALOG);
-- EXPECT_FALSE(mozc_engine_->LaunchTool(output));
--}
--
- } // namespace ibus
- } // namespace mozc
diff --git a/app-i18n/mozc/files/mozc-2.23.2815.102-environmental_variables.patch b/app-i18n/mozc/files/mozc-2.23.2815.102-environmental_variables.patch
deleted file mode 100644
index 02e522a32e9e..000000000000
--- a/app-i18n/mozc/files/mozc-2.23.2815.102-environmental_variables.patch
+++ /dev/null
@@ -1,132 +0,0 @@
-https://github.com/google/mozc/issues/470
-
---- /src/base/system_util.cc
-+++ /src/base/system_util.cc
-@@ -208,28 +208,39 @@
- dir_ = "/";
- return;
- #else // MOZC_USE_PEPPER_FILE_IO
-+ const char *configuration_directory_env;
- string dir;
-
- #ifdef OS_WIN
-- DCHECK(SUCCEEDED(Singleton<LocalAppDataDirectoryCache>::get()->result()));
-- dir = Singleton<LocalAppDataDirectoryCache>::get()->path();
-+ configuration_directory_env = ::getenv("MOZC_CONFIGURATION_DIRECTORY");
-+ if (configuration_directory_env) {
-+ dir = configuration_directory_env;
-+ } else {
-+ DCHECK(SUCCEEDED(Singleton<LocalAppDataDirectoryCache>::get()->result()));
-+ dir = Singleton<LocalAppDataDirectoryCache>::get()->path();
- #ifdef GOOGLE_JAPANESE_INPUT_BUILD
-- dir = FileUtil::JoinPath(dir, kCompanyNameInEnglish);
-- FileUtil::CreateDirectory(dir);
-+ dir = FileUtil::JoinPath(dir, kCompanyNameInEnglish);
-+ FileUtil::CreateDirectory(dir);
- #endif // GOOGLE_JAPANESE_INPUT_BUILD
-- dir = FileUtil::JoinPath(dir, kProductNameInEnglish);
-+ dir = FileUtil::JoinPath(dir, kProductNameInEnglish);
-+ }
-
- #elif defined(OS_MACOSX)
-- dir = MacUtil::GetApplicationSupportDirectory();
-+ configuration_directory_env = ::getenv("MOZC_CONFIGURATION_DIRECTORY");
-+ if (configuration_directory_env) {
-+ dir = configuration_directory_env;
-+ } else {
-+ dir = MacUtil::GetApplicationSupportDirectory();
- #ifdef GOOGLE_JAPANESE_INPUT_BUILD
-- dir = FileUtil::JoinPath(dir, "Google");
-- // The permission of ~/Library/Application Support/Google seems to be 0755.
-- // TODO(komatsu): nice to make a wrapper function.
-- ::mkdir(dir.c_str(), 0755);
-- dir = FileUtil::JoinPath(dir, "JapaneseInput");
-+ dir = FileUtil::JoinPath(dir, "Google");
-+ // The permission of ~/Library/Application Support/Google seems to be 0755.
-+ // TODO(komatsu): nice to make a wrapper function.
-+ ::mkdir(dir.c_str(), 0755);
-+ dir = FileUtil::JoinPath(dir, "JapaneseInput");
- #else // GOOGLE_JAPANESE_INPUT_BUILD
-- dir = FileUtil::JoinPath(dir, "Mozc");
-+ dir = FileUtil::JoinPath(dir, "Mozc");
- #endif // GOOGLE_JAPANESE_INPUT_BUILD
-+ }
-
- #elif defined(OS_ANDROID)
- // For android, we do nothing here because user profile directory,
-@@ -237,14 +248,24 @@
- // is injected from Java layer.
-
- #else // !OS_WIN && !OS_MACOSX && !OS_ANDROID
-- char buf[1024];
-- struct passwd pw, *ppw;
-- const uid_t uid = geteuid();
-- CHECK_EQ(0, getpwuid_r(uid, &pw, buf, sizeof(buf), &ppw))
-- << "Can't get passwd entry for uid " << uid << ".";
-- CHECK_LT(0, strlen(pw.pw_dir))
-- << "Home directory for uid " << uid << " is not set.";
-- dir = FileUtil::JoinPath(pw.pw_dir, ".mozc");
-+ configuration_directory_env = ::getenv("MOZC_CONFIGURATION_DIRECTORY");
-+ if (configuration_directory_env) {
-+ dir = configuration_directory_env;
-+ } else {
-+ const char *home_env = ::getenv("HOME");
-+ if (home_env) {
-+ dir = FileUtil::JoinPath(home_env, ".mozc");
-+ } else {
-+ char buf[1024];
-+ struct passwd pw, *ppw;
-+ const uid_t uid = geteuid();
-+ CHECK_EQ(0, getpwuid_r(uid, &pw, buf, sizeof(buf), &ppw))
-+ << "Can't get passwd entry for uid " << uid << ".";
-+ CHECK_LT(0, strlen(pw.pw_dir))
-+ << "Home directory for uid " << uid << " is not set.";
-+ dir = FileUtil::JoinPath(pw.pw_dir, ".mozc");
-+ }
-+ }
- #endif // !OS_WIN && !OS_MACOSX && !OS_ANDROID
-
- FileUtil::CreateDirectory(dir);
-@@ -356,6 +377,10 @@
- #endif // OS_WIN
-
- string SystemUtil::GetServerDirectory() {
-+ const char *server_directory_env = ::getenv("MOZC_SERVER_DIRECTORY");
-+ if (server_directory_env) {
-+ return server_directory_env;
-+ }
- #ifdef OS_WIN
- DCHECK(SUCCEEDED(Singleton<ProgramFilesX86Cache>::get()->result()));
- #if defined(GOOGLE_JAPANESE_INPUT_BUILD)
-@@ -409,6 +434,10 @@
- }
-
- string SystemUtil::GetDocumentDirectory() {
-+ const char *documents_directory_env = ::getenv("MOZC_DOCUMENTS_DIRECTORY");
-+ if (documents_directory_env) {
-+ return documents_directory_env;
-+ }
- #if defined(OS_MACOSX)
- return GetServerDirectory();
- #elif defined(MOZC_DOCUMENT_DIRECTORY)
---- /src/handwriting/zinnia_handwriting.cc
-+++ /src/handwriting/zinnia_handwriting.cc
-@@ -31,6 +31,7 @@
-
- #include "handwriting/zinnia_handwriting.h"
-
-+#include <cstdlib>
- #include <memory>
- #include <string>
-
-@@ -48,6 +49,10 @@
-
- // static
- string ZinniaHandwriting::GetModelFileName() {
-+ const char *zinnia_model_file_env = ::getenv("MOZC_ZINNIA_MODEL_FILE");
-+ if (zinnia_model_file_env) {
-+ return zinnia_model_file_env;
-+ }
- #if defined(MOZC_BUILD)
- return MOZC_ZINNIA_MODEL_FILE;
- #else
diff --git a/app-i18n/mozc/files/mozc-2.23.2815.102-gcc-8.patch b/app-i18n/mozc/files/mozc-2.23.2815.102-gcc-8.patch
deleted file mode 100644
index 07514048e7a4..000000000000
--- a/app-i18n/mozc/files/mozc-2.23.2815.102-gcc-8.patch
+++ /dev/null
@@ -1,22 +0,0 @@
-https://github.com/google/mozc/issues/441
-
---- /src/prediction/zero_query_dict.h
-+++ /src/prediction/zero_query_dict.h
-@@ -147,6 +147,17 @@
- return iter;
- }
-
-+ iterator &operator--() {
-+ ptr_ -= kTokenByteSize;
-+ return *this;
-+ }
-+
-+ iterator operator--(int) {
-+ const iterator tmp(ptr_, string_array_);
-+ ptr_ -= kTokenByteSize;
-+ return tmp;
-+ }
-+
- iterator &operator-=(ptrdiff_t n) {
- ptr_ -= n * kTokenByteSize;
- return *this;
diff --git a/app-i18n/mozc/files/mozc-2.23.2815.102-protobuf-3.18.patch b/app-i18n/mozc/files/mozc-2.23.2815.102-protobuf-3.18.patch
deleted file mode 100644
index 15077bb1752a..000000000000
--- a/app-i18n/mozc/files/mozc-2.23.2815.102-protobuf-3.18.patch
+++ /dev/null
@@ -1,13 +0,0 @@
-Fix building with Protocol Buffers >=3.18.
-
---- /src/dictionary/user_dictionary_storage.cc
-+++ /src/dictionary/user_dictionary_storage.cc
-@@ -108,7 +108,7 @@
- // wants to use more than 512MB.
- mozc::protobuf::io::IstreamInputStream zero_copy_input(&ifs);
- mozc::protobuf::io::CodedInputStream decoder(&zero_copy_input);
-- decoder.SetTotalBytesLimit(kDefaultTotalBytesLimit, -1);
-+ decoder.SetTotalBytesLimit(kDefaultTotalBytesLimit);
- if (!user_dictionary_storage_base.ParseFromCodedStream(&decoder)) {
- LOG(ERROR) << "Failed to parse";
- if (!decoder.ConsumedEntireMessage() || !ifs.eof()) {
diff --git a/app-i18n/mozc/files/mozc-2.23.2815.102-python-3_1.patch b/app-i18n/mozc/files/mozc-2.23.2815.102-python-3_1.patch
deleted file mode 100644
index 2b9bbd720cde..000000000000
--- a/app-i18n/mozc/files/mozc-2.23.2815.102-python-3_1.patch
+++ /dev/null
@@ -1,621 +0,0 @@
-https://github.com/google/mozc/issues/462
-
---- /src/base/gen_character_set.py
-+++ /src/base/gen_character_set.py
-@@ -33,7 +33,6 @@
- import itertools
- import optparse
- import re
--import string
- import sys
-
-
-@@ -89,7 +88,8 @@
- @staticmethod
- def _LoadTable(filename, column_index, pattern, validater):
- result = set()
-- for line in open(filename):
-+ fh = open(filename)
-+ for line in fh:
- if line.startswith('#'):
- # Skip a comment line.
- continue
-@@ -100,6 +100,7 @@
- ucs = int(match.group(1), 16)
- if validater(ucs):
- result.add(ucs)
-+ fh.close()
-
- return result
-
-@@ -250,7 +251,7 @@
- # (at most) four code points.
- bit_list = []
- for _, group in itertools.groupby(enumerate(category_list),
-- lambda (codepoint, _): codepoint / 4):
-+ lambda x: x[0] // 4):
- # Fill bits from LSB to MSB for each group.
- bits = 0
- for index, (_, category) in enumerate(group):
-@@ -263,7 +264,7 @@
-
- # Output the content. Each line would have (at most) 16 bytes.
- for _, group in itertools.groupby(enumerate(bit_list),
-- lambda (index, _): index / 16):
-+ lambda x: x[0] // 16):
- line = [' \"']
- for _, bits in group:
- line.append('\\x%02X' % bits)
-@@ -386,7 +387,7 @@
- # Bitmap lookup.
- # TODO(hidehiko): the bitmap has two huge 0-bits ranges. Reduce them.
- category_map = [
-- (bits, category) for category, bits in CATEGORY_BITMAP.iteritems()]
-+ (bits, category) for category, bits in CATEGORY_BITMAP.items()]
- category_map.sort()
-
- lines.extend([
-@@ -451,7 +452,7 @@
- options.jisx0213file)
- category_list = [
- categorizer.GetCategory(codepoint)
-- for codepoint in xrange(categorizer.MaxCodePoint() + 1)]
-+ for codepoint in range(categorizer.MaxCodePoint() + 1)]
- generated_character_set_header = GenerateCharacterSetHeader(category_list)
-
- # Write the result.
---- /src/base/gen_config_file_stream_data.py
-+++ /src/base/gen_config_file_stream_data.py
-@@ -58,7 +58,7 @@
- result = []
- result.append(' { "%s", "' % os.path.basename(path))
- with open(path, 'rb') as stream:
-- result.extend(r'\x%02X' % ord(byte) for byte in stream.read())
-+ result.extend(r'\x%02X' % byte for byte in stream.read())
- result.append('", %d }' % os.path.getsize(path))
-
- return ''.join(result)
-@@ -93,8 +93,8 @@
- def main():
- (options, args) = ParseOptions()
- if not options.output:
-- print >>sys.stderr, (
-- 'usage: gen_config_file_stream_data.py --output=filepath input ...')
-+ print('usage: gen_config_file_stream_data.py --output=filepath input ...',
-+ file=sys.stderr)
- sys.exit(2)
-
- with open(options.output, 'w') as output:
---- /src/build_mozc.py
-+++ /src/build_mozc.py
-@@ -943,7 +943,7 @@
- logging.info('running %s...', binary)
- try:
- test_function(binary, gtest_report_dir, options)
-- except RunOrDieError, e:
-+ except RunOrDieError as e:
- logging.error(e)
- failed_tests.append(binary)
- else:
-@@ -1082,7 +1082,7 @@
- # and '-c' and 'Release' are build options.
- targets = []
- build_options = []
-- for i in xrange(len(args)):
-+ for i in range(len(args)):
- if args[i].startswith('-'):
- # starting with build options
- build_options = args[i:]
-@@ -1190,14 +1190,14 @@
-
- def ShowHelpAndExit():
- """Shows the help message."""
-- print 'Usage: build_mozc.py COMMAND [ARGS]'
-- print 'Commands: '
-- print ' gyp Generate project files.'
-- print ' build Build the specified target.'
-- print ' runtests Build all tests and run them.'
-- print ' clean Clean all the build files and directories.'
-- print ''
-- print 'See also the comment in the script for typical usage.'
-+ print('Usage: build_mozc.py COMMAND [ARGS]')
-+ print('Commands: ')
-+ print(' gyp Generate project files.')
-+ print(' build Build the specified target.')
-+ print(' runtests Build all tests and run them.')
-+ print(' clean Clean all the build files and directories.')
-+ print('')
-+ print('See also the comment in the script for typical usage.')
- sys.exit(1)
-
-
---- /src/build_tools/android_util.py
-+++ /src/build_tools/android_util.py
-@@ -548,7 +548,7 @@
- (devices_result, _) = process.communicate()
- used_ports = set(int(port) for port
- in re.findall(r'emulator-(\d+)', devices_result))
-- return [port for port in xrange(5554, 5586, 2) if port not in used_ports]
-+ return [port for port in range(5554, 5586, 2) if port not in used_ports]
-
-
- def SetUpTestingSdkHomeDirectory(dest_android_sdk_home,
-@@ -575,7 +575,7 @@
- 'create', 'avd',
- '--force',
- '--sdcard', '512M',]
-- for key, value in options.iteritems():
-+ for key, value in options.items():
- args.extend([key, value])
- env = {'ANDROID_SDK_HOME': os.path.abspath(dest_android_sdk_home)}
- logging.info('Creating AVD: %s', args)
-@@ -615,7 +615,7 @@
- def main():
- for arg in sys.argv[1:]:
- for item in sorted(GetApkProperties(arg).items()):
-- print '%s: %s' % item
-+ print('%s: %s' % item)
-
-
- if __name__ == '__main__':
---- /src/build_tools/binary_size_checker.py
-+++ /src/build_tools/binary_size_checker.py
-@@ -70,12 +70,12 @@
- actual_size = os.stat(filename).st_size
- expected_size = EXPECTED_MAXIMUM_SIZES[basename]
- if actual_size < expected_size * 1024 * 1024:
-- print 'Pass: %s (size: %d) is smaller than expected (%d MB)' % (
-- filename, actual_size, expected_size)
-+ print('Pass: %s (size: %d) is smaller than expected (%d MB)' % (
-+ filename, actual_size, expected_size))
- return True
- else:
-- print 'WARNING: %s (size: %d) is larger than expected (%d MB)' % (
-- filename, actual_size, expected_size)
-+ print('WARNING: %s (size: %d) is larger than expected (%d MB)' % (
-+ filename, actual_size, expected_size))
- return False
-
-
---- /src/build_tools/build_and_sign_pkg_mac.py
-+++ /src/build_tools/build_and_sign_pkg_mac.py
-@@ -44,8 +44,8 @@
- import shutil
- import sys
-
--from util import PrintErrorAndExit
--from util import RunOrDie
-+from .util import PrintErrorAndExit
-+from .util import RunOrDie
-
-
- def ParseOption():
---- /src/build_tools/build_breakpad.py
-+++ /src/build_tools/build_breakpad.py
-@@ -54,9 +54,9 @@
- try:
- subprocess.check_output(command)
- except subprocess.CalledProcessError as e:
-- print e.output
-+ print(e.output)
- sys.exit(e.returncode)
-- print 'Done: %s' % ' '.join(command)
-+ print('Done: %s' % ' '.join(command))
-
-
- def Xcodebuild(projdir, target, arch, sdk, outdir):
---- /src/build_tools/build_diskimage_mac.py
-+++ /src/build_tools/build_diskimage_mac.py
-@@ -90,7 +90,7 @@
- # setup volume directory
- temp_dir = tempfile.mkdtemp()
- CopyFile(path.join(build_dir, ".keystone_install"), temp_dir)
-- os.chmod(path.join(temp_dir, ".keystone_install"), 0755) # rwxr-xr-x
-+ os.chmod(path.join(temp_dir, ".keystone_install"), 0o755) # rwxr-xr-x
- for a in args:
- CopyFile(path.join(build_dir, a), temp_dir)
-
---- /src/build_tools/change_reference_mac.py
-+++ /src/build_tools/change_reference_mac.py
-@@ -41,8 +41,8 @@
- import optparse
- import os
-
--from util import PrintErrorAndExit
--from util import RunOrDie
-+from .util import PrintErrorAndExit
-+from .util import RunOrDie
-
-
- def ParseOption():
---- /src/build_tools/code_generator_util.py
-+++ /src/build_tools/code_generator_util.py
-@@ -33,27 +33,26 @@
- __author__ = "hidehiko"
-
- import struct
--import types
-
-
- def ToCppStringLiteral(s):
- """Returns C-style string literal, or NULL if given s is None."""
- if s is None:
-- return 'NULL'
-+ return b'NULL'
-
-- if all(0x20 <= ord(c) <= 0x7E for c in s):
-+ if all(0x20 <= c <= 0x7E for c in s):
- # All characters are in ascii code.
-- return '"%s"' % s.replace('\\', r'\\').replace('"', r'\"')
-+ return b'"%b"' % s.replace(b'\\', br'\\').replace(b'"', br'\"')
- else:
- # One or more characters are non-ascii.
-- return '"%s"' % ''.join(r'\x%02X' % ord(c) for c in s)
-+ return b'"%b"' % b''.join(br'\x%02X' % c for c in s)
-
-
- def FormatWithCppEscape(format_text, *args):
- """Returns a string filling format with args."""
- literal_list = []
- for arg in args:
-- if isinstance(arg, (types.StringType, types.NoneType)):
-+ if isinstance(arg, (bytes, type(None))):
- arg = ToCppStringLiteral(arg)
- literal_list.append(arg)
-
-@@ -95,7 +94,7 @@
- if target_compiler and target_compiler.startswith('msvs'):
- stream.write('const uint64 k%s_data_wordtype[] = {\n' % variable_name)
-
-- for word_index in xrange(0, len(data), 8):
-+ for word_index in range(0, len(data), 8):
- word_chunk = data[word_index:word_index + 8].ljust(8, '\x00')
- stream.write('0x%016X, ' % struct.unpack('<Q', word_chunk))
- if (word_index / 8) % 4 == 3:
-@@ -111,7 +110,7 @@
- stream.write('const char k%s_data[] =\n' % variable_name)
- # Output 16bytes per line.
- chunk_size = 16
-- for index in xrange(0, len(data), chunk_size):
-+ for index in range(0, len(data), chunk_size):
- chunk = data[index:index + chunk_size]
- stream.write('"')
- stream.writelines(r'\x%02X' % ord(c) for c in chunk)
-@@ -126,36 +125,50 @@
- if type(codepoint_list) is int:
- codepoint_list = (codepoint_list,)
- if codepoint_list is None or len(codepoint_list) == 0:
-- return 'null'
-- result = r'"'
-+ return b'null'
-+ result = b'"'
- for codepoint in codepoint_list:
-- utf16_string = unichr(codepoint).encode('utf-16be')
-+ utf16_string = chr(codepoint).encode('utf-16be')
- if len(utf16_string) == 2:
- (u0, l0) = utf16_string
-- result += r'\u%02X%02X' % (ord(u0), ord(l0))
-+ result += br'\u%02X%02X' % (u0, l0)
- else:
- (u0, l0, u1, l1) = utf16_string
-- result += r'\u%02X%02X\u%02X%02X' % (ord(u0), ord(l0), ord(u1), ord(l1))
-- result += r'"'
-+ result += br'\u%02X%02X\u%02X%02X' % (u0, l0, u1, l1)
-+ result += b'"'
- return result
-
-
- def SkipLineComment(stream, comment_prefix='#'):
- """Skips line comments from stream."""
- for line in stream:
-+ if isinstance(line, bytes):
-+ if isinstance(comment_prefix, str):
-+ comment_prefix = comment_prefix.encode('utf-8')
-+ line_ending = b'\n'
-+ else:
-+ line_ending = '\n'
- stripped_line = line.strip()
- if stripped_line and not stripped_line.startswith(comment_prefix):
-- yield line.rstrip('\n')
-+ yield line.rstrip(line_ending)
-
-
- def ParseColumnStream(stream, num_column=None, delimiter=None):
- """Returns parsed columns read from stream."""
- if num_column is None:
- for line in stream:
-- yield line.rstrip('\n').split(delimiter)
-+ if isinstance(line, bytes):
-+ line_ending = b'\n'
-+ else:
-+ line_ending = '\n'
-+ yield line.rstrip(line_ending).split(delimiter)
- else:
- for line in stream:
-- yield line.rstrip('\n').split(delimiter)[:num_column]
-+ if isinstance(line, bytes):
-+ line_ending = b'\n'
-+ else:
-+ line_ending = '\n'
-+ yield line.rstrip(line_ending).split(delimiter)[:num_column]
-
-
- def SelectColumn(stream, column_index):
-@@ -172,5 +185,5 @@
- grouper extends the last chunk to make it an n-element chunk by adding
- appropriate value, but this returns truncated chunk.
- """
-- for index in xrange(0, len(iterable), n):
-+ for index in range(0, len(iterable), n):
- yield iterable[index:index + n]
---- /src/build_tools/codesign_mac.py
-+++ /src/build_tools/codesign_mac.py
-@@ -46,17 +46,17 @@
-
- def RunOrDie(command):
- """Run the command, or die if it failed."""
-- print "Running: " + command
-+ print("Running: " + command)
- try:
- output = subprocess.check_output(command, shell=True)
-- print >> sys.stderr, "=========="
-- print >> sys.stderr, "COMMAND: " + command
-- print >> sys.stderr, output
-+ print("==========", file=sys.stderr)
-+ print("COMMAND: " + command, file=sys.stderr)
-+ print(output, file=sys.stderr)
- except subprocess.CalledProcessError as e:
-- print >> sys.stderr, "=========="
-- print >> sys.stderr, "ERROR: " + command
-- print >> sys.stderr, e.output
-- print >> sys.stderr, "=========="
-+ print("==========", file=sys.stderr)
-+ print("ERROR: " + command, file=sys.stderr)
-+ print(e.output, file=sys.stderr)
-+ print("==========", file=sys.stderr)
- sys.exit(1)
-
-
-@@ -119,18 +119,18 @@
- (options, unused_args) = parser.parse_args()
-
- if not options.target:
-- print "Error: --target should be specified."
-- print parser.print_help()
-+ print("Error: --target should be specified.")
-+ print(parser.print_help())
- sys.exit(1)
-
- return options
-
-
- def DumpEnviron():
-- print "=== os.environ ==="
-+ print("=== os.environ ===")
- for key in sorted(os.environ):
-- print "%s = %s" % (key, os.getenv(key))
-- print "=================="
-+ print("%s = %s" % (key, os.getenv(key)))
-+ print("==================")
-
-
- def main():
---- /src/build_tools/copy_dll_and_symbol.py
-+++ /src/build_tools/copy_dll_and_symbol.py
-@@ -38,7 +38,7 @@
- import os
- import shutil
-
--from util import PrintErrorAndExit
-+from .util import PrintErrorAndExit
-
- def ParseOption():
- """Parse command line options."""
-@@ -98,7 +98,7 @@
- if _GetLastModifiedTime(src) <= target_file_mtime:
- # Older file found. Ignore.
- continue
-- print 'Copying %s to %s' % (src, target_file_abspath)
-+ print('Copying %s to %s' % (src, target_file_abspath))
- shutil.copy2(src, target_file_abspath)
- break
-
---- /src/build_tools/copy_file.py
-+++ /src/build_tools/copy_file.py
-@@ -52,7 +52,7 @@
- Args:
- message: The error message to be printed to stderr.
- """
-- print >>sys.stderr, message
-+ print(message, file=sys.stderr)
- sys.exit(1)
-
-
---- /src/build_tools/copy_qt_frameworks_mac.py
-+++ /src/build_tools/copy_qt_frameworks_mac.py
-@@ -41,9 +41,9 @@
- import optparse
- import os
-
--from copy_file import CopyFiles
--from util import PrintErrorAndExit
--from util import RunOrDie
-+from .copy_file import CopyFiles
-+from .util import PrintErrorAndExit
-+from .util import RunOrDie
-
-
- def ParseOption():
---- /src/build_tools/embed_file.py
-+++ /src/build_tools/embed_file.py
-@@ -46,10 +46,10 @@
-
- def _FormatAsUint64LittleEndian(s):
- """Formats a string as uint64 value in little endian order."""
-- for _ in xrange(len(s), 8):
-- s += '\0'
-+ for _ in range(len(s), 8):
-+ s += b'\0'
- s = s[::-1] # Reverse the string
-- return '0x%s' % binascii.b2a_hex(s)
-+ return b'0x%b' % binascii.b2a_hex(s)
-
-
- def main():
-@@ -57,30 +57,30 @@
- with open(opts.input, 'rb') as infile:
- with open(opts.output, 'wb') as outfile:
- outfile.write(
-- '#ifdef MOZC_EMBEDDED_FILE_%(name)s\n'
-- '#error "%(name)s was already included or defined elsewhere"\n'
-- '#else\n'
-- '#define MOZC_EMBEDDED_FILE_%(name)s\n'
-- 'const uint64 %(name)s_data[] = {\n'
-- % {'name': opts.name})
-+ b'#ifdef MOZC_EMBEDDED_FILE_%(name)b\n'
-+ b'#error "%(name)b was already included or defined elsewhere"\n'
-+ b'#else\n'
-+ b'#define MOZC_EMBEDDED_FILE_%(name)b\n'
-+ b'const uint64 %(name)b_data[] = {\n'
-+ % {b'name': opts.name.encode('utf-8')})
-
- while True:
- chunk = infile.read(8)
- if not chunk:
- break
-- outfile.write(' ')
-+ outfile.write(b' ')
- outfile.write(_FormatAsUint64LittleEndian(chunk))
-- outfile.write(',\n')
-+ outfile.write(b',\n')
-
- outfile.write(
-- '};\n'
-- 'const EmbeddedFile %(name)s = {\n'
-- ' %(name)s_data,\n'
-- ' %(size)d,\n'
-- '};\n'
-- '#endif // MOZC_EMBEDDED_FILE_%(name)s\n'
-- % {'name': opts.name,
-- 'size': os.stat(opts.input).st_size})
-+ b'};\n'
-+ b'const EmbeddedFile %(name)b = {\n'
-+ b' %(name)b_data,\n'
-+ b' %(size)d,\n'
-+ b'};\n'
-+ b'#endif // MOZC_EMBEDDED_FILE_%(name)b\n'
-+ % {b'name': opts.name.encode('utf-8'),
-+ b'size': os.stat(opts.input).st_size})
-
-
- if __name__ == '__main__':
---- /src/build_tools/embed_pathname.py
-+++ /src/build_tools/embed_pathname.py
-@@ -28,7 +28,7 @@
- # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
--"""A script to embed the given (relative) path name to C/C++ characters array.
-+r"""A script to embed the given (relative) path name to C/C++ characters array.
-
- Example:
- ./embed_pathname.py --path_to_be_embedded=d:\data\mozc
-@@ -53,7 +53,7 @@
-
- (options, unused_args) = parser.parse_args()
- if not all(vars(options).values()):
-- print parser.print_help()
-+ print(parser.print_help())
- sys.exit(1)
-
- return options
-@@ -63,7 +63,7 @@
- opt = ParseOption()
- path = os.path.abspath(opt.path_to_be_embedded)
- # TODO(yukawa): Consider the case of non-ASCII characters.
-- escaped_path = path.encode('string-escape')
-+ escaped_path = path.replace('\\', '\\\\')
- with open(opt.output, 'w') as output_file:
- output_file.write(
- 'const char %s[] = "%s";\n' % (opt.constant_name, escaped_path))
---- /src/build_tools/ensure_gyp_module_path.py
-+++ /src/build_tools/ensure_gyp_module_path.py
-@@ -48,7 +48,7 @@
-
- (options, _) = parser.parse_args()
- if not options.expected:
-- print parser.print_help()
-+ print(parser.print_help())
- sys.exit(1)
-
- return options
-@@ -59,20 +59,20 @@
- opt = ParseOption()
- expected_path = os.path.abspath(opt.expected)
- if not os.path.exists(expected_path):
-- print '%s does not exist.' % expected_path
-+ print('%s does not exist.' % expected_path)
- sys.exit(1)
-
- try:
- import gyp # NOLINT
- except ImportError as e:
-- print 'import gyp failed: %s' % e
-+ print('import gyp failed: %s' % e)
- sys.exit(1)
-
- actual_path = os.path.abspath(gyp.__path__[0])
- if expected_path != actual_path:
-- print 'Unexpected gyp module is loaded on this environment.'
-- print ' expected: %s' % expected_path
-- print ' actual : %s' % actual_path
-+ print('Unexpected gyp module is loaded on this environment.')
-+ print(' expected: %s' % expected_path)
-+ print(' actual : %s' % actual_path)
- sys.exit(1)
-
- if __name__ == '__main__':
---- /src/build_tools/gen_win32_resource_header.py
-+++ /src/build_tools/gen_win32_resource_header.py
-@@ -39,7 +39,7 @@
- __author__ = "yukawa"
-
- import logging
--import mozc_version
-+from . import mozc_version
- import optparse
- import os
- import sys
---- /src/build_tools/mozc_version.py
-+++ /src/build_tools/mozc_version.py
-@@ -94,7 +94,7 @@
- last_digit = TARGET_PLATFORM_TO_DIGIT.get(target_platform, None)
- if last_digit is None:
- logging.critical('target_platform %s is invalid. Accetable ones are %s',
-- target_platform, TARGET_PLATFORM_TO_DIGIT.keys())
-+ target_platform, list(TARGET_PLATFORM_TO_DIGIT.keys()))
- sys.exit(1)
-
- if not revision:
-@@ -314,13 +314,14 @@
- self._properties = {}
- if not os.path.isfile(path):
- return
-- for line in open(path):
-- matchobj = re.match(r'(\w+)=(.*)', line.strip())
-- if matchobj:
-- var = matchobj.group(1)
-- val = matchobj.group(2)
-- if var not in self._properties:
-- self._properties[var] = val
-+ with open(path) as file:
-+ for line in file:
-+ matchobj = re.match(r'(\w+)=(.*)', line.strip())
-+ if matchobj:
-+ var = matchobj.group(1)
-+ val = matchobj.group(2)
-+ if var not in self._properties:
-+ self._properties[var] = val
-
- # Check mandatory properties.
- for key in VERSION_PROPERTIES:
diff --git a/app-i18n/mozc/files/mozc-2.23.2815.102-python-3_2.patch b/app-i18n/mozc/files/mozc-2.23.2815.102-python-3_2.patch
deleted file mode 100644
index 456e8368049a..000000000000
--- a/app-i18n/mozc/files/mozc-2.23.2815.102-python-3_2.patch
+++ /dev/null
@@ -1,600 +0,0 @@
-https://github.com/google/mozc/issues/462
-
---- /src/build_tools/redirect.py
-+++ /src/build_tools/redirect.py
-@@ -58,14 +58,15 @@
- process = subprocess.Popen(sys.argv, stdout=subprocess.PIPE,
- universal_newlines=True)
- except:
-- print '=========='
-- print ' ERROR: %s' % ' '.join(sys.argv)
-- print '=========='
-+ print('==========')
-+ print(' ERROR: %s' % ' '.join(sys.argv))
-+ print('==========')
- raise
- (stdout_content, _) = process.communicate()
- # Write the stdout content to the output file.
- output_file = open(output_file_name, 'w')
- output_file.write(stdout_content)
-+ output_file.close()
- return process.wait()
-
- if __name__ == '__main__':
---- /src/build_tools/run_after_chdir.py
-+++ /src/build_tools/run_after_chdir.py
-@@ -57,7 +57,7 @@
- sys.argv.insert(0, sys.executable) # Inject the python interpreter path.
- # We don't capture stdout and stderr from Popen. The output will just
- # be emitted to a terminal or console.
-- print sys.argv
-+ print(sys.argv)
- sys.exit(subprocess.call(sys.argv))
-
- if __name__ == '__main__':
---- /src/build_tools/serialized_string_array_builder.py
-+++ /src/build_tools/serialized_string_array_builder.py
-@@ -58,11 +58,11 @@
- f.write(struct.pack('<I', array_size))
-
- # Offset and length array of (4 + 4) * array_size bytes.
-- for i in xrange(array_size):
-+ for i in range(array_size):
- f.write(struct.pack('<I', offsets[i]))
- f.write(struct.pack('<I', lengths[i]))
-
- # Strings chunk.
-- for i in xrange(array_size):
-+ for i in range(array_size):
- f.write(strings[i])
-- f.write('\0')
-+ f.write(b'\0')
---- /src/build_tools/test_tools/gtest_report.py
-+++ /src/build_tools/test_tools/gtest_report.py
-@@ -36,9 +36,9 @@
-
- __author__ = "nona"
-
--import cStringIO as StringIO
-+import io
- import logging
--from xml.etree import cElementTree as ElementTree
-+from xml.etree import ElementTree
-
-
- class Failure(object):
-@@ -87,13 +87,13 @@
- """Returns summarized error report text."""
- if self.fail_num == 0:
- return ''
-- output = StringIO.StringIO()
-+ output = io.StringIO()
- for testcase in self.testcases:
- if not testcase.failures:
- continue
-- print >>output, '%s.%s:' % (self.name, testcase.name)
-+ print('%s.%s:' % (self.name, testcase.name), file=output)
- for failure in testcase.failures:
-- print >>output, failure.contents.encode('utf-8')
-+ print(failure.contents.encode('utf-8'), file=output)
- return output.getvalue()
-
- @classmethod
---- /src/build_tools/test_tools/test_launcher.py
-+++ /src/build_tools/test_tools/test_launcher.py
-@@ -101,11 +101,11 @@
- time.sleep(1)
- try:
- shutil.rmtree(self._path)
-- except OSError, e:
-+ except OSError as e:
- logging.error('Failed to remove %s. error: %s', self._path, e)
-
-
--def _ExecuteTest((command, gtest_report_dir)):
-+def _ExecuteTest(args):
- """Executes tests with specified Test command.
-
- Args:
-@@ -122,6 +122,7 @@
- module, which is used in multiprocessing module.
- (http://docs.python.org/library/pickle.html)
- """
-+ (command, gtest_report_dir) = args
- binary = command[0]
- binary_filename = os.path.basename(binary)
- tmp_dir = tempfile.mkdtemp()
---- /src/build_tools/tweak_data.py
-+++ /src/build_tools/tweak_data.py
-@@ -55,7 +55,7 @@
- The value for the variable if the variable is defined in the
- environment. Otherwise original string is returned.
- """
-- if environment.has_key(matchobj.group(1)):
-+ if matchobj.group(1) in environment:
- return environment[matchobj.group(1)]
- return matchobj.group(0)
-
---- /src/build_tools/tweak_info_plist.py
-+++ /src/build_tools/tweak_info_plist.py
-@@ -42,8 +42,8 @@
- import logging
- import optparse
- import sys
--import mozc_version
--import tweak_data
-+from . import mozc_version
-+from . import tweak_data
-
- _COPYRIGHT_YEAR = datetime.date.today().year
-
-@@ -81,7 +81,7 @@
-
- version = mozc_version.MozcVersion(options.version_file)
-
-- copyright_message = (u'© %d Google Inc.' % _COPYRIGHT_YEAR).encode('utf-8')
-+ copyright_message = ('© %d Google Inc.' % _COPYRIGHT_YEAR).encode('utf-8')
- long_version = version.GetVersionString()
- short_version = version.GetVersionInFormat('@MAJOR@.@MINOR@.@BUILD@')
-
---- /src/build_tools/tweak_info_plist_strings.py
-+++ /src/build_tools/tweak_info_plist_strings.py
-@@ -40,7 +40,7 @@
- import logging
- import optparse
- import sys
--import tweak_data
-+from . import tweak_data
-
- _COPYRIGHT_YEAR = datetime.date.today().year
-
-@@ -77,7 +77,7 @@
- if options.branding == 'GoogleJapaneseInput':
- variables = {
- 'CF_BUNDLE_NAME_EN': 'Google Japanese Input',
-- 'CF_BUNDLE_NAME_JA': u'Google 日本語入力'.encode('utf-8'),
-+ 'CF_BUNDLE_NAME_JA': 'Google 日本語入力'.encode('utf-8'),
- 'NS_HUMAN_READABLE_COPYRIGHT': copyright_message,
- 'INPUT_MODE_ANNOTATION': 'Google',
- }
---- /src/build_tools/tweak_macinstaller_script.py
-+++ /src/build_tools/tweak_macinstaller_script.py
-@@ -39,7 +39,7 @@
- import logging
- import optparse
-
--import mozc_version
-+from . import mozc_version
-
-
- def _ReplaceVariables(data, environment):
---- /src/build_tools/tweak_pkgproj.py
-+++ /src/build_tools/tweak_pkgproj.py
-@@ -45,7 +45,7 @@
- import os
- import plistlib
- import re
--import mozc_version
-+from . import mozc_version
-
- from os import path
-
-@@ -71,7 +71,7 @@
- The value for the variable if the variable is defined in the
- environment. Otherwise original string is returned.
- """
-- if environment.has_key(matchobj.group(1)):
-+ if matchobj.group(1) in environment:
- return environment[matchobj.group(1)]
- return matchobj.group(0)
-
---- /src/build_tools/util.py
-+++ /src/build_tools/util.py
-@@ -73,11 +73,11 @@
- return 1
-
-
--class RunOrDieError(StandardError):
-+class RunOrDieError(Exception):
- """The exception class for RunOrDie."""
-
- def __init__(self, message):
-- StandardError.__init__(self, message)
-+ Exception.__init__(self, message)
-
-
- def RunOrDie(argv):
-@@ -105,7 +105,7 @@
- return # Do nothing if not exist.
- if IsWindows():
- # Read-only files cannot be deleted on Windows.
-- os.chmod(file_name, 0700)
-+ os.chmod(file_name, 0o700)
- logging.debug('Removing file: %s', file_name)
- os.unlink(file_name)
-
---- /src/build_tools/zlib_util.py
-+++ /src/build_tools/zlib_util.py
-@@ -58,7 +58,7 @@
-
- def main():
- if len(sys.argv) != 4:
-- print >>sys.stderr, 'Invalid arguments'
-+ print('Invalid arguments', file=sys.stderr)
- return
- if sys.argv[1] == 'compress':
- Compress(sys.argv[2], sys.argv[3])
-@@ -66,7 +66,7 @@
- if sys.argv[1] == 'decompress':
- Decompress(sys.argv[2], sys.argv[3])
- return
-- print >>sys.stderr, 'Unknown command:', sys.argv[1]
-+ print('Unknown command:', sys.argv[1], file=sys.stderr)
-
-
- if __name__ == '__main__':
---- /src/composer/internal/gen_typing_model.py
-+++ /src/composer/internal/gen_typing_model.py
-@@ -54,14 +54,13 @@
- __author__ = "noriyukit"
-
- import bisect
--import codecs
- import collections
- import optparse
- import struct
-
- UNDEFINED_COST = -1
--MAX_UINT16 = struct.unpack('H', '\xFF\xFF')[0]
--MAX_UINT8 = struct.unpack('B', '\xFF')[0]
-+MAX_UINT16 = struct.unpack('H', b'\xFF\xFF')[0]
-+MAX_UINT8 = struct.unpack('B', b'\xFF')[0]
-
-
- def ParseArgs():
-@@ -113,7 +112,7 @@
- sorted_values = list(sorted(set(values)))
- mapping_table = sorted_values[0]
- mapping_table_size_without_special_value = mapping_table_size - 1
-- span = len(sorted_values) / (mapping_table_size_without_special_value - 1)
-+ span = len(sorted_values) // (mapping_table_size_without_special_value - 1)
- mapping_table = [sorted_values[i * span]
- for i
- in range(0, mapping_table_size_without_special_value - 1)]
-@@ -150,7 +149,7 @@
-
- def GetValueTable(unique_characters, mapping_table, dictionary):
- result = []
-- for key, value in dictionary.iteritems():
-+ for key, value in dictionary.items():
- index = GetIndexFromKey(unique_characters, key)
- while len(result) <= index:
- result.append(len(mapping_table) - 1)
-@@ -167,13 +166,13 @@
- romaji_transition_cost)
- with open(output_path, 'wb') as f:
- f.write(struct.pack('<I', len(unique_characters)))
-- f.write(''.join(unique_characters))
-+ f.write(''.join(unique_characters).encode('utf-8'))
- offset = 4 + len(unique_characters)
-
- # Add padding to place value list size at 4-byte boundary.
- if offset % 4:
- padding_size = 4 - offset % 4
-- f.write('\x00' * padding_size)
-+ f.write(b'\x00' * padding_size)
- offset += padding_size
-
- f.write(struct.pack('<I', len(value_list)))
-@@ -184,7 +183,7 @@
- # Add padding to place mapping_table at 4-byte boundary.
- if offset % 4:
- padding_size = 4 - offset % 4
-- f.write('\x00' * padding_size)
-+ f.write(b'\x00' * padding_size)
- offset += padding_size
-
- for v in mapping_table:
-@@ -198,7 +197,8 @@
- # - trigram['vw']['x'] = -500 * log(P(x | 'vw'))
- unigram = {}
- trigram = collections.defaultdict(dict)
-- for line in codecs.open(options.input_path, 'r', encoding='utf-8'):
-+ input_file = open(options.input_path, 'r', encoding='utf-8')
-+ for line in input_file:
- line = line.rstrip()
- ngram, cost = line.split('\t')
- cost = int(cost)
-@@ -206,6 +206,7 @@
- unigram[ngram] = cost
- else:
- trigram[ngram[:-1]][ngram[-1]] = cost
-+ input_file.close()
-
- # Calculate ngram-related cost for each 'vw' and 'x':
- # -500 * log( P('x' | 'vw') / P('x') )
---- /src/converter/gen_boundary_data.py
-+++ /src/converter/gen_boundary_data.py
-@@ -70,7 +70,8 @@
- def LoadPatterns(file):
- prefix = []
- suffix = []
-- for line in open(file, 'r'):
-+ fh = open(file, 'r')
-+ for line in fh:
- if len(line) <= 1 or line[0] == '#':
- continue
- fields = line.split()
-@@ -84,8 +85,9 @@
- elif label == 'SUFFIX':
- suffix.append([re.compile(PatternToRegexp(feature)), cost])
- else:
-- print 'format error %s' % (line)
-+ print('format error %s' % (line))
- sys.exit(0)
-+ fh.close()
- return (prefix, suffix)
-
-
-@@ -100,19 +102,23 @@
-
- def LoadFeatures(filename):
- features = []
-- for line in open(filename, 'r'):
-+ fh = open(filename, 'r')
-+ for line in fh:
- fields = line.split()
- features.append(fields[1])
-+ fh.close()
- return features
-
-
- def CountSpecialPos(filename):
- count = 0
-- for line in open(filename, 'r'):
-+ fh = open(filename, 'r')
-+ for line in fh:
- line = line.rstrip()
- if not line or line[0] == '#':
- continue
- count += 1
-+ fh.close()
- return count
-
-
-@@ -141,7 +147,7 @@
- f.write(struct.pack('<H', GetCost(prefix, feature)))
- f.write(struct.pack('<H', GetCost(suffix, feature)))
-
-- for _ in xrange(num_special_pos):
-+ for _ in range(num_special_pos):
- f.write(struct.pack('<H', 0))
- f.write(struct.pack('<H', 0))
-
---- /src/converter/gen_quality_regression_test_data.py
-+++ /src/converter/gen_quality_regression_test_data.py
-@@ -84,7 +84,7 @@
- else _ENABLED)
- id = issue.attributes['id'].value
- target = GetText(issue.getElementsByTagName('target'))
-- for detail in issue.getElementsByTagName(u'detail'):
-+ for detail in issue.getElementsByTagName('detail'):
- fields = []
- fields.append('mozcsu_%s' % id)
- for key in ('reading', 'output', 'actionStatus', 'rank', 'accuracy'):
-@@ -104,19 +104,19 @@
-
- def GenerateHeader(files):
- try:
-- print 'namespace mozc{'
-- print 'struct TestCase {'
-- print ' const bool enabled;'
-- print ' const char *tsv;'
-- print '} kTestData[] = {'
-+ print('namespace mozc{')
-+ print('struct TestCase {')
-+ print(' const bool enabled;')
-+ print(' const char *tsv;')
-+ print('} kTestData[] = {')
- for file in files:
- for enabled, line in ParseFile(file):
-- print ' {%s, "%s"},' % (enabled, EscapeString(line))
-- print ' {false, nullptr},'
-- print '};'
-- print '} // namespace mozc'
-+ print(' {%s, "%s"},' % (enabled, EscapeString(line)))
-+ print(' {false, nullptr},')
-+ print('};')
-+ print('} // namespace mozc')
- except:
-- print 'cannot open %s' % (file)
-+ print('cannot open %s' % (file))
- sys.exit(1)
-
-
---- /src/converter/gen_segmenter_code.py
-+++ /src/converter/gen_segmenter_code.py
-@@ -54,18 +54,22 @@
- pos = {}
- max_id = 0
-
-- for line in open(id_file, "r"):
-+ fh = open(id_file, "r")
-+ for line in fh:
- fields = line.split()
- pos[fields[1]] = fields[0]
- max_id = max(int(fields[0]), max_id)
-+ fh.close()
-
- max_id = max_id + 1
-- for line in open(special_pos_file, "r"):
-+ fh = open(special_pos_file, "r")
-+ for line in fh:
- if len(line) <= 1 or line[0] == '#':
- continue
- fields = line.split()
- pos[fields[0]] = ("%d" % max_id)
- max_id = max_id + 1
-+ fh.close()
-
- return pos
-
-@@ -79,8 +83,7 @@
- pat = re.compile(PatternToRegexp(pattern))
- min = -1;
- max = -1;
-- keys = pos.keys()
-- keys.sort()
-+ keys = sorted(pos.keys())
-
- range = []
-
-@@ -107,7 +110,7 @@
- tmp.append("(%s >= %s && %s <= %s)" % (name, r[0], name, r[1]))
-
- if len(tmp) == 0:
-- print "FATAL: No rule fiind %s" % (pattern)
-+ print("FATAL: No rule fiind %s" % (pattern))
- sys.exit(-1)
-
- return " || ".join(tmp)
-@@ -115,19 +118,21 @@
- def main():
- pos = ReadPOSID(sys.argv[1], sys.argv[2])
-
-- print HEADER % (len(pos.keys()), len(pos.keys()))
-+ print(HEADER % (len(pos.keys()), len(pos.keys())))
-
-- for line in open(sys.argv[3], "r"):
-+ fh = open(sys.argv[3], "r")
-+ for line in fh:
- if len(line) <= 1 or line[0] == '#':
- continue
- (l, r, result) = line.split()
- result = result.lower()
- lcond = GetRange(pos, l, "rid") or "true";
- rcond = GetRange(pos, r, "lid") or "true";
-- print " // %s %s %s" % (l, r, result)
-- print " if ((%s) && (%s)) { return %s; }" % (lcond, rcond, result)
-+ print(" // %s %s %s" % (l, r, result))
-+ print(" if ((%s) && (%s)) { return %s; }" % (lcond, rcond, result))
-+ fh.close()
-
-- print FOOTER
-+ print(FOOTER)
-
- if __name__ == "__main__":
- main()
---- /src/data_manager/gen_connection_data.py
-+++ /src/data_manager/gen_connection_data.py
-@@ -32,8 +32,7 @@
-
- __author__ = "hidehiko"
-
--import cStringIO as StringIO
--import itertools
-+import io
- import logging
- import optparse
- import os
-@@ -45,7 +44,7 @@
- INVALID_COST = 30000
- INVALID_1BYTE_COST = 255
- RESOLUTION_FOR_1BYTE = 64
--FILE_MAGIC = '\xAB\xCD'
-+FILE_MAGIC = b'\xAB\xCD'
-
- FALSE_VALUES = ['f', 'false', '0']
- TRUE_VALUES = ['t', 'true', '1']
-@@ -79,28 +78,28 @@
- # The result is a square matrix.
- mat_size = pos_size + special_pos_size
-
-- matrix = [[0] * mat_size for _ in xrange(mat_size)]
-+ matrix = [[0] * mat_size for _ in range(mat_size)]
- with open(text_connection_file) as stream:
- stream = code_generator_util.SkipLineComment(stream)
- # The first line contains the matrix column/row size.
-- size = stream.next().rstrip()
-+ size = next(stream).rstrip()
- assert (int(size) == pos_size), '%s != %d' % (size, pos_size)
-
- for array_index, cost in enumerate(stream):
- cost = int(cost.rstrip())
-- rid = array_index / pos_size
-+ rid = array_index // pos_size
- lid = array_index % pos_size
- if rid == 0 and lid == 0:
- cost = 0
- matrix[rid][lid] = cost
-
- # Fill INVALID_COST in matrix elements for special POS.
-- for rid in xrange(pos_size, mat_size):
-- for lid in xrange(1, mat_size): # Skip EOS
-+ for rid in range(pos_size, mat_size):
-+ for lid in range(1, mat_size): # Skip EOS
- matrix[rid][lid] = INVALID_COST
-
-- for lid in xrange(pos_size, mat_size):
-- for rid in xrange(1, mat_size): # Skip BOS
-+ for lid in range(pos_size, mat_size):
-+ for rid in range(1, mat_size): # Skip BOS
- matrix[rid][lid] = INVALID_COST
-
- return matrix
-@@ -116,7 +115,7 @@
- # Heuristically, we do not compress INVALID_COST.
- continue
- m[cost] = m.get(cost, 0) + 1
-- mode_value = max(m.iteritems(), key=lambda (_, count): count)[0]
-+ mode_value = max(m.items(), key=lambda x: x[1])[0]
- result.append(mode_value)
- return result
-
-@@ -126,8 +125,8 @@
- # list, and fill None into the matrix if it equals to the corresponding
- # mode value.
- assert len(matrix) == len(mode_value_list)
-- for row, mode_value in itertools.izip(matrix, mode_value_list):
-- for index in xrange(len(row)):
-+ for row, mode_value in zip(matrix, mode_value_list):
-+ for index in range(len(row)):
- if row[index] == mode_value:
- row[index] = None
-
-@@ -179,7 +178,7 @@
- resolution = RESOLUTION_FOR_1BYTE
- else:
- resolution = 1
-- stream = StringIO.StringIO()
-+ stream = io.BytesIO()
-
- # Output header.
- stream.write(FILE_MAGIC)
-@@ -194,7 +193,7 @@
-
- # 4 bytes alignment.
- if len(mode_value_list) % 2:
-- stream.write('\x00\x00')
-+ stream.write(b'\x00\x00')
-
- # Process each row:
- for row in matrix:
-@@ -218,7 +217,7 @@
- if cost == INVALID_COST:
- cost = INVALID_1BYTE_COST
- else:
-- cost /= resolution
-+ cost //= resolution
- assert cost != INVALID_1BYTE_COST
- values.append(cost)
-
-@@ -237,7 +236,7 @@
- values_size = len(values) * 2
-
- # Output the bits for a row.
-- stream.write(struct.pack('<HH', len(compact_bits) / 8, values_size))
-+ stream.write(struct.pack('<HH', len(compact_bits) // 8, values_size))
- OutputBitList(chunk_bits, stream)
- OutputBitList(compact_bits, stream)
- if use_1byte_cost:
diff --git a/app-i18n/mozc/files/mozc-2.23.2815.102-python-3_3.patch b/app-i18n/mozc/files/mozc-2.23.2815.102-python-3_3.patch
deleted file mode 100644
index a5c5a2dc8038..000000000000
--- a/app-i18n/mozc/files/mozc-2.23.2815.102-python-3_3.patch
+++ /dev/null
@@ -1,583 +0,0 @@
-https://github.com/google/mozc/issues/462
-
---- /src/dictionary/gen_pos_map.py
-+++ /src/dictionary/gen_pos_map.py
-@@ -39,7 +39,7 @@
- from build_tools import code_generator_util
-
-
--HEADER = """// Copyright 2009 Google Inc. All Rights Reserved.
-+HEADER = b"""// Copyright 2009 Google Inc. All Rights Reserved.
- // Author: keni
-
- #ifndef MOZC_DICTIONARY_POS_MAP_H_
-@@ -48,13 +48,13 @@
- // POS conversion rules
- const POSMap kPOSMap[] = {
- """
--FOOTER = """};
-+FOOTER = b"""};
-
- #endif // MOZC_DICTIONARY_POS_MAP_H_
- """
-
- def ParseUserPos(user_pos_file):
-- with open(user_pos_file, 'r') as stream:
-+ with open(user_pos_file, 'rb') as stream:
- stream = code_generator_util.SkipLineComment(stream)
- stream = code_generator_util.ParseColumnStream(stream, num_column=2)
- return dict((key, enum_value) for key, enum_value in stream)
-@@ -64,7 +64,7 @@
- user_pos_map = ParseUserPos(user_pos_file)
-
- result = {}
-- with open(third_party_pos_map_file, 'r') as stream:
-+ with open(third_party_pos_map_file, 'rb') as stream:
- stream = code_generator_util.SkipLineComment(stream)
- for columns in code_generator_util.ParseColumnStream(stream, num_column=2):
- third_party_pos_name, mozc_pos = (columns + [None])[:2]
-@@ -78,7 +78,7 @@
- result[third_party_pos_name] = mozc_pos
-
- # Create mozc_pos to mozc_pos map.
-- for key, value in user_pos_map.iteritems():
-+ for key, value in user_pos_map.items():
- if key in result:
- assert (result[key] == value)
- continue
-@@ -94,10 +94,10 @@
- if value is None:
- # Invalid PosType.
- value = (
-- 'static_cast< ::mozc::user_dictionary::UserDictionary::PosType>(-1)')
-+ b'static_cast< ::mozc::user_dictionary::UserDictionary::PosType>(-1)')
- else:
-- value = '::mozc::user_dictionary::UserDictionary::' + value
-- output.write(' { %s, %s },\n' % (key, value))
-+ value = b'::mozc::user_dictionary::UserDictionary::' + value
-+ output.write(b' { %s, %s },\n' % (key, value))
- output.write(FOOTER)
-
-
-@@ -121,7 +121,7 @@
- pos_map = GeneratePosMap(options.third_party_pos_map_file,
- options.user_pos_file)
-
-- with open(options.output, 'w') as stream:
-+ with open(options.output, 'wb') as stream:
- OutputPosMap(pos_map, stream)
-
-
---- /src/dictionary/gen_pos_rewrite_rule.py
-+++ /src/dictionary/gen_pos_rewrite_rule.py
-@@ -46,29 +46,34 @@
-
-
- def LoadRewriteMapRule(filename):
-- fh = open(filename)
-+ fh = open(filename, 'rb')
- rule = []
- for line in fh:
-- line = line.rstrip('\n')
-- if not line or line.startswith('#'):
-+ line = line.rstrip(b'\n')
-+ if not line or line.startswith(b'#'):
- continue
- fields = line.split()
- rule.append([fields[0], fields[1]])
-+ fh.close()
- return rule
-
-
- def ReadPOSID(id_file, special_pos_file):
- pos_list = []
-
-- for line in open(id_file, 'r'):
-+ fh = open(id_file, 'rb')
-+ for line in fh:
- fields = line.split()
- pos_list.append(fields[1])
-+ fh.close()
-
-- for line in open(special_pos_file, 'r'):
-- if len(line) <= 1 or line[0] == '#':
-+ fh = open(special_pos_file, 'rb')
-+ for line in fh:
-+ if len(line) <= 1 or line[0:1] == b'#':
- continue
- fields = line.split()
- pos_list.append(fields[0])
-+ fh.close()
-
- return pos_list
-
-@@ -112,7 +117,7 @@
- ids.append(id)
-
- with open(opts.output, 'wb') as f:
-- f.write(''.join(chr(id) for id in ids))
-+ f.write(''.join(chr(id) for id in ids).encode('utf-8'))
-
-
- if __name__ == '__main__':
---- /src/dictionary/gen_suffix_data.py
-+++ /src/dictionary/gen_suffix_data.py
-@@ -52,10 +52,10 @@
- opts = _ParseOptions()
-
- result = []
-- with open(opts.input, 'r') as stream:
-+ with open(opts.input, 'rb') as stream:
- for line in stream:
-- line = line.rstrip('\r\n')
-- fields = line.split('\t')
-+ line = line.rstrip(b'\r\n')
-+ fields = line.split(b'\t')
- key = fields[0]
- lid = int(fields[1])
- rid = int(fields[2])
-@@ -63,7 +63,7 @@
- value = fields[4]
-
- if key == value:
-- value = ''
-+ value = b''
-
- result.append((key, value, lid, rid, cost))
-
---- /src/dictionary/gen_user_pos_data.py
-+++ /src/dictionary/gen_user_pos_data.py
-@@ -64,7 +64,7 @@
- f.write(struct.pack('<H', conjugation_id))
-
- serialized_string_array_builder.SerializeToFile(
-- sorted(string_index.iterkeys()), output_string_array)
-+ sorted(x.encode('utf-8') for x in string_index.keys()), output_string_array)
-
-
- def ParseOptions():
-@@ -100,7 +100,7 @@
-
- if options.output_pos_list:
- serialized_string_array_builder.SerializeToFile(
-- [pos for (pos, _) in user_pos.data], options.output_pos_list)
-+ [pos.encode('utf-8') for (pos, _) in user_pos.data], options.output_pos_list)
-
-
- if __name__ == '__main__':
---- /src/dictionary/gen_zip_code_seed.py
-+++ /src/dictionary/gen_zip_code_seed.py
-@@ -83,7 +83,7 @@
- address = unicodedata.normalize('NFKC', self.address)
- line = '\t'.join([zip_code, '0', '0', str(ZIP_CODE_COST),
- address, ZIP_CODE_LABEL])
-- print line.encode('utf-8')
-+ print(line.encode('utf-8'))
-
-
- def ProcessZipCodeCSV(file_name):
-@@ -105,26 +105,26 @@
-
- def ReadZipCodeEntries(zip_code, level1, level2, level3):
- """Read zip code entries."""
-- return [ZipEntry(zip_code, u''.join([level1, level2, town]))
-+ return [ZipEntry(zip_code, ''.join([level1, level2, town]))
- for town in ParseTownName(level3)]
-
-
- def ReadJigyosyoEntry(zip_code, level1, level2, level3, name):
- """Read jigyosyo entry."""
- return ZipEntry(zip_code,
-- u''.join([level1, level2, level3, u' ', name]))
-+ ''.join([level1, level2, level3, ' ', name]))
-
-
- def ParseTownName(level3):
- """Parse town name."""
-- if level3.find(u'以下に掲載がない場合') != -1:
-+ if level3.find('以下に掲載がない場合') != -1:
- return ['']
-
- assert CanParseAddress(level3), ('failed to be merged %s'
- % level3.encode('utf-8'))
-
- # We ignore additional information here.
-- level3 = re.sub(u'(.*)', u'', level3, re.U)
-+ level3 = re.sub('(.*)', '', level3, re.U)
-
- # For 地割, we have these cases.
- # XX1地割
-@@ -134,7 +134,7 @@
- # XX第1地割、XX第2地割、
- # XX第1地割〜XX第2地割、
- # We simply use XX for them.
-- chiwari_match = re.match(u'(\D*?)第?\d+地割.*', level3, re.U)
-+ chiwari_match = re.match('(\D*?)第?\d+地割.*', level3, re.U)
- if chiwari_match:
- town = chiwari_match.group(1)
- return [town]
-@@ -144,21 +144,21 @@
- # -> XX町YY and (XX町)ZZ
- # YY、ZZ
- # -> YY and ZZ
-- chou_match = re.match(u'(.*町)?(.*)', level3, re.U)
-+ chou_match = re.match('(.*町)?(.*)', level3, re.U)
- if chou_match:
-- chou = u''
-+ chou = ''
- if chou_match.group(1):
- chou = chou_match.group(1)
- rests = chou_match.group(2)
-- return [chou + rest for rest in rests.split(u'、')]
-+ return [chou + rest for rest in rests.split('、')]
-
- return [level3]
-
-
- def CanParseAddress(address):
- """Return true for valid address."""
-- return (address.find(u'(') == -1 or
-- address.find(u')') != -1)
-+ return (address.find('(') == -1 or
-+ address.find(')') != -1)
-
-
- def ParseOptions():
---- /src/dictionary/zip_code_util.py
-+++ /src/dictionary/zip_code_util.py
-@@ -86,11 +86,11 @@
-
-
- _SPECIAL_CASES = [
-- SpecialMergeZip(u'5900111', u'大阪府', u'堺市中区', [u'三原台']),
-- SpecialMergeZip(u'8710046', u'大分県', u'中津市',
-- [u'金谷', u'西堀端', u'東堀端', u'古金谷']),
-- SpecialMergeZip(u'9218046', u'石川県', u'金沢市',
-- [u'大桑町', u'三小牛町']),
-+ SpecialMergeZip('5900111', '大阪府', '堺市中区', ['三原台']),
-+ SpecialMergeZip('8710046', '大分県', '中津市',
-+ ['金谷', '西堀端', '東堀端', '古金谷']),
-+ SpecialMergeZip('9218046', '石川県', '金沢市',
-+ ['大桑町', '三小牛町']),
- ]
-
-
---- /src/gui/character_pad/data/gen_cp932_map.py
-+++ /src/gui/character_pad/data/gen_cp932_map.py
-@@ -32,7 +32,6 @@
-
- import re
- import sys
--import string
-
- kUnicodePat = re.compile(r'0x[0-9A-Fa-f]{2,4}')
- def IsValidUnicode(n):
-@@ -42,28 +41,29 @@
- fh = open(sys.argv[1])
- result = {}
- for line in fh.readlines():
-- if line[0] is '#':
-+ if line[0] == '#':
- continue
-- array = string.split(line)
-+ array = line.split()
- sjis = array[0]
- ucs2 = array[1]
- if eval(sjis) < 32 or not IsValidUnicode(ucs2):
- continue
- result.setdefault(ucs2, sjis)
-+ fh.close()
-
- keys = sorted(result.keys())
-
-- print "struct CP932MapData {"
-- print " unsigned int ucs4;"
-- print " unsigned short int sjis;"
-- print "};"
-- print ""
-- print "static const size_t kCP932MapDataSize = %d;" % (len(keys))
-- print "static const CP932MapData kCP932MapData[] = {"
-+ print("struct CP932MapData {")
-+ print(" unsigned int ucs4;")
-+ print(" unsigned short int sjis;")
-+ print("};")
-+ print("")
-+ print("static const size_t kCP932MapDataSize = %d;" % (len(keys)))
-+ print("static const CP932MapData kCP932MapData[] = {")
- for n in keys:
-- print " { %s, %s }," % (n ,result[n])
-- print " { 0, 0 }";
-- print "};"
-+ print(" { %s, %s }," % (n ,result[n]))
-+ print(" { 0, 0 }");
-+ print("};")
-
- if __name__ == "__main__":
- main()
---- /src/gui/character_pad/data/gen_local_character_map.py
-+++ /src/gui/character_pad/data/gen_local_character_map.py
-@@ -30,7 +30,6 @@
-
- __author__ = "taku"
-
--import string
- import re
- import sys
-
-@@ -43,9 +42,9 @@
- fh = open(filename)
- result = []
- for line in fh.readlines():
-- if line[0] is '#':
-+ if line[0] == '#':
- continue
-- array = string.split(line)
-+ array = line.split()
- jis = array[0].replace('0x', '')
- ucs2 = array[1].replace('0x', '')
- if len(jis) == 2:
-@@ -53,6 +52,7 @@
-
- if IsValidUnicode(ucs2):
- result.append([jis, ucs2])
-+ fh.close()
-
- return ["JISX0201", result]
-
-@@ -60,13 +60,14 @@
- fh = open(filename)
- result = []
- for line in fh.readlines():
-- if line[0] is '#':
-+ if line[0] == '#':
- continue
- array = line.split()
- jis = array[1].replace('0x', '')
- ucs2 = array[2].replace('0x', '')
- if IsValidUnicode(ucs2):
- result.append([jis, ucs2])
-+ fh.close()
-
- return ["JISX0208", result]
-
-@@ -74,13 +75,14 @@
- fh = open(filename)
- result = []
- for line in fh.readlines():
-- if line[0] is '#':
-+ if line[0] == '#':
- continue
- array = line.split()
- jis = array[0].replace('0x', '')
- ucs2 = array[1].replace('0x', '')
- if IsValidUnicode(ucs2):
- result.append([jis, ucs2])
-+ fh.close()
-
- return ["JISX0212", result]
-
-@@ -88,7 +90,7 @@
- fh = open(filename)
- result = []
- for line in fh.readlines():
-- if line[0] is '#':
-+ if line[0] == '#':
- continue
- array = line.split()
- sjis = array[0].replace('0x', '')
-@@ -100,19 +102,20 @@
-
- if IsValidUnicode(ucs2):
- result.append([sjis, ucs2])
-+ fh.close()
-
- return ["CP932", result]
-
- def Output(arg):
- name = arg[0]
- result = arg[1]
-- print "static const size_t k%sMapSize = %d;" % (name, len(result))
-- print "static const mozc::gui::CharacterPalette::LocalCharacterMap k%sMap[] = {" % (name)
-+ print("static const size_t k%sMapSize = %d;" % (name, len(result)))
-+ print("static const mozc::gui::CharacterPalette::LocalCharacterMap k%sMap[] = {" % (name))
- for n in result:
-- print " { 0x%s, 0x%s }," % (n[0] ,n[1])
-- print " { 0, 0 }";
-- print "};"
-- print ""
-+ print(" { 0x%s, 0x%s }," % (n[0] ,n[1]))
-+ print(" { 0, 0 }");
-+ print("};")
-+ print("")
-
- if __name__ == "__main__":
- Output(LoadJISX0201(sys.argv[1]))
---- /src/gui/character_pad/data/gen_unicode_blocks.py
-+++ /src/gui/character_pad/data/gen_unicode_blocks.py
-@@ -33,13 +33,13 @@
- import sys
- import re
-
--re = re.compile('^(.....?)\.\.(.....?); (.+)')
-+re = re.compile(r'^(.....?)\.\.(.....?); (.+)')
-
- def main():
-- print "static const mozc::gui::CharacterPalette::UnicodeBlock kUnicodeBlockTable[] = {"
-+ print("static const mozc::gui::CharacterPalette::UnicodeBlock kUnicodeBlockTable[] = {")
- fh = open(sys.argv[1])
- for line in fh.readlines():
-- if line[0] is '#':
-+ if line[0] == '#':
- continue
- m = re.match(line)
- if m is not None:
-@@ -47,11 +47,12 @@
- end = int(m.group(2), 16)
- name = m.group(3)
- if start <= 0x2FFFF and end <= 0x2FFFF:
-- print " { \"%s\", { %d, %d } }," % (name, start, end)
-+ print(" { \"%s\", { %d, %d } }," % (name, start, end))
-+ fh.close()
-
-- print " { NULL, { 0, 0 } }"
-- print "};"
-- print ""
-+ print(" { NULL, { 0, 0 } }")
-+ print("};")
-+ print("")
-
- if __name__ == "__main__":
- main()
---- /src/gui/character_pad/data/gen_unicode_data.py
-+++ /src/gui/character_pad/data/gen_unicode_data.py
-@@ -46,18 +46,19 @@
- code = int(code, 16)
- if code < 0x2FFFF:
- results.append(" { %d, \"%s\" }," % (code, desc))
-+ fh.close()
-
-- print "struct UnicodeData {";
-- print " char32 ucs4;";
-- print " const char *description;";
-- print "};";
-- print ""
-- print "static const size_t kUnicodeDataSize = %d;" % (len(results))
-- print "static const UnicodeData kUnicodeData[] = {";
-+ print("struct UnicodeData {");
-+ print(" char32 ucs4;");
-+ print(" const char *description;");
-+ print("};");
-+ print("")
-+ print("static const size_t kUnicodeDataSize = %d;" % (len(results)))
-+ print("static const UnicodeData kUnicodeData[] = {");
- for line in results:
-- print line;
-- print " { 0, NULL }";
-- print "};";
-+ print(line);
-+ print(" { 0, NULL }");
-+ print("};");
-
- if __name__ == "__main__":
- main()
---- /src/gui/character_pad/data/gen_unihan_data.py
-+++ /src/gui/character_pad/data/gen_unihan_data.py
-@@ -31,35 +31,34 @@
- __author__ = "taku"
-
- import re
--import string
- import sys
- rs = {}
-
- def Escape(n):
-- if n is not "NULL":
-+ if n != "NULL":
- return "\"%s\"" % (n)
- else:
- return "NULL"
-
- def GetCode(n):
-- if n is not "NULL":
-- n = string.replace(n, '0-', 'JIS X 0208: 0x')
-- n = string.replace(n, '1-', 'JIS X 0212: 0x')
-- n = string.replace(n, '3-', 'JIS X 0213: 0x')
-- n = string.replace(n, '4-', 'JIS X 0213: 0x')
-- n = string.replace(n, 'A-', 'Vendors Ideographs: 0x')
-- n = string.replace(n, '3A', 'JIS X 0213 2000: 0x')
-+ if n != "NULL":
-+ n = n.replace('0-', 'JIS X 0208: 0x')
-+ n = n.replace('1-', 'JIS X 0212: 0x')
-+ n = n.replace('3-', 'JIS X 0213: 0x')
-+ n = n.replace('4-', 'JIS X 0213: 0x')
-+ n = n.replace('A-', 'Vendors Ideographs: 0x')
-+ n = n.replace('3A', 'JIS X 0213 2000: 0x')
- return "\"%s\"" % n
- else:
- return "NULL"
-
- def GetRadical(n):
- pat = re.compile(r'^(\d+)\.')
-- if n is not "NULL":
-+ if n != "NULL":
- m = pat.match(n)
- if m:
- result = rs[m.group(1)]
-- return "\"%s\"" % (result.encode('string_escape'))
-+ return "\"%s\"" % result
- else:
- return "NULL"
- else:
-@@ -73,6 +72,7 @@
- id = array[1]
- radical = array[2]
- rs[id] = radical
-+ fh.close()
-
- dic = {}
- pat = re.compile(r'^U\+(\S+)\s+(kTotalStrokes|kJapaneseKun|kJapaneseOn|kRSUnicode|kIRG_JSource)\t(.+)')
-@@ -86,23 +86,24 @@
- n = int(m.group(1), 16)
- if n <= 65536:
- dic.setdefault(key, {}).setdefault(field, value)
-+ fh.close()
-
- keys = sorted(dic.keys())
-
-- print "struct UnihanData {";
-- print " unsigned int ucs4;";
-+ print("struct UnihanData {");
-+ print(" unsigned int ucs4;");
- # Since the total strokes defined in Unihan data is Chinese-based
- # number, we can't use it.
- # print " unsigned char total_strokes;";
-- print " const char *japanese_kun;";
-- print " const char *japanese_on;";
-+ print(" const char *japanese_kun;");
-+ print(" const char *japanese_on;");
- # Since the radical information defined in Unihan data is Chinese-based
- # number, we can't use it.
- # print " const char *radical;";
-- print " const char *IRG_jsource;";
-- print "};"
-- print "static const size_t kUnihanDataSize = %d;" % (len(keys))
-- print "static const UnihanData kUnihanData[] = {"
-+ print(" const char *IRG_jsource;");
-+ print("};")
-+ print("static const size_t kUnihanDataSize = %d;" % (len(keys)))
-+ print("static const UnihanData kUnihanData[] = {")
-
- for key in keys:
- total_strokes = dic[key].get("kTotalStrokes", "0")
-@@ -111,9 +112,9 @@
- rad = GetRadical(dic[key].get("kRSUnicode", "NULL"))
- code = GetCode(dic[key].get("kIRG_JSource", "NULL"))
- # print " { 0x%s, %s, %s, %s, %s, %s }," % (key, total_strokes, kun, on, rad, code)
-- print " { 0x%s, %s, %s, %s }," % (key, kun, on, code)
-+ print(" { 0x%s, %s, %s, %s }," % (key, kun, on, code))
-
-- print "};"
-+ print("};")
-
- if __name__ == "__main__":
- main()
diff --git a/app-i18n/mozc/files/mozc-2.23.2815.102-python-3_4.patch b/app-i18n/mozc/files/mozc-2.23.2815.102-python-3_4.patch
deleted file mode 100644
index 41d2bf9eeb90..000000000000
--- a/app-i18n/mozc/files/mozc-2.23.2815.102-python-3_4.patch
+++ /dev/null
@@ -1,537 +0,0 @@
-https://github.com/google/mozc/issues/462
-
---- /src/prediction/gen_zero_query_data.py
-+++ /src/prediction/gen_zero_query_data.py
-@@ -59,20 +59,20 @@
- Returns:
- A integer indicating parsed pua.
- """
-- if not s or s[0] == '>':
-+ if not s or s[0:1] == b'>':
- return 0
- return int(s, 16)
-
-
- def NormalizeString(string):
- return unicodedata.normalize(
-- 'NFKC', string.decode('utf-8')).encode('utf-8').replace('~', '〜')
-+ 'NFKC', string.decode('utf-8')).replace('~', '〜').encode('utf-8')
-
-
- def RemoveTrailingNumber(string):
- if not string:
-- return ''
-- return re.sub(r'^([^0-9]+)[0-9]+$', r'\1', string)
-+ return b''
-+ return re.sub(br'^([^0-9]+)[0-9]+$', r'\1', string)
-
-
- def GetReadingsFromDescription(description):
-@@ -84,19 +84,19 @@
- # - ビル・建物
- # \xE3\x83\xBB : "・"
- return [RemoveTrailingNumber(token) for token
-- in re.split(r'(?:\(|\)|/|\xE3\x83\xBB)+', normalized)]
-+ in re.split(br'(?:\(|\)|/|\xE3\x83\xBB)+', normalized)]
-
-
- def ReadEmojiTsv(stream):
- """Reads emoji data from stream and returns zero query data."""
- zero_query_dict = defaultdict(list)
- stream = code_generator_util.SkipLineComment(stream)
-- for columns in code_generator_util.ParseColumnStream(stream, delimiter='\t'):
-+ for columns in code_generator_util.ParseColumnStream(stream, delimiter=b'\t'):
- if len(columns) != 13:
-- logging.critical('format error: %s', '\t'.join(columns))
-+ logging.critical('format error: %s', b'\t'.join(columns))
- sys.exit(1)
-
-- code_points = columns[0].split(' ')
-+ code_points = columns[0].split(b' ')
-
- # Emoji code point.
- emoji = columns[1]
-@@ -114,12 +114,12 @@
- # - Composite emoji which has multiple code point.
- # NOTE: Some Unicode 6.0 emoji don't have PUA, and it is also omitted.
- # TODO(hsumita): Check the availability of such emoji and enable it.
-- logging.info('Skip %s', ' '.join(code_points))
-+ logging.info('Skip %s', b' '.join(code_points))
- continue
-
- reading_list = []
- # \xe3\x80\x80 is a full-width space
-- for reading in re.split(r'(?: |\xe3\x80\x80)+', NormalizeString(readings)):
-+ for reading in re.split(br'(?: |\xe3\x80\x80)+', NormalizeString(readings)):
- if not reading:
- continue
- reading_list.append(reading)
-@@ -158,15 +158,15 @@
- zero_query_dict = defaultdict(list)
-
- for line in input_stream:
-- if line.startswith('#'):
-+ if line.startswith(b'#'):
- continue
-- line = line.rstrip('\r\n')
-+ line = line.rstrip(b'\r\n')
- if not line:
- continue
-
-- tokens = line.split('\t')
-+ tokens = line.split(b'\t')
- key = tokens[0]
-- values = tokens[1].split(',')
-+ values = tokens[1].split(b',')
-
- for value in values:
- zero_query_dict[key].append(
-@@ -179,16 +179,16 @@
- """Reads emoticon data from stream and returns zero query data."""
- zero_query_dict = defaultdict(list)
- stream = code_generator_util.SkipLineComment(stream)
-- for columns in code_generator_util.ParseColumnStream(stream, delimiter='\t'):
-+ for columns in code_generator_util.ParseColumnStream(stream, delimiter=b'\t'):
- if len(columns) != 3:
-- logging.critical('format error: %s', '\t'.join(columns))
-+ logging.critical('format error: %s', b'\t'.join(columns))
- sys.exit(1)
-
- emoticon = columns[0]
- readings = columns[2]
-
- # \xe3\x80\x80 is a full-width space
-- for reading in re.split(r'(?: |\xe3\x80\x80)+', readings.strip()):
-+ for reading in re.split(br'(?: |\xe3\x80\x80)+', readings.strip()):
- if not reading:
- continue
- zero_query_dict[reading].append(
-@@ -202,9 +202,9 @@
- """Reads emoji data from stream and returns zero query data."""
- zero_query_dict = defaultdict(list)
- stream = code_generator_util.SkipLineComment(stream)
-- for columns in code_generator_util.ParseColumnStream(stream, delimiter='\t'):
-+ for columns in code_generator_util.ParseColumnStream(stream, delimiter=b'\t'):
- if len(columns) < 3:
-- logging.warning('format error: %s', '\t'.join(columns))
-+ logging.warning('format error: %s', b'\t'.join(columns))
- continue
-
- symbol = columns[1]
-@@ -222,7 +222,7 @@
- continue
-
- # \xe3\x80\x80 is a full-width space
-- for reading in re.split(r'(?: |\xe3\x80\x80)+', readings.strip()):
-+ for reading in re.split(br'(?: |\xe3\x80\x80)+', readings.strip()):
- if not reading:
- continue
- zero_query_dict[reading].append(
-@@ -247,7 +247,7 @@
-
- def IsValidKeyForZeroQuery(key):
- """Returns if the key is valid for zero query trigger."""
-- is_ascii = all(ord(char) < 128 for char in key)
-+ is_ascii = all(char < 128 for char in key)
- return not is_ascii
-
-
-@@ -301,13 +301,13 @@
-
- def main():
- options = ParseOptions()
-- with open(options.input_rule, 'r') as input_stream:
-+ with open(options.input_rule, 'rb') as input_stream:
- zero_query_rule_dict = ReadZeroQueryRuleData(input_stream)
-- with open(options.input_symbol, 'r') as input_stream:
-+ with open(options.input_symbol, 'rb') as input_stream:
- zero_query_symbol_dict = ReadSymbolTsv(input_stream)
-- with open(options.input_emoji, 'r') as input_stream:
-+ with open(options.input_emoji, 'rb') as input_stream:
- zero_query_emoji_dict = ReadEmojiTsv(input_stream)
-- with open(options.input_emoticon, 'r') as input_stream:
-+ with open(options.input_emoticon, 'rb') as input_stream:
- zero_query_emoticon_dict = ReadEmoticonTsv(input_stream)
-
- merged_zero_query_dict = MergeZeroQueryData(
---- /src/prediction/gen_zero_query_number_data.py
-+++ /src/prediction/gen_zero_query_number_data.py
-@@ -41,15 +41,15 @@
- zero_query_dict = defaultdict(list)
-
- for line in input_stream:
-- if line.startswith('#'):
-+ if line.startswith(b'#'):
- continue
-- line = line.rstrip('\r\n')
-+ line = line.rstrip(b'\r\n')
- if not line:
- continue
-
-- tokens = line.split('\t')
-+ tokens = line.split(b'\t')
- key = tokens[0]
-- values = tokens[1].split(',')
-+ values = tokens[1].split(b',')
-
- for value in values:
- zero_query_dict[key].append(
-@@ -71,7 +71,7 @@
-
- def main():
- options = ParseOption()
-- with open(options.input, 'r') as input_stream:
-+ with open(options.input, 'rb') as input_stream:
- zero_query_dict = ReadZeroQueryNumberData(input_stream)
- util.WriteZeroQueryData(zero_query_dict,
- options.output_token_array,
---- /src/prediction/gen_zero_query_util.py
-+++ /src/prediction/gen_zero_query_util.py
-@@ -69,7 +69,7 @@
- output_string_array):
- # Collect all the strings and assing index in ascending order
- string_index = {}
-- for key, entry_list in zero_query_dict.iteritems():
-+ for key, entry_list in zero_query_dict.items():
- string_index[key] = 0
- for entry in entry_list:
- string_index[entry.value] = 0
---- /src/rewriter/gen_counter_suffix_array.py
-+++ /src/rewriter/gen_counter_suffix_array.py
-@@ -43,7 +43,7 @@
- with codecs.open(id_file, 'r', encoding='utf-8') as stream:
- stream = code_generator_util.ParseColumnStream(stream, num_column=2)
- for pos_id, pos_name in stream:
-- if pos_name.startswith(u'名詞,接尾,助数詞'):
-+ if pos_name.startswith('名詞,接尾,助数詞'):
- pos_ids.add(pos_id)
- return pos_ids
-
---- /src/rewriter/gen_emoji_rewriter_data.py
-+++ /src/rewriter/gen_emoji_rewriter_data.py
-@@ -74,19 +74,19 @@
- the glyph (in other words, it has alternative (primary) code point, which
- doesn't lead '>' and that's why we'll ignore it).
- """
-- if not s or s[0] == '>':
-+ if not s or s[0:1] == b'>':
- return None
- return int(s, 16)
-
-
--_FULLWIDTH_RE = re.compile(ur'[!-~]') # U+FF01 - U+FF5E
-+_FULLWIDTH_RE = re.compile(r'[!-~]') # U+FF01 - U+FF5E
-
-
- def NormalizeString(string):
- """Normalize full width ascii characters to half width characters."""
-- offset = ord(u'A') - ord(u'A')
-- return _FULLWIDTH_RE.sub(lambda x: unichr(ord(x.group(0)) - offset),
-- unicode(string, 'utf-8')).encode('utf-8')
-+ offset = ord('A') - ord('A')
-+ return _FULLWIDTH_RE.sub(lambda x: chr(ord(x.group(0)) - offset),
-+ string.decode('utf-8')).encode('utf-8')
-
-
- def ReadEmojiTsv(stream):
-@@ -96,14 +96,14 @@
- token_dict = defaultdict(list)
-
- stream = code_generator_util.SkipLineComment(stream)
-- for columns in code_generator_util.ParseColumnStream(stream, delimiter='\t'):
-+ for columns in code_generator_util.ParseColumnStream(stream, delimiter=b'\t'):
- if len(columns) != 13:
-- logging.critical('format error: %s', '\t'.join(columns))
-+ logging.critical('format error: %s', b'\t'.join(columns))
- sys.exit(1)
-
-- code_points = columns[0].split(' ')
-+ code_points = columns[0].split(b' ')
- # Emoji code point.
-- emoji = columns[1] if columns[1] else ''
-+ emoji = columns[1] if columns[1] else b''
- android_pua = ParseCodePoint(columns[2])
- docomo_pua = ParseCodePoint(columns[3])
- softbank_pua = ParseCodePoint(columns[4])
-@@ -112,10 +112,10 @@
- readings = columns[6]
-
- # [7]: Name defined in Unicode. It is ignored in current implementation.
-- utf8_description = columns[8] if columns[8] else ''
-- docomo_description = columns[9] if columns[9] else ''
-- softbank_description = columns[10] if columns[10] else ''
-- kddi_description = columns[11] if columns[11] else ''
-+ utf8_description = columns[8] if columns[8] else b''
-+ docomo_description = columns[9] if columns[9] else b''
-+ softbank_description = columns[10] if columns[10] else b''
-+ kddi_description = columns[11] if columns[11] else b''
-
- if not android_pua or len(code_points) > 1:
- # Skip some emoji, which is not supported on old devices.
-@@ -123,7 +123,7 @@
- # - Composite emoji which has multiple code point.
- # NOTE: Some Unicode 6.0 emoji don't have PUA, and it is also omitted.
- # TODO(hsumita): Check the availability of such emoji and enable it.
-- logging.info('Skip %s', ' '.join(code_points))
-+ logging.info('Skip %s', b' '.join(code_points))
- continue
-
- # Check consistency between carrier PUA codes and descriptions for Android
-@@ -132,7 +132,7 @@
- (bool(softbank_pua) != bool(softbank_description)) or
- (bool(kddi_pua) != bool(kddi_description))):
- logging.warning('carrier PUA and description conflict: %s',
-- '\t'.join(columns))
-+ b'\t'.join(columns))
- continue
-
- # Check if the character is usable on Android.
-@@ -140,7 +140,7 @@
- android_pua = 0 # Replace None with 0.
-
- if not emoji and not android_pua:
-- logging.info('Skip: %s', '\t'.join(columns))
-+ logging.info('Skip: %s', b'\t'.join(columns))
- continue
-
- index = len(emoji_data_list)
-@@ -149,7 +149,7 @@
- kddi_description))
-
- # \xe3\x80\x80 is a full-width space
-- for reading in re.split(r'(?: |\xe3\x80\x80)+', readings.strip()):
-+ for reading in re.split(br'(?: |\xe3\x80\x80)+', readings.strip()):
- if reading:
- token_dict[NormalizeString(reading)].append(index)
-
-@@ -159,7 +159,7 @@
- def OutputData(emoji_data_list, token_dict,
- token_array_file, string_array_file):
- """Output token and string arrays to files."""
-- sorted_token_dict = sorted(token_dict.iteritems())
-+ sorted_token_dict = sorted(token_dict.items())
-
- strings = {}
- for reading, _ in sorted_token_dict:
-@@ -171,7 +171,7 @@
- strings[docomo_description] = 0
- strings[softbank_description] = 0
- strings[kddi_description] = 0
-- sorted_strings = sorted(strings.iterkeys())
-+ sorted_strings = sorted(strings.keys())
- for index, s in enumerate(sorted_strings):
- strings[s] = index
-
-@@ -205,7 +205,7 @@
-
- def main():
- options = ParseOptions()
-- with open(options.input, 'r') as input_stream:
-+ with open(options.input, 'rb') as input_stream:
- (emoji_data_list, token_dict) = ReadEmojiTsv(input_stream)
-
- OutputData(emoji_data_list, token_dict,
---- /src/rewriter/gen_reading_correction_data.py
-+++ /src/rewriter/gen_reading_correction_data.py
-@@ -63,7 +63,7 @@
- def WriteData(input_path, output_value_array_path, output_error_array_path,
- output_correction_array_path):
- outputs = []
-- with open(input_path) as input_stream:
-+ with open(input_path, 'rb') as input_stream:
- input_stream = code_generator_util.SkipLineComment(input_stream)
- input_stream = code_generator_util.ParseColumnStream(input_stream,
- num_column=3)
-@@ -73,7 +73,7 @@
-
- # In order to lookup the entries via |error| with binary search,
- # sort outputs here.
-- outputs.sort(lambda x, y: cmp(x[1], y[1]) or cmp(x[0], y[0]))
-+ outputs.sort(key=lambda x: (x[1], x[0]))
-
- serialized_string_array_builder.SerializeToFile(
- [value for (value, _, _) in outputs], output_value_array_path)
---- /src/rewriter/gen_single_kanji_rewriter_data.py
-+++ /src/rewriter/gen_single_kanji_rewriter_data.py
-@@ -52,7 +52,7 @@
- stream = code_generator_util.ParseColumnStream(stream, num_column=2)
- outputs = list(stream)
- # For binary search by |key|, sort outputs here.
-- outputs.sort(lambda x, y: cmp(x[0], y[0]))
-+ outputs.sort(key=lambda x: x[0])
-
- return outputs
-
-@@ -72,7 +72,7 @@
- variant_items.append([target, original, len(variant_types) - 1])
-
- # For binary search by |target|, sort variant items here.
-- variant_items.sort(lambda x, y: cmp(x[0], y[0]))
-+ variant_items.sort(key=lambda x: x[0])
-
- return (variant_types, variant_items)
-
-@@ -151,10 +151,10 @@
- def main():
- options = _ParseOptions()
-
-- with open(options.single_kanji_file, 'r') as single_kanji_stream:
-+ with open(options.single_kanji_file, 'rb') as single_kanji_stream:
- single_kanji = ReadSingleKanji(single_kanji_stream)
-
-- with open(options.variant_file, 'r') as variant_stream:
-+ with open(options.variant_file, 'rb') as variant_stream:
- variant_info = ReadVariant(variant_stream)
-
- WriteSingleKanji(single_kanji,
---- /src/session/gen_session_stress_test_data.py
-+++ /src/session/gen_session_stress_test_data.py
-@@ -50,24 +50,26 @@
- """
- result = ''
- for c in s:
-- hexstr = hex(ord(c))
-+ hexstr = hex(c)
- # because hexstr contains '0x', remove the prefix and add our prefix
- result += '\\x' + hexstr[2:]
- return result
-
- def GenerateHeader(file):
- try:
-- print "const char *kTestSentences[] = {"
-- for line in open(file, "r"):
-- if line.startswith('#'):
-+ print("const char *kTestSentences[] = {")
-+ fh = open(file, "rb")
-+ for line in fh:
-+ if line.startswith(b'#'):
- continue
-- line = line.rstrip('\r\n')
-+ line = line.rstrip(b'\r\n')
- if not line:
- continue
-- print " \"%s\"," % escape_string(line)
-- print "};"
-+ print(" \"%s\"," % escape_string(line))
-+ fh.close()
-+ print("};")
- except:
-- print "cannot open %s" % (file)
-+ print("cannot open %s" % (file))
- sys.exit(1)
-
- def main():
---- /src/unix/ibus/gen_mozc_xml.py
-+++ /src/unix/ibus/gen_mozc_xml.py
-@@ -74,7 +74,7 @@
-
-
- def OutputXmlElement(param_dict, element_name, value):
-- print ' <%s>%s</%s>' % (element_name, (value % param_dict), element_name)
-+ print(' <%s>%s</%s>' % (element_name, (value % param_dict), element_name))
-
-
- def OutputXml(param_dict, component, engine_common, engines, setup_arg):
-@@ -90,26 +90,26 @@
- engines: A dictionary from a property name to a list of property values of
- engines. For example, {'name': ['mozc-jp', 'mozc', 'mozc-dv']}.
- """
-- print '<component>'
-- for key in component:
-+ print('<component>')
-+ for key in sorted(component):
- OutputXmlElement(param_dict, key, component[key])
-- print '<engines>'
-+ print('<engines>')
- for i in range(len(engines['name'])):
-- print '<engine>'
-- for key in engine_common:
-+ print('<engine>')
-+ for key in sorted(engine_common):
- OutputXmlElement(param_dict, key, engine_common[key])
- if setup_arg:
- OutputXmlElement(param_dict, 'setup', ' '.join(setup_arg))
-- for key in engines:
-+ for key in sorted(engines):
- OutputXmlElement(param_dict, key, engines[key][i])
-- print '</engine>'
-- print '</engines>'
-- print '</component>'
-+ print('</engine>')
-+ print('</engines>')
-+ print('</component>')
-
-
- def OutputCppVariable(param_dict, prefix, variable_name, value):
-- print 'const char k%s%s[] = "%s";' % (prefix, variable_name.capitalize(),
-- (value % param_dict))
-+ print('const char k%s%s[] = "%s";' % (prefix, variable_name.capitalize(),
-+ (value % param_dict)))
-
-
- def OutputCpp(param_dict, component, engine_common, engines):
-@@ -122,18 +122,18 @@
- engines: ditto.
- """
- guard_name = 'MOZC_UNIX_IBUS_MAIN_H_'
-- print CPP_HEADER % (guard_name, guard_name)
-- for key in component:
-+ print(CPP_HEADER % (guard_name, guard_name))
-+ for key in sorted(component):
- OutputCppVariable(param_dict, 'Component', key, component[key])
-- for key in engine_common:
-+ for key in sorted(engine_common):
- OutputCppVariable(param_dict, 'Engine', key, engine_common[key])
-- for key in engines:
-- print 'const char* kEngine%sArray[] = {' % key.capitalize()
-+ for key in sorted(engines):
-+ print('const char* kEngine%sArray[] = {' % key.capitalize())
- for i in range(len(engines[key])):
-- print '"%s",' % (engines[key][i] % param_dict)
-- print '};'
-- print 'const size_t kEngineArrayLen = %s;' % len(engines['name'])
-- print CPP_FOOTER % guard_name
-+ print('"%s",' % (engines[key][i] % param_dict))
-+ print('};')
-+ print('const size_t kEngineArrayLen = %s;' % len(engines['name']))
-+ print(CPP_FOOTER % guard_name)
-
-
- def CheckIBusVersion(options, minimum_version):
---- /src/usage_stats/gen_stats_list.py
-+++ /src/usage_stats/gen_stats_list.py
-@@ -37,23 +37,24 @@
-
- def GetStatsNameList(filename):
- stats = []
-- for line in open(filename, 'r'):
-- stat = line.strip()
-- if not stat or stat[0] == '#':
-- continue
-- stats.append(stat)
-+ with open(filename, 'r') as file:
-+ for line in file:
-+ stat = line.strip()
-+ if not stat or stat[0] == '#':
-+ continue
-+ stats.append(stat)
- return stats
-
-
- def main():
- stats_list = GetStatsNameList(sys.argv[1])
-- print '// This header file is generated by gen_stats_list.py'
-+ print('// This header file is generated by gen_stats_list.py')
- for stats in stats_list:
-- print 'const char k%s[] = "%s";' % (stats, stats)
-- print 'const char *kStatsList[] = {'
-+ print('const char k%s[] = "%s";' % (stats, stats))
-+ print('const char *kStatsList[] = {')
- for stats in stats_list:
-- print ' k%s,' % (stats)
-- print '};'
-+ print(' k%s,' % (stats))
-+ print('};')
-
-
- if __name__ == '__main__':
diff --git a/app-i18n/mozc/files/mozc-2.23.2815.102-reiwa.patch b/app-i18n/mozc/files/mozc-2.23.2815.102-reiwa.patch
deleted file mode 100644
index 561a989a41d3..000000000000
--- a/app-i18n/mozc/files/mozc-2.23.2815.102-reiwa.patch
+++ /dev/null
@@ -1,35 +0,0 @@
-https://github.com/google/mozc/issues/461
-
---- /src/data/symbol/categorized.tsv
-+++ /src/data/symbol/categorized.tsv
-@@ -405,6 +405,7 @@
- ㍽ MATH たいしょう ねんごう
- ㍼ MATH しょうわ ねんごう
- ㍻ MATH へいせい ねんごう
-+㋿ MATH れいわ ねんごう
-
- ㌣ MATH せんと たんい
- ㌦ MATH どる たんい
---- /src/data/symbol/symbol.tsv
-+++ /src/data/symbol/symbol.tsv
-@@ -528,6 +528,7 @@
- 記号 ㊦ した げ 丸下
- 記号 ㊧ ひだり 丸左
- 記号 ㊨ みぎ 丸右
-+記号 ㋿ れいわ ねんごう 年号 OTHER
- 記号 ㍻ へいせい ねんごう 年号 OTHER
- 記号 ㍼ しょうわ ねんごう 年号 OTHER
- 記号 ㍽ たいしょう ねんごう 年号 OTHER
---- /src/rewriter/date_rewriter.cc
-+++ /src/rewriter/date_rewriter.cc
-@@ -1239,6 +1239,10 @@
- 1989,
- "平成",
- "へいせい",
-+ }, {
-+ 2019,
-+ "令和",
-+ "れいわ",
- }
- };
-
diff --git a/app-i18n/mozc/files/mozc-2.23.2815.102-server_path_check.patch b/app-i18n/mozc/files/mozc-2.23.2815.102-server_path_check.patch
deleted file mode 100644
index dd606e27fb56..000000000000
--- a/app-i18n/mozc/files/mozc-2.23.2815.102-server_path_check.patch
+++ /dev/null
@@ -1,95 +0,0 @@
-https://github.com/google/mozc/issues/471
-
---- /src/ipc/ipc_path_manager.cc
-+++ /src/ipc/ipc_path_manager.cc
-@@ -332,9 +332,21 @@
- return false;
- }
-
-+ // Expand symbolic links in the expected server path to avoid false negatives
-+ // during comparisons of the expected server path and the actual server path.
-+ string real_server_path = server_path;
-+#ifndef OS_WIN
-+ char real_server_path_[PATH_MAX];
-+ if (realpath(server_path.c_str(), real_server_path_) == NULL) {
-+ LOG(ERROR) << "realpath failed: " << strerror(errno);
-+ return false;
-+ }
-+ real_server_path = real_server_path_;
-+#endif
-+
- // compare path name
- if (pid == server_pid_) {
-- return (server_path == server_path_);
-+ return (real_server_path == server_path_);
- }
-
- server_pid_ = 0;
-@@ -344,17 +356,17 @@
- {
- std::wstring expected_server_ntpath;
- const std::map<string, std::wstring>::const_iterator it =
-- expected_server_ntpath_cache_.find(server_path);
-+ expected_server_ntpath_cache_.find(real_server_path);
- if (it != expected_server_ntpath_cache_.end()) {
- expected_server_ntpath = it->second;
- } else {
- std::wstring wide_server_path;
-- Util::UTF8ToWide(server_path, &wide_server_path);
-+ Util::UTF8ToWide(real_server_path, &wide_server_path);
- if (WinUtil::GetNtPath(wide_server_path, &expected_server_ntpath)) {
-- // Caches the relationship from |server_path| to
-- // |expected_server_ntpath| in case |server_path| is renamed later.
-+ // Caches the relationship from |real_server_path| to
-+ // |expected_server_ntpath| in case |real_server_path| is renamed later.
- // (This can happen during the updating).
-- expected_server_ntpath_cache_[server_path] = expected_server_ntpath;
-+ expected_server_ntpath_cache_[real_server_path] = expected_server_ntpath;
- }
- }
-
-@@ -371,9 +383,9 @@
- return false;
- }
-
-- // Here we can safely assume that |server_path| (expected one) should be
-+ // Here we can safely assume that |real_server_path| (expected one) should be
- // the same to |server_path_| (actual one).
-- server_path_ = server_path;
-+ server_path_ = real_server_path;
- server_pid_ = pid;
- }
- #endif // OS_WIN
-@@ -399,7 +411,7 @@
- #ifdef OS_LINUX
- // load from /proc/<pid>/exe
- char proc[128];
-- char filename[512];
-+ char filename[PATH_MAX];
- snprintf(proc, sizeof(proc) - 1, "/proc/%u/exe", pid);
- const ssize_t size = readlink(proc, filename, sizeof(filename) - 1);
- if (size == -1) {
-@@ -412,18 +424,18 @@
- server_pid_ = pid;
- #endif // OS_LINUX
-
-- VLOG(1) << "server path: " << server_path << " " << server_path_;
-- if (server_path == server_path_) {
-+ VLOG(1) << "server path: " << real_server_path << " " << server_path_;
-+ if (real_server_path == server_path_) {
- return true;
- }
-
- #ifdef OS_LINUX
-- if ((server_path + " (deleted)") == server_path_) {
-- LOG(WARNING) << server_path << " on disk is modified";
-+ if ((real_server_path + " (deleted)") == server_path_) {
-+ LOG(WARNING) << real_server_path << " on disk is modified";
- // If a user updates the server binary on disk during the server is running,
- // "readlink /proc/<pid>/exe" returns a path with the " (deleted)" suffix.
- // We allow the special case.
-- server_path_ = server_path;
-+ server_path_ = real_server_path;
- return true;
- }
- #endif // OS_LINUX
diff --git a/app-i18n/mozc/files/mozc-2.23.2815.102-system_libraries.patch b/app-i18n/mozc/files/mozc-2.23.2815.102-system_libraries.patch
deleted file mode 100644
index 064b910c7026..000000000000
--- a/app-i18n/mozc/files/mozc-2.23.2815.102-system_libraries.patch
+++ /dev/null
@@ -1,274 +0,0 @@
---- /src/gyp/defines.gypi
-+++ /src/gyp/defines.gypi
-@@ -71,6 +71,12 @@
- # use_libibus represents if ibus library is used or not.
- # This option is only for Linux.
- 'use_libibus%': '0',
-+
-+ # use_libgtest represents if gtest library is used or not.
-+ 'use_libgtest%': '0',
-+
-+ # use_libjsoncpp represents if jsoncpp library is used or not.
-+ 'use_libjsoncpp%': '0',
- },
- 'target_defaults': {
- 'defines': [
---- /src/net/jsoncpp.gyp
-+++ /src/net/jsoncpp.gyp
-@@ -31,32 +31,57 @@
- 'targets': [
- {
- 'target_name': 'jsoncpp',
-- 'type': 'static_library',
-- 'variables': {
-- 'jsoncpp_root': '<(third_party_dir)/jsoncpp',
-- 'jsoncpp_srcs': [
-- '<(jsoncpp_root)/src/lib_json/json_reader.cpp',
-- '<(jsoncpp_root)/src/lib_json/json_value.cpp',
-- '<(jsoncpp_root)/src/lib_json/json_writer.cpp',
-- ],
-- 'jsoncpp_include_dirs': ['<(jsoncpp_root)/include'],
-- 'jsoncpp_additional_macros': ['JSON_USE_EXCEPTION=0'],
-- },
-- 'defines': [
-- '<@(jsoncpp_additional_macros)',
-+ 'conditions': [
-+ ['use_libjsoncpp==1', {
-+ 'type': 'none',
-+ 'variables': {
-+ 'jsoncpp_additional_macros': ['JSON_USE_EXCEPTION=0'],
-+ },
-+ 'all_dependent_settings': {
-+ 'defines': [
-+ '<@(jsoncpp_additional_macros)',
-+ ],
-+ 'cflags': [
-+ '<!@(pkg-config --cflags jsoncpp)',
-+ ],
-+ 'link_settings': {
-+ 'libraries': [
-+ '<!@(pkg-config --libs-only-l jsoncpp)',
-+ ],
-+ 'ldflags': [
-+ '<!@(pkg-config --libs-only-L jsoncpp)',
-+ ],
-+ }
-+ },
-+ }, {
-+ 'type': 'static_library',
-+ 'variables': {
-+ 'jsoncpp_root': '<(third_party_dir)/jsoncpp',
-+ 'jsoncpp_srcs': [
-+ '<(jsoncpp_root)/src/lib_json/json_reader.cpp',
-+ '<(jsoncpp_root)/src/lib_json/json_value.cpp',
-+ '<(jsoncpp_root)/src/lib_json/json_writer.cpp',
-+ ],
-+ 'jsoncpp_include_dirs': ['<(jsoncpp_root)/include'],
-+ 'jsoncpp_additional_macros': ['JSON_USE_EXCEPTION=0'],
-+ },
-+ 'defines': [
-+ '<@(jsoncpp_additional_macros)',
-+ ],
-+ 'sources': [
-+ '<@(jsoncpp_srcs)',
-+ 'jsoncpp.h',
-+ ],
-+ 'include_dirs': [
-+ '<@(jsoncpp_include_dirs)',
-+ ],
-+ 'all_dependent_settings': {
-+ 'defines': [
-+ '<@(jsoncpp_additional_macros)',
-+ ],
-+ },
-+ }],
- ],
-- 'sources': [
-- '<@(jsoncpp_srcs)',
-- 'jsoncpp.h',
-- ],
-- 'include_dirs': [
-- '<@(jsoncpp_include_dirs)',
-- ],
-- 'all_dependent_settings': {
-- 'defines': [
-- '<@(jsoncpp_additional_macros)',
-- ],
-- },
- },
- ],
- }
---- /src/net/jsoncpp.h
-+++ /src/net/jsoncpp.h
-@@ -35,7 +35,7 @@
- // Mozc basically disables C++ exception.
- #define JSON_USE_EXCEPTION 0
- #endif // !JSON_USE_EXCEPTION
--#include "third_party/jsoncpp/include/json/json.h"
-+#include <json/json.h>
- #define MOZC_JSONCPP_JSON_H_INCLUDED
- #endif // !MOZC_JSONCPP_JSON_H_INCLUDED
-
---- /src/testing/testing.gyp
-+++ /src/testing/testing.gyp
-@@ -53,66 +53,101 @@
- 'targets': [
- {
- 'target_name': 'testing',
-- 'type': 'static_library',
-- 'variables': {
-- 'gtest_defines': [
-- 'GTEST_LANG_CXX11=1',
-- 'GTEST_HAS_TR1_TUPLE=0', # disable tr1 tuple in favor of C++11 tuple.
-- ],
-- 'gtest_dir': '<(third_party_dir)/gtest/googletest',
-- 'gmock_dir': '<(third_party_dir)/gtest/googlemock',
-- 'conditions': [
-- ['_toolset=="target" and target_platform=="Android"', {
-- 'gtest_defines': [
-- 'GTEST_HAS_RTTI=0', # Android NDKr7 requires this.
-- 'GTEST_HAS_CLONE=0',
-- 'GTEST_HAS_GLOBAL_WSTRING=0',
-- 'GTEST_HAS_POSIX_RE=0',
-- 'GTEST_HAS_STD_WSTRING=0',
-- 'GTEST_OS_LINUX=1',
-- 'GTEST_OS_LINUX_ANDROID=1',
-- ],
-- }],
-- ],
-- },
-- 'sources': [
-- '<(gmock_dir)/src/gmock-cardinalities.cc',
-- '<(gmock_dir)/src/gmock-internal-utils.cc',
-- '<(gmock_dir)/src/gmock-matchers.cc',
-- '<(gmock_dir)/src/gmock-spec-builders.cc',
-- '<(gmock_dir)/src/gmock.cc',
-- '<(gtest_dir)/src/gtest-death-test.cc',
-- '<(gtest_dir)/src/gtest-filepath.cc',
-- '<(gtest_dir)/src/gtest-port.cc',
-- '<(gtest_dir)/src/gtest-printers.cc',
-- '<(gtest_dir)/src/gtest-test-part.cc',
-- '<(gtest_dir)/src/gtest-typed-test.cc',
-- '<(gtest_dir)/src/gtest.cc',
-- ],
-- 'include_dirs': [
-- '<(gmock_dir)',
-- '<(gmock_dir)/include',
-- '<(gtest_dir)',
-- '<(gtest_dir)/include',
-- ],
-- 'defines': [
-- '<@(gtest_defines)',
-- ],
-- 'all_dependent_settings': {
-- 'defines': [
-- '<@(gtest_defines)',
-- ],
-- 'include_dirs': [
-- '<(gmock_dir)/include',
-- '<(gtest_dir)/include',
-- ],
-- },
- 'conditions': [
-- ['(_toolset=="target" and compiler_target=="clang") or '
-- '(_toolset=="host" and compiler_host=="clang")', {
-- 'cflags': [
-- '-Wno-missing-field-initializers',
-- '-Wno-unused-private-field',
-+ ['use_libgtest==1', {
-+ 'type': 'none',
-+ 'variables': {
-+ 'gtest_defines': [
-+ 'GTEST_LANG_CXX11=1',
-+ 'GTEST_HAS_TR1_TUPLE=0', # disable tr1 tuple in favor of C++11 tuple.
-+ ],
-+ 'conditions': [
-+ ['_toolset=="target" and target_platform=="Android"', {
-+ 'gtest_defines': [
-+ 'GTEST_HAS_RTTI=0', # Android NDKr7 requires this.
-+ 'GTEST_HAS_CLONE=0',
-+ 'GTEST_HAS_GLOBAL_WSTRING=0',
-+ 'GTEST_HAS_POSIX_RE=0',
-+ 'GTEST_HAS_STD_WSTRING=0',
-+ 'GTEST_OS_LINUX=1',
-+ 'GTEST_OS_LINUX_ANDROID=1',
-+ ],
-+ }],
-+ ],
-+ },
-+ 'all_dependent_settings': {
-+ 'defines': [
-+ '<@(gtest_defines)',
-+ ],
-+ 'link_settings': {
-+ 'libraries': [
-+ '-lgmock -lgtest',
-+ ],
-+ },
-+ },
-+ }, {
-+ 'type': 'static_library',
-+ 'variables': {
-+ 'gtest_defines': [
-+ 'GTEST_LANG_CXX11=1',
-+ 'GTEST_HAS_TR1_TUPLE=0', # disable tr1 tuple in favor of C++11 tuple.
-+ ],
-+ 'gtest_dir': '<(third_party_dir)/gtest/googletest',
-+ 'gmock_dir': '<(third_party_dir)/gtest/googlemock',
-+ 'conditions': [
-+ ['_toolset=="target" and target_platform=="Android"', {
-+ 'gtest_defines': [
-+ 'GTEST_HAS_RTTI=0', # Android NDKr7 requires this.
-+ 'GTEST_HAS_CLONE=0',
-+ 'GTEST_HAS_GLOBAL_WSTRING=0',
-+ 'GTEST_HAS_POSIX_RE=0',
-+ 'GTEST_HAS_STD_WSTRING=0',
-+ 'GTEST_OS_LINUX=1',
-+ 'GTEST_OS_LINUX_ANDROID=1',
-+ ],
-+ }],
-+ ],
-+ },
-+ 'sources': [
-+ '<(gmock_dir)/src/gmock-cardinalities.cc',
-+ '<(gmock_dir)/src/gmock-internal-utils.cc',
-+ '<(gmock_dir)/src/gmock-matchers.cc',
-+ '<(gmock_dir)/src/gmock-spec-builders.cc',
-+ '<(gmock_dir)/src/gmock.cc',
-+ '<(gtest_dir)/src/gtest-death-test.cc',
-+ '<(gtest_dir)/src/gtest-filepath.cc',
-+ '<(gtest_dir)/src/gtest-port.cc',
-+ '<(gtest_dir)/src/gtest-printers.cc',
-+ '<(gtest_dir)/src/gtest-test-part.cc',
-+ '<(gtest_dir)/src/gtest-typed-test.cc',
-+ '<(gtest_dir)/src/gtest.cc',
-+ ],
-+ 'include_dirs': [
-+ '<(gmock_dir)',
-+ '<(gmock_dir)/include',
-+ '<(gtest_dir)',
-+ '<(gtest_dir)/include',
-+ ],
-+ 'defines': [
-+ '<@(gtest_defines)',
-+ ],
-+ 'all_dependent_settings': {
-+ 'defines': [
-+ '<@(gtest_defines)',
-+ ],
-+ 'include_dirs': [
-+ '<(gmock_dir)/include',
-+ '<(gtest_dir)/include',
-+ ],
-+ },
-+ 'conditions': [
-+ ['(_toolset=="target" and compiler_target=="clang") or '
-+ '(_toolset=="host" and compiler_host=="clang")', {
-+ 'cflags': [
-+ '-Wno-missing-field-initializers',
-+ '-Wno-unused-private-field',
-+ ],
-+ }],
- ],
- }],
- ],
diff --git a/app-i18n/mozc/metadata.xml b/app-i18n/mozc/metadata.xml
index db3170ba53c6..7fd885c8d62f 100644
--- a/app-i18n/mozc/metadata.xml
+++ b/app-i18n/mozc/metadata.xml
@@ -12,8 +12,6 @@
<use>
<flag name="fcitx4">Enable support for <pkg>app-i18n/fcitx</pkg> 4</flag>
<flag name="gui">Install graphical user interface tool (mozc_tool)</flag>
- <flag name="handwriting-tegaki">Use handwriting recognition model from <pkg>app-i18n/tegaki-zinnia-japanese</pkg> by default</flag>
- <flag name="handwriting-tomoe">Use handwriting recognition model from <pkg>app-i18n/zinnia-tomoe</pkg> by default</flag>
<flag name="ibus">Enable support for <pkg>app-i18n/ibus</pkg></flag>
<flag name="renderer">Enable native candidate window</flag>
</use>
diff --git a/app-i18n/mozc/mozc-2.23.2815.102.ebuild b/app-i18n/mozc/mozc-2.23.2815.102.ebuild
deleted file mode 100644
index b859d8cf38a2..000000000000
--- a/app-i18n/mozc/mozc-2.23.2815.102.ebuild
+++ /dev/null
@@ -1,369 +0,0 @@
-# Copyright 2010-2022 Gentoo Authors
-# Distributed under the terms of the GNU General Public License v2
-
-EAPI="8"
-PYTHON_COMPAT=( python3_{8..10} )
-
-inherit elisp-common multiprocessing python-any-r1 toolchain-funcs
-
-if [[ "${PV}" == "9999" ]]; then
- inherit git-r3
-
- EGIT_REPO_URI="https://github.com/google/mozc"
- EGIT_SUBMODULES=(src/third_party/japanese_usage_dictionary)
-else
- MOZC_GIT_REVISION="afb03ddfe72dde4cf2409863a3bfea160f7a66d8"
- JAPANESE_USAGE_DICTIONARY_GIT_REVISION="e5b3425575734c323e1d947009dd74709437b684"
- JAPANESE_USAGE_DICTIONARY_DATE="20120416091336"
- FCITX_PATCH_VERSION="2.23.2815.102.1"
-fi
-
-DESCRIPTION="Mozc - Japanese input method editor"
-HOMEPAGE="https://github.com/google/mozc"
-if [[ "${PV}" == "9999" ]]; then
- SRC_URI=""
-else
- SRC_URI="https://github.com/google/${PN}/archive/${MOZC_GIT_REVISION}.tar.gz -> ${P}.tar.gz
- https://github.com/hiroyuki-komatsu/japanese-usage-dictionary/archive/${JAPANESE_USAGE_DICTIONARY_GIT_REVISION}.tar.gz -> japanese-usage-dictionary-${JAPANESE_USAGE_DICTIONARY_DATE}.tar.gz
- https://dev.gentoo.org/~juippis/distfiles/tmp/mozc-2.23.2815.102-protobuf_generated_classes_no_inheritance.patch
- fcitx4? ( https://download.fcitx-im.org/fcitx-mozc/fcitx-mozc-${FCITX_PATCH_VERSION}.patch )"
-fi
-
-# Mozc: BSD
-# src/data/dictionary_oss: ipadic, public-domain
-# src/data/unicode: unicode
-# japanese-usage-dictionary: BSD-2
-LICENSE="BSD BSD-2 ipadic public-domain unicode"
-SLOT="0"
-KEYWORDS="amd64 ~arm64 ~ppc64 x86"
-IUSE="debug emacs fcitx4 +gui +handwriting-tegaki handwriting-tomoe ibus renderer test"
-REQUIRED_USE="|| ( emacs fcitx4 ibus ) gui? ( ^^ ( handwriting-tegaki handwriting-tomoe ) ) !gui? ( !handwriting-tegaki !handwriting-tomoe )"
-RESTRICT="!test? ( test )"
-
-BDEPEND="${PYTHON_DEPS}
- >=dev-libs/protobuf-3.0.0
- dev-util/gyp
- dev-util/ninja
- virtual/pkgconfig
- emacs? ( app-editors/emacs:* )
- fcitx4? ( sys-devel/gettext )"
-RDEPEND=">=dev-libs/protobuf-3.0.0:=
- emacs? ( app-editors/emacs:* )
- fcitx4? (
- app-i18n/fcitx:4
- virtual/libintl
- )
- gui? (
- app-i18n/zinnia
- dev-qt/qtcore:5
- dev-qt/qtgui:5
- dev-qt/qtwidgets:5
- handwriting-tegaki? ( app-i18n/tegaki-zinnia-japanese )
- handwriting-tomoe? ( app-i18n/zinnia-tomoe )
- )
- ibus? (
- >=app-i18n/ibus-1.4.1
- dev-libs/glib:2
- x11-libs/libxcb
- )
- renderer? (
- dev-libs/glib:2
- x11-libs/cairo
- x11-libs/gtk+:2
- x11-libs/pango
- )"
-DEPEND="${RDEPEND}
- test? (
- >=dev-cpp/gtest-1.8.0
- dev-libs/jsoncpp
- )"
-
-S="${WORKDIR}/${P}/src"
-
-SITEFILE="50${PN}-gentoo.el"
-
-execute() {
- einfo "$@"
- "$@"
-}
-
-src_unpack() {
- if [[ "${PV}" == "9999" ]]; then
- git-r3_src_unpack
-
- if use fcitx4; then
- local EGIT_SUBMODULES=()
- git-r3_fetch https://github.com/fcitx/mozc refs/heads/fcitx
- git-r3_checkout https://github.com/fcitx/mozc "${WORKDIR}/fcitx-mozc"
- fi
- else
- unpack ${P}.tar.gz
- mv mozc-${MOZC_GIT_REVISION} ${P} || die
-
- unpack japanese-usage-dictionary-${JAPANESE_USAGE_DICTIONARY_DATE}.tar.gz
- cp -p japanese-usage-dictionary-${JAPANESE_USAGE_DICTIONARY_GIT_REVISION}/usage_dict.txt ${P}/src/third_party/japanese_usage_dictionary || die
- fi
-}
-
-src_prepare() {
- eapply -p2 "${FILESDIR}/${PN}-2.23.2815.102-python-3_1.patch"
- eapply -p2 "${FILESDIR}/${PN}-2.23.2815.102-python-3_2.patch"
- eapply -p2 "${FILESDIR}/${PN}-2.23.2815.102-python-3_3.patch"
- eapply -p2 "${FILESDIR}/${PN}-2.23.2815.102-python-3_4.patch"
- eapply -p2 "${FILESDIR}/${PN}-2.23.2815.102-system_libraries.patch"
- eapply -p2 "${FILESDIR}/${PN}-2.23.2815.102-gcc-8.patch"
- eapply -p2 "${DISTDIR}/${PN}-2.23.2815.102-protobuf_generated_classes_no_inheritance.patch"
- eapply -p2 "${FILESDIR}/${PN}-2.23.2815.102-protobuf-3.18.patch"
- eapply -p2 "${FILESDIR}/${PN}-2.23.2815.102-environmental_variables.patch"
- eapply -p2 "${FILESDIR}/${PN}-2.23.2815.102-reiwa.patch"
- eapply -p2 "${FILESDIR}/${PN}-2.23.2815.102-server_path_check.patch"
- eapply -p2 "${FILESDIR}/${PN}-2.20.2673.102-tests_build.patch"
- eapply -p2 "${FILESDIR}/${PN}-2.20.2673.102-tests_skipping.patch"
-
- if use fcitx4; then
- if [[ "${PV}" == "9999" ]]; then
- cp -pr "${WORKDIR}/fcitx-mozc/src/unix/fcitx" unix || die
- else
- eapply -p2 "${DISTDIR}/fcitx-mozc-${FCITX_PATCH_VERSION}.patch"
- fi
- fi
-
- eapply_user
-
- sed \
- -e "s/def GypMain(options, unused_args):/def GypMain(options, gyp_args):/" \
- -e "s/RunOrDie(gyp_command + gyp_options)/RunOrDie(gyp_command + gyp_options + gyp_args)/" \
- -e "s/RunOrDie(\[ninja/&, '-j$(makeopts_jobs)', '-l$(makeopts_loadavg "${MAKEOPTS}" 0)', '-v'/" \
- -i build_mozc.py || die
-
- sed \
- -e "s/'release_extra_cflags%': \['-O2'\]/'release_extra_cflags%': []/" \
- -e "s/'debug_extra_cflags%': \['-O0', '-g'\]/'debug_extra_cflags%': []/" \
- -i gyp/common.gypi || die
-
- local ar=($(tc-getAR))
- local cc=($(tc-getCC))
- local cxx=($(tc-getCXX))
- local ld=($(tc-getLD))
- local nm=($(tc-getNM))
- local readelf=($(tc-getPROG READELF readelf))
-
- # Use absolute paths. Non-absolute paths are mishandled by GYP.
- ar[0]=$(type -P ${ar[0]})
- cc[0]=$(type -P ${cc[0]})
- cxx[0]=$(type -P ${cxx[0]})
- ld[0]=$(type -P ${ld[0]})
- nm[0]=$(type -P ${nm[0]})
- readelf[0]=$(type -P ${readelf[0]})
-
- sed \
- -e "s:<!(which ar):${ar[@]}:" \
- -e "s:<!(which clang):${cc[@]}:" \
- -e "s:<!(which clang++):${cxx[@]}:" \
- -e "s:<!(which ld):${ld[@]}:" \
- -e "s:<!(which nm):${nm[@]}:" \
- -e "s:<!(which readelf):${readelf[@]}:" \
- -i gyp/common.gypi || die
-}
-
-src_configure() {
- if use debug; then
- BUILD_TYPE="Debug"
- else
- BUILD_TYPE="Release"
- fi
-
- local gyp_arguments=()
-
- if tc-is-gcc; then
- gyp_arguments+=(-D compiler_host=gcc -D compiler_target=gcc)
- elif tc-is-clang; then
- gyp_arguments+=(-D compiler_host=clang -D compiler_target=clang)
- else
- gyp_arguments+=(-D compiler_host=unknown -D compiler_target=unknown)
- fi
-
- gyp_arguments+=(-D use_fcitx=$(usex fcitx4 YES NO))
- gyp_arguments+=(-D use_fcitx5=NO)
- gyp_arguments+=(-D use_libgtest=$(usex test 1 0))
- gyp_arguments+=(-D use_libibus=$(usex ibus 1 0))
- gyp_arguments+=(-D use_libjsoncpp=$(usex test 1 0))
- gyp_arguments+=(-D use_libprotobuf=1)
- gyp_arguments+=(-D use_libzinnia=$(usex gui 1 0))
- gyp_arguments+=(-D enable_gtk_renderer=$(usex renderer 1 0))
-
- gyp_arguments+=(-D server_dir="${EPREFIX}/usr/libexec/mozc")
- gyp_arguments+=(-D document_dir="${EPREFIX}/usr/libexec/mozc/documents")
-
- if use handwriting-tegaki; then
- gyp_arguments+=(-D zinnia_model_file="${EPREFIX}/usr/share/tegaki/models/zinnia/handwriting-ja.model")
- elif use handwriting-tomoe; then
- gyp_arguments+=(-D zinnia_model_file="${EPREFIX}/usr/$(get_libdir)/zinnia/model/tomoe/handwriting-ja.model")
- fi
-
- if use ibus; then
- gyp_arguments+=(-D ibus_mozc_path="${EPREFIX}/usr/libexec/ibus-engine-mozc")
- gyp_arguments+=(-D ibus_mozc_icon_path="${EPREFIX}/usr/share/ibus-mozc/product_icon.png")
- fi
-
- unset AR CC CXX LD NM READELF
-
- execute "${PYTHON}" build_mozc.py gyp \
- --gypdir="${EPREFIX}/usr/bin" \
- --server_dir="${EPREFIX}/usr/libexec/mozc" \
- --verbose \
- $(usex gui "" --noqt) \
- -- "${gyp_arguments[@]}" || die "Configuration failed"
-}
-
-src_compile() {
- local targets=(server/server.gyp:mozc_server)
- if use emacs; then
- targets+=(unix/emacs/emacs.gyp:mozc_emacs_helper)
- fi
- if use fcitx4; then
- targets+=(unix/fcitx/fcitx.gyp:fcitx-mozc)
- fi
- if use gui; then
- targets+=(gui/gui.gyp:mozc_tool)
- fi
- if use ibus; then
- targets+=(unix/ibus/ibus.gyp:ibus_mozc)
- fi
- if use renderer; then
- targets+=(renderer/renderer.gyp:mozc_renderer)
- fi
- if use test; then
- targets+=(gyp/tests.gyp:unittests)
- fi
-
- execute "${PYTHON}" build_mozc.py build -c ${BUILD_TYPE} -v "${targets[@]}" || die "Building failed"
-
- if use emacs; then
- elisp-compile unix/emacs/*.el
- fi
-}
-
-src_test() {
- execute "${PYTHON}" build_mozc.py runtests -c ${BUILD_TYPE} --test_jobs 1 || die "Testing failed"
-}
-
-src_install() {
- exeinto /usr/libexec/mozc
- doexe out_linux/${BUILD_TYPE}/mozc_server
-
- if use gui; then
- doexe out_linux/${BUILD_TYPE}/mozc_tool
- fi
-
- if use renderer; then
- doexe out_linux/${BUILD_TYPE}/mozc_renderer
- fi
-
- insinto /usr/libexec/mozc/documents
- doins data/installer/credits_en.html
-
- if use emacs; then
- dobin out_linux/${BUILD_TYPE}/mozc_emacs_helper
- elisp-install ${PN} unix/emacs/*.{el,elc}
- elisp-site-file-install "${FILESDIR}/${SITEFILE}" ${PN}
- fi
-
- if use fcitx4; then
- exeinto /usr/$(get_libdir)/fcitx
- doexe out_linux/${BUILD_TYPE}/fcitx-mozc.so
-
- insinto /usr/share/fcitx/addon
- doins unix/fcitx/fcitx-mozc.conf
-
- insinto /usr/share/fcitx/inputmethod
- doins unix/fcitx/mozc.conf
-
- insinto /usr/share/fcitx/mozc/icon
- newins data/images/product_icon_32bpp-128.png mozc.png
- local image
- for image in data/images/unix/ui-*.png; do
- newins "${image}" "mozc-${image#data/images/unix/ui-}"
- done
-
- local locale mo_file
- for mo_file in out_linux/${BUILD_TYPE}/gen/unix/fcitx/po/*.mo; do
- locale="${mo_file##*/}"
- locale="${locale%.mo}"
- insinto /usr/share/locale/${locale}/LC_MESSAGES
- newins "${mo_file}" fcitx-mozc.mo
- done
- fi
-
- if use ibus; then
- exeinto /usr/libexec
- newexe out_linux/${BUILD_TYPE}/ibus_mozc ibus-engine-mozc
-
- insinto /usr/share/ibus/component
- doins out_linux/${BUILD_TYPE}/gen/unix/ibus/mozc.xml
-
- insinto /usr/share/ibus-mozc
- newins data/images/unix/ime_product_icon_opensource-32.png product_icon.png
- local image
- for image in data/images/unix/ui-*.png; do
- newins "${image}" "${image#data/images/unix/ui-}"
- done
- fi
-}
-
-pkg_postinst() {
- elog
- elog "ENVIRONMENTAL VARIABLES"
- elog
- elog "MOZC_SERVER_DIRECTORY"
- elog " Mozc server directory"
- elog " Value used by default: \"${EPREFIX}/usr/libexec/mozc\""
- elog "MOZC_DOCUMENTS_DIRECTORY"
- elog " Mozc documents directory"
- elog " Value used by default: \"${EPREFIX}/usr/libexec/mozc/documents\""
- elog "MOZC_CONFIGURATION_DIRECTORY"
- elog " Mozc configuration directory"
- elog " Value used by default: \"~/.mozc\""
- if use gui; then
- elog "MOZC_ZINNIA_MODEL_FILE"
- elog " Zinnia handwriting recognition model file"
- if use handwriting-tegaki; then
- elog " Value used by default: \"${EPREFIX}/usr/share/tegaki/models/zinnia/handwriting-ja.model\""
- elif use handwriting-tomoe; then
- elog " Value used by default: \"${EPREFIX}/usr/$(get_libdir)/zinnia/model/tomoe/handwriting-ja.model\""
- fi
- elog " Potential values:"
- elog " \"${EPREFIX}/usr/share/tegaki/models/zinnia/handwriting-ja.model\""
- elog " \"${EPREFIX}/usr/$(get_libdir)/zinnia/model/tomoe/handwriting-ja.model\""
- fi
- elog
- if use emacs; then
- elog
- elog "USAGE IN EMACS"
- elog
- elog "mozc-mode is minor mode to input Japanese text using Mozc server."
- elog "mozc-mode can be used via LEIM (Library of Emacs Input Method)."
- elog
- elog "In order to use mozc-mode by default, the following settings should be added to"
- elog "Emacs init file (~/.emacs.d/init.el or ~/.emacs):"
- elog
- elog " (require 'mozc)"
- elog " (set-language-environment \"Japanese\")"
- elog " (setq default-input-method \"japanese-mozc\")"
- elog
- elog "With the above settings, typing C-\\ (which is bound to \"toggle-input-method\""
- elog "by default) will enable mozc-mode."
- elog
- elog "Alternatively, at run time, after loading mozc.el, mozc-mode can be activated by"
- elog "calling \"set-input-method\" and entering \"japanese-mozc\"."
- elog
-
- elisp-site-regen
- fi
-}
-
-pkg_postrm() {
- if use emacs; then
- elisp-site-regen
- fi
-}