Diffstat (limited to 'app-i18n')
-rw-r--r--  app-i18n/Manifest.gz  |  bin 20662 -> 20669 bytes
-rw-r--r--  app-i18n/ibus-libpinyin/Manifest  |  2
-rw-r--r--  app-i18n/ibus-libpinyin/ibus-libpinyin-1.11.1.ebuild  |  7
-rw-r--r--  app-i18n/libchewing/Manifest  |  4
-rw-r--r--  app-i18n/libchewing/libchewing-0.5.1.ebuild  |  4
-rw-r--r--  app-i18n/libchewing/libchewing-9999.ebuild  |  4
-rw-r--r--  app-i18n/librime/Manifest  |  2
-rw-r--r--  app-i18n/librime/librime-1.4.0.ebuild  |  37
-rw-r--r--  app-i18n/mozc/Manifest  |  8
-rw-r--r--  app-i18n/mozc/files/mozc-2.23.2815.102-python-3_1.patch  |  621
-rw-r--r--  app-i18n/mozc/files/mozc-2.23.2815.102-python-3_2.patch  |  600
-rw-r--r--  app-i18n/mozc/files/mozc-2.23.2815.102-python-3_3.patch  |  583
-rw-r--r--  app-i18n/mozc/files/mozc-2.23.2815.102-python-3_4.patch  |  537
-rw-r--r--  app-i18n/mozc/mozc-2.23.2815.102.ebuild  |  8
-rw-r--r--  app-i18n/mozc/mozc-9999.ebuild  |  8
-rw-r--r--  app-i18n/tagainijisho/Manifest  |  8
-rw-r--r--  app-i18n/tagainijisho/tagainijisho-1.2.0_pre20180610092832.ebuild  |  189
-rw-r--r--  app-i18n/tagainijisho/tagainijisho-1.2.0_pre20190507124027_p20191001_p20191001.ebuild  |  200
18 files changed, 2372 insertions(+), 450 deletions(-)
diff --git a/app-i18n/Manifest.gz b/app-i18n/Manifest.gz
index dcf121882f52..bc836733fe3a 100644
--- a/app-i18n/Manifest.gz
+++ b/app-i18n/Manifest.gz
Binary files differ
diff --git a/app-i18n/ibus-libpinyin/Manifest b/app-i18n/ibus-libpinyin/Manifest
index 3b1f91cf248f..fa395fe98d28 100644
--- a/app-i18n/ibus-libpinyin/Manifest
+++ b/app-i18n/ibus-libpinyin/Manifest
@@ -2,5 +2,5 @@ DIST boost.282b1e01f5bc5ae94347474fd8c35cb2f7a7e65d.m4 55907 BLAKE2B 16ac8109445
DIST ibus-libpinyin-1.10.0.tar.gz 1693440 BLAKE2B 5e0d9debddfbb81fcb45ac86d1fe8fb1220a50c527222fb3c3431149231f41dcc59ae87da9ae9485e2585f0e3051ac4d1792407ba62d90e5e0323c482d52318f SHA512 c9fdc28562714bddc889805e1d439a7dd425f721a8236b8f8ec4727ce24e5053693b3e5bd9b06ced22ceb66bc7621715f7ee52e00af3b8b113503a5ce6963ca0
DIST ibus-libpinyin-1.11.1.tar.gz 1728202 BLAKE2B 6e2273c4ff1be76bb8b0e65b915e039c0a181981d20ed94662184e161e8639ad3d548de981d7bbae950ab8b5019bedaed9593e03282e2f9d207f831929d422bc SHA512 48691952a10f4ab545d8e233dc3cd75fc54270c03a687820048152636df26d233ee0fba89e4ec2ccd5a6e410b41e556872e3dd2468d59d44b3440e904b93528b
EBUILD ibus-libpinyin-1.10.0.ebuild 1498 BLAKE2B d448d1f16bf63347a0aaccad094d0f6ad0b89a0e05318f86e2593711f3800f36d2c064ebf0e6c113fb1e3e3800f49b6f070e0140abbb32b5c9f3b591ca6d201a SHA512 fe81e55e6f6b2e7cdc7dc1615b0d0c2435458a9e4e3f44f44633853add01710b904138a2e967a16d94f8633d9623bc21f57477d3f871c2acadb85510b3bda686
-EBUILD ibus-libpinyin-1.11.1.ebuild 1164 BLAKE2B f5d8d47ccf6efde76cb0a3193a54816f1bebd9755196552e1f07bad76224ae7b09beb2f0592d3e040cdc66f0e2da930760fff2c3e0baef843751837a12d6f92b SHA512 7933a6669b7481dd39ea12bf3b23574c7ec6acc369a4fd47518be167554465fab2059fc6e580f9097ee258bb8815380b0bee7a22703192bd49d466d5e040bc84
+EBUILD ibus-libpinyin-1.11.1.ebuild 1196 BLAKE2B d9ee71c07c31dfcfc62ea876e9ecd885c96c73ae0321cbb3de2db15d7f2a6fa51f3a04dd2ec73936ad8a3656293b278fcecf229c21539ddfae9b10248f8ec9d2 SHA512 78cdc402fccc1a9f024a6cd015f9e5b768ebb2e440dc6e346b8c56170e3fd97e60d9b7a1b0feb62722936ebeb7450a19cfc3da83b08a137884279594c84c06fd
MISC metadata.xml 807 BLAKE2B 864c61e2dac53fc0b95541bf79e8f27a183f879bab95574f7cf462bfe276c89c8a5d2b65c899f4734c487feff1113a20ee02debd48b4ff955a24ace70ed5006e SHA512 c92e0e2c47a1272285c3a36cfc18c739df2a69dc911a8849cdc9c517183309908fdae894e88ed7511645b1c90c3611670b046f1a2c7c48fd0ff7c79d2de0a5b0
diff --git a/app-i18n/ibus-libpinyin/ibus-libpinyin-1.11.1.ebuild b/app-i18n/ibus-libpinyin/ibus-libpinyin-1.11.1.ebuild
index 5f64ebaf5818..510770469de5 100644
--- a/app-i18n/ibus-libpinyin/ibus-libpinyin-1.11.1.ebuild
+++ b/app-i18n/ibus-libpinyin/ibus-libpinyin-1.11.1.ebuild
@@ -1,4 +1,4 @@
-# Copyright 1999-2020 Gentoo Authors
+# Copyright 2015-2020 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
EAPI="7"
@@ -18,15 +18,16 @@ KEYWORDS="~amd64 ~x86"
IUSE="boost lua opencc"
REQUIRED_USE="${PYTHON_REQUIRED_USE}"
-BDEPEND="
+BDEPEND="dev-db/sqlite:3
virtual/pkgconfig"
DEPEND="${PYTHON_DEPS}
app-i18n/ibus[python(+),${PYTHON_USEDEP}]
>=app-i18n/libpinyin-2.1.0:=
+ dev-db/sqlite:3
+ dev-libs/glib:2
dev-python/pygobject:3[${PYTHON_USEDEP}]
virtual/libintl
- dev-db/sqlite:3
boost? ( dev-libs/boost:= )
lua? ( dev-lang/lua:0 )
opencc? ( app-i18n/opencc:= )"
diff --git a/app-i18n/libchewing/Manifest b/app-i18n/libchewing/Manifest
index 12330029963b..d68fefa571b4 100644
--- a/app-i18n/libchewing/Manifest
+++ b/app-i18n/libchewing/Manifest
@@ -1,5 +1,5 @@
AUX libchewing-0.5.1-autoconf-archive-2019.01.06.patch 34944 BLAKE2B 582e2041ff8e99c90df6726ffe0fe7c3f09672ba841def4ded4f367d704a56349da9fe8c555e9b24ec5c82d73774c8690219532c08904cda29a2a33845d2f3d6 SHA512 8e93539fdea3fc6ef01ea070a64d6bdd873ec1c2555fbcb40cc639a22c74a07f8c2936eb100ebb36d146c0e17ce5f80afa8dd3468ff1f5c26a835c3d12b98bb2
DIST libchewing-0.5.1.tar.bz2 3141084 BLAKE2B 28f9e803c8815c0e1b1a1134becffe12f92c1ae24b4b6d4163769d898861fec024db8332befe7130487a72fc20859b6292837e9d68ab1b6477de4cf4f789f2ee SHA512 a6551661bb5e56bba59730b93b373db56af7d42f2ab4881fbfff8727689dd2604d42d2e5c0b04820df66a431dfb5fcb73cc5c9b538615da76198ee3635501c1f
-EBUILD libchewing-0.5.1.ebuild 1313 BLAKE2B b6790434e94f5e82811dfc421c00e23199fe6e6748bebd53bd370a9bd6a3dbc5747e727036de51134607728e60875f11a1df083b2da3f7224f5031aa32fdca0f SHA512 da6256d7a243a8cfb5fcd019deb9dea17fed8cdc54c0c9849efcf5e16cf8b99d3fa3587a9dbfcd22c0d11e3b0a62d6a852c8a3e6957304cbfced95195062fccb
-EBUILD libchewing-9999.ebuild 1282 BLAKE2B 2f634e9bbfbd708e33a907639066f90dacf2e4d68463642223f92fadee8e43940d8a738771dba712d0326fd0689fcef313b3cafe0819fbfe799ea4c31cf6dbf0 SHA512 d31d22a67c4a7f0f5690313b11792fee0e0e3489131361ecacbd0b9e9992f19e3031de13dae03c8d0864c6aeda710663d1f8ad4f48be9b09321481961afb1f06
+EBUILD libchewing-0.5.1.ebuild 1381 BLAKE2B 546fefdea9ca8ca10cce5b11d67733ba33ea7cbb329e305a7a4f09ad0964ed68cf84ce4325d5f9dcc69c94fc9ca1ebfcbcc101d1864fafd5b85e7d5b19ba6d93 SHA512 c061d1cdc0fe35280c2a859a1960ca6fafebc57676cf9f3622cf01023abec753994d7a214fe39326b74f986ec9ee094979d113348ec6ba864d9157fb179ca9f6
+EBUILD libchewing-9999.ebuild 1350 BLAKE2B 6b76defcf101a6df5958bbdf491199ec3828f438c2939345500734e1764d9a6b15abe0d1e1513ec2b4aaee52ac7e5f749b1de82c11000f0ef5664e386f24db9c SHA512 29a7c50f47c409179c80c5ccb1422c489931fda72d8682378aa954d148048c86943c7a88f4d0eea77dab87e13e308aa7e25b3d9c0f0a390d2f0002a57549e56c
MISC metadata.xml 327 BLAKE2B 3f9b234590aef775dc0ae62f4eef42cc1e363e04c3dabc2464eee9cfb44ec0b1fc4fe9b233f781c15784b3a699537d67e884ee7f1648a62b3a863480a5e831cd SHA512 c9ab28ab34d940a30e97b989fd96904d2e0816e4b1bc5b843b082d64d6cd476335d9402640e71f2cf8f57f44dcae9af66d5e07075b189fa74f21faba40d819e3
diff --git a/app-i18n/libchewing/libchewing-0.5.1.ebuild b/app-i18n/libchewing/libchewing-0.5.1.ebuild
index 1c78e61d60e1..796538bfb4f3 100644
--- a/app-i18n/libchewing/libchewing-0.5.1.ebuild
+++ b/app-i18n/libchewing/libchewing-0.5.1.ebuild
@@ -1,4 +1,4 @@
-# Copyright 2004-2019 Gentoo Authors
+# Copyright 2004-2020 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
EAPI="7"
@@ -26,6 +26,8 @@ KEYWORDS="amd64 ~arm ~arm64 ppc ppc64 x86"
IUSE="static-libs test"
RESTRICT="!test? ( test )"
+# BDEPEND="test? ( virtual/pkgconfig )"
+BDEPEND="virtual/pkgconfig"
RDEPEND="dev-db/sqlite:3"
DEPEND="${RDEPEND}
test? ( sys-libs/ncurses[unicode] )"
diff --git a/app-i18n/libchewing/libchewing-9999.ebuild b/app-i18n/libchewing/libchewing-9999.ebuild
index e059c2147b15..041ef001addd 100644
--- a/app-i18n/libchewing/libchewing-9999.ebuild
+++ b/app-i18n/libchewing/libchewing-9999.ebuild
@@ -1,4 +1,4 @@
-# Copyright 2004-2019 Gentoo Authors
+# Copyright 2004-2020 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
EAPI="7"
@@ -26,6 +26,8 @@ KEYWORDS=""
IUSE="static-libs test"
RESTRICT="!test? ( test )"
+# BDEPEND="test? ( virtual/pkgconfig )"
+BDEPEND="virtual/pkgconfig"
RDEPEND="dev-db/sqlite:3"
DEPEND="${RDEPEND}
test? ( sys-libs/ncurses[unicode] )"
diff --git a/app-i18n/librime/Manifest b/app-i18n/librime/Manifest
index 702b85237f8f..834a465528f6 100644
--- a/app-i18n/librime/Manifest
+++ b/app-i18n/librime/Manifest
@@ -1,6 +1,4 @@
AUX librime-1.5.3-log_files_mode.patch 458 BLAKE2B acd2f59f409b3643807ea845fd08bfcfe730cfedaacb7d70a4b1ba017a299cb95b8d815bbbf53b2c0e3c86c4482a2d71a97fef362a6f0404dd45e96b57f78be7 SHA512 d8b12367916ed1ad5deb2e982437d3e5e04592baa6c1e12f6b78ff99b8cc407f2c9fc6f47c0715054572f6f8a5edafcb6f6da033902175cc4357a07faad04420
-DIST librime-1.4.0.tar.gz 8609316 BLAKE2B cce58c398057ac82f076e3b9e9a25cd6410e738460bd58277a9e3ed6cd11e80922bbb30e8b42828cdd5dd83f6058794b4b32fedb359315186b018ca88161794d SHA512 da68983638c6f25d994060a607f2ccab8917a71dbf10b9c6f1140c87c9ef4124e29a1d0ef16bbd0edc9a3a22bd5845aa894888d678607db372750f300638f562
DIST librime-1.5.3.tar.gz 2847083 BLAKE2B 4ffb2c5ddaf52f9c9227fa4ea019ef2965e61139f678798c08ef37dc52b863763651b63ed820caad0de1b06f48ab5c1a7a2682653340d2ae7f5f9eec3cec80b2 SHA512 4d7f6ec43bd5728f59f0b3581bcd8a46128651430b6873017d80659942b8f6aa1a2f25f439ba8fba461fe22acbf701c2da7a786104a2e4852a70a89cdc0452d3
-EBUILD librime-1.4.0.ebuild 843 BLAKE2B cf3ddd814e996bc0e8633ffb4b918281d232729773d23fc72449ebb9ddeb126ad2eba9c5e0d31dd96ecb3ebd2ac40b023144f713191e25330e3d841c90267a3a SHA512 fa8c3031447238f9015fdb43dded2c874fb711e2c24282bb8fd4dda0967513aede1170a05c82dccabb19b5d088f87b35256ede0f7cf5778e4cc91b8201a89410
EBUILD librime-1.5.3-r1.ebuild 1605 BLAKE2B a1e91aa0248c50d3561e47e4e81982137978fc77d797040768136997d8cdd1d2250e45b2b6d73e7a68b4a787426acf75507efb619713eaa893c2ab07a2a19fbf SHA512 2fe3d9cdef8176f38e3c4346542bf09181863fbd62e6968fb452da64c63ba5645ef73588f01eb3fbd1307dbde96a3867754e77c869587b33cf74ec89e02aa4f8
MISC metadata.xml 647 BLAKE2B 2837d0505f1721cbb6a60414b846ad95ddbaca6d682e9f93acc82998430cdbc73038b124c1a0a73d7aedf132535276e6bd88015a47e4f4ebb86619aad38e9061 SHA512 a388004662d39511749e6fcf618fc28411f81ebbf2078ea2b3e4123d63544790fb017ced2df16556fdf763c854d34a7fcf5a59931153f6c94841d9cdcff4d3ec
diff --git a/app-i18n/librime/librime-1.4.0.ebuild b/app-i18n/librime/librime-1.4.0.ebuild
deleted file mode 100644
index 20b5459ab2cb..000000000000
--- a/app-i18n/librime/librime-1.4.0.ebuild
+++ /dev/null
@@ -1,37 +0,0 @@
-# Copyright 2012-2019 Gentoo Authors
-# Distributed under the terms of the GNU General Public License v2
-
-EAPI=6
-
-inherit cmake-utils vcs-snapshot
-
-DESCRIPTION="Rime Input Method Engine, the core library"
-HOMEPAGE="https://rime.im/ https://github.com/rime/librime"
-SRC_URI="https://github.com/rime/${PN}/archive/${PV}.tar.gz -> ${P}.tar.gz"
-
-LICENSE="BSD"
-SLOT="0/1"
-KEYWORDS="amd64 ppc ppc64 x86"
-IUSE="test"
-RESTRICT="!test? ( test )"
-
-RDEPEND="app-i18n/opencc:=
- dev-cpp/glog:=
- dev-cpp/yaml-cpp:=
- dev-libs/boost:=[nls,threads]
- dev-libs/leveldb:=
- dev-libs/marisa:="
-DEPEND="${RDEPEND}
- x11-base/xorg-proto
- test? ( dev-cpp/gtest )"
-
-DOCS=( {CHANGELOG,README}.md )
-
-src_configure() {
- local mycmakeargs=(
- -DBUILD_TEST=$(usex test)
- -DBOOST_USE_CXX11=ON
- -DLIB_INSTALL_DIR="${EPREFIX}"/usr/$(get_libdir)
- )
- cmake-utils_src_configure
-}
diff --git a/app-i18n/mozc/Manifest b/app-i18n/mozc/Manifest
index aed1d056cd1e..977ac94979d8 100644
--- a/app-i18n/mozc/Manifest
+++ b/app-i18n/mozc/Manifest
@@ -4,12 +4,16 @@ AUX mozc-2.20.2673.102-tests_skipping.patch 2086 BLAKE2B a104d6a83b02b49e1208be1
AUX mozc-2.23.2815.102-environmental_variables.patch 4636 BLAKE2B 2c1d952899b50d0205127fe5f708c8cc8ad115db35f1ebfe5b589550203ee64fe06b0d66b10989c12063feff96f679ebd6ee4562651ac81681019634e6e9c462 SHA512 40e87a52d96794a91f5cf77f387d341b7d58a4b27e3d1455c4230fbe75107c09c3bd4784394437265548ee2704a4d1838cc0965f0333e554484dafe8b106cb7b
AUX mozc-2.23.2815.102-gcc-8.patch 496 BLAKE2B 318fcda19cf4f7e95920b5df4f5638621bcae992891941fa6f220d4dac1b2eac0faeda7a857a587baed41b361608f20c4bbda0d9a69a60b823572482c6789f46 SHA512 9c2b0e9695f4cd050f0d5f35d0e23f22715b9c6e5607629c7dc023560a5191bd755497fe6fe5306789c00e68d5bd9b7179d5515c982967788fca432134b71cad
AUX mozc-2.23.2815.102-protobuf_generated_classes_no_inheritance.patch 40296 BLAKE2B 982f43fa68031eb0f779ec4d034fef838a4fce7834291db889c9edffba3df4acd5bfdf187dd4a52ee8fd0714de943f9d5112070cd69502e1449dab4dbf8543b2 SHA512 6e05b2f22663ddbfb24a8344928ec24c0b4cf523728c1623367970b8f263142af95c056c82d0827414833d5b9f26c3a024a04a688851021601a5cbcc1474e754
+AUX mozc-2.23.2815.102-python-3_1.patch 19991 BLAKE2B e553f5b3beb7a82f9f91f585a17004f96d9bb3883b432af2f4232211de3d8c4e348cf0d1327fe2e49410112540a01533068ca76464a7deb79429bfa7c49f58c8 SHA512 7d51f282fa3132d279b979ae96d2d7a1a3009c2ad96544033084deb0b739638ae69263b0067172a890d5ca3dd7e47f412af05b8f57ee64505a040cbdb77af388
+AUX mozc-2.23.2815.102-python-3_2.patch 17794 BLAKE2B 53849d003e3821a6d07e5019d0defb9b4558f91938da2367e82cf5327d2c69e13107eb91b7c05b731e1693ca02ca1e61771b81c29d391e412a43fd0fe64973b8 SHA512 a8d017d9b5aa7c89638fcb464a016b8e98b2e20dacc0c68c4362824cad315e0c76c15cabce84059de4a3d2184c1388289f253ebd22f1c640a3946a1189955d72
+AUX mozc-2.23.2815.102-python-3_3.patch 17011 BLAKE2B b7a40ec699da304130ab8b0e149d57ebc1b31c608c03fb35104918e0d33289eb5b40211a18f2083a2addcaed68b691ef2d029c106e2f2fec861f416a2e5f2134 SHA512 52b5cd4ee5e61582f2b9172a927e9e54bf07aea90462448fd63385c7be12c37b19cbdd784a21db3950ec4269249221f4f3bb3570ce0068d5a2448db63a33463c
+AUX mozc-2.23.2815.102-python-3_4.patch 19042 BLAKE2B 18ee638acdb1d086b01034b62e40c1bbd1ac47c43447bf4c3189f0427fb559c8b442c5828304378b607850faae9b5cc571270248c21db76dfdde60ff73f6aa93 SHA512 97cd4e2e10d7fdf3806a0750f90e537649d7eb29b893c5ec1d6e32abea0e1a1f4a7e94788733486aa27948e48d3362a1a76318595463edaa7bcd1c9d9c47a194
AUX mozc-2.23.2815.102-reiwa.patch 924 BLAKE2B 3893f975d43ce29a962c5e712503442b178847acebd92e797040b6b699a37cb051f88a37965e0f194f73bca2e2397a8b070eb6d14bbe92adbba3e17a864e8c8b SHA512 df3f98ab93d7662b5ab038c732d3342b3f5860774559242eca1d0f1cd67275f315c6e4ffad83c6990ef5eb23fc19c0379ed7d3bdd0a377fcb080c066aecd16cc
AUX mozc-2.23.2815.102-server_path_check.patch 3447 BLAKE2B 8e18cf2f7100cdfeb8e6b1420d773e955994cc7bd5e4bf56e0ffe78cd9a96b044c726c1045c2cd2c326ca151c8bf527b6447b2f509a20e4a912b535f5180ec80 SHA512 106c3170112bde2c6b9eb9ad5d5d460be53bb9162eb5613445170c2ce00f88385946360d13514167a6279c610744784079f8969b8f901f22e51e6397db22b0d3
AUX mozc-2.23.2815.102-system_libraries.patch 9064 BLAKE2B 0cdf732a1bbf16d4af1d6dee81aacf3f3cb1f1c00c6aeb0fc12dac9dcd8611124e388e5fc721eb9b9472e073515d7457b182ee7cfe466b83bf319d572ae55240 SHA512 2d5b06e855f8c1889367b9b672e3ec81a037bc592872e28319e0180a0dcd177cdff817775a1a0f74ebf48e0b7558cf3b67953120411be753c662c43f409b05ce
DIST fcitx-mozc-2.23.2815.102.1.patch 295112 BLAKE2B 709b84f6eaed16da38a173f40ae7cccff362fd167e6deb4090ae8a9ec522ac8e11ccff3c9ef6433907249af8c9eb4b7be12d2c05564cabd45c25e26764286ed3 SHA512 e0d4645df919838f0fe31a97bf6dd759c222a696f68133f7584d2c771f70734ea634a25bebb03a756221000d3552423207ee5163d75778dbf480b6e267ba4cd0
DIST japanese-usage-dictionary-20120416091336.tar.gz 71051 BLAKE2B 08eecf0aa021e27a2813f58c2d37f1cec760448f6ae086ae7468b8a11575c6ef9f72f656cb4d53e0179b8a7b00f2d91ff51a0ad7825e078dcbac0810f1d8b3e1 SHA512 b7e997a979b6d50e49b9af5dc830ea4df0532f6ab1321b9ef14983f65bb54f1b2967375c82e07957ae7693ebbf43c9b56ecea6bfea8dd1fdaee444bd549d83a7
DIST mozc-2.23.2815.102.tar.gz 47739041 BLAKE2B 045a8a4a07e09cf923b67824111cdf672febc30256a6aef951ae779a3f582b3860042750d766120e376898c63be5b4baea870798a192cee34b0d48647e1ec5e6 SHA512 a3face616ca89990bca52371dcc8003604ebe0e9633116a64550add070152b1bc4d9b21e9f102c5afa6f8b1aa11d8dbc4bafbcebfaf4a12a934f085f245d548f
-EBUILD mozc-2.23.2815.102.ebuild 10873 BLAKE2B 0402f153f37cd286c38c2d14753af354bc8c2433546e56b670983f5e6d3167e7d091673be84ff4e37a9c36b0b3749298a109a15e0b863d9c5a31d3324334043d SHA512 ea21a724c331befdb6b5ffc8c6f065de40d3ead15aa26c4a230224966dd728962476efa097e3abc510f9cfc5872a4c822d0a5ac3305f7e959dab86a44e2efbcb
-EBUILD mozc-9999.ebuild 10713 BLAKE2B 8c22d2a713e01605abe92717e3b4021c76b11dc7fb0e1f6c4bd93b8000810deb1f95f506f9b17899449b0e67b62d3b5d6ce6bc557b04f3aa244a0f2300c2ef75 SHA512 709319d9bea172d9d0233657ec53831d2cc622cf93c2c57bd334c8474ccb9ab6be87324add7b96ed3cd50dae877822997dfeb2fad2b4ce42cc7a52cbac4485ee
+EBUILD mozc-2.23.2815.102.ebuild 11135 BLAKE2B fec77d0718f9167888f0e2a05878bc4fc8e4baa72505d5a5d115a40919040a31fb62d39aa3a40ff57c61f310069088c6b86989db3f917c33bcebd9e21cd44b7a SHA512 3122e5eda567097023530b184e97311ab385d39aba989cf15e62eab877efdecacba61954b42f4e10ecb7228a398b9ba88a6bca51c8d93824a449965f640199dc
+EBUILD mozc-9999.ebuild 10975 BLAKE2B 392a173331afd7c3737af4781fd45323d49689e2165d81051da12b3baa799b49d30586f18064e1be2d75c8af0fff0ce9af4e2c9e240a8da00601ca5c0b4ec860 SHA512 175d3c717f436f59f22855256a0ec09a7b94ad52dc31e1744b03dfaf69b4ae369b6abb45b77dc18b8c0158c2b21ba56a67adb89baab5e598ebf1b06bde50c464
MISC metadata.xml 1149 BLAKE2B 62ba0c21b06854dc0873cd3677245f083cb21923de1f6aeea41065b8d216f54c485f11474afae8901682d5bdb9fe79c19e4e54af2051984a0e69639d80de72d3 SHA512 55c96d85f11cca6cb6b00ab81e2b84905c20db880f0c0dc7484e4b7210e31699fc340ce4a5e32234fda68e20de9d3fef635091658b916dd238a556a60ce9476a
diff --git a/app-i18n/mozc/files/mozc-2.23.2815.102-python-3_1.patch b/app-i18n/mozc/files/mozc-2.23.2815.102-python-3_1.patch
new file mode 100644
index 000000000000..2b9bbd720cde
--- /dev/null
+++ b/app-i18n/mozc/files/mozc-2.23.2815.102-python-3_1.patch
@@ -0,0 +1,621 @@
+https://github.com/google/mozc/issues/462
+
+--- /src/base/gen_character_set.py
++++ /src/base/gen_character_set.py
+@@ -33,7 +33,6 @@
+ import itertools
+ import optparse
+ import re
+-import string
+ import sys
+
+
+@@ -89,7 +88,8 @@
+ @staticmethod
+ def _LoadTable(filename, column_index, pattern, validater):
+ result = set()
+- for line in open(filename):
++ fh = open(filename)
++ for line in fh:
+ if line.startswith('#'):
+ # Skip a comment line.
+ continue
+@@ -100,6 +100,7 @@
+ ucs = int(match.group(1), 16)
+ if validater(ucs):
+ result.add(ucs)
++ fh.close()
+
+ return result
+
+@@ -250,7 +251,7 @@
+ # (at most) four code points.
+ bit_list = []
+ for _, group in itertools.groupby(enumerate(category_list),
+- lambda (codepoint, _): codepoint / 4):
++ lambda x: x[0] // 4):
+ # Fill bits from LSB to MSB for each group.
+ bits = 0
+ for index, (_, category) in enumerate(group):
+@@ -263,7 +264,7 @@
+
+ # Output the content. Each line would have (at most) 16 bytes.
+ for _, group in itertools.groupby(enumerate(bit_list),
+- lambda (index, _): index / 16):
++ lambda x: x[0] // 16):
+ line = [' \"']
+ for _, bits in group:
+ line.append('\\x%02X' % bits)
+@@ -386,7 +387,7 @@
+ # Bitmap lookup.
+ # TODO(hidehiko): the bitmap has two huge 0-bits ranges. Reduce them.
+ category_map = [
+- (bits, category) for category, bits in CATEGORY_BITMAP.iteritems()]
++ (bits, category) for category, bits in CATEGORY_BITMAP.items()]
+ category_map.sort()
+
+ lines.extend([
+@@ -451,7 +452,7 @@
+ options.jisx0213file)
+ category_list = [
+ categorizer.GetCategory(codepoint)
+- for codepoint in xrange(categorizer.MaxCodePoint() + 1)]
++ for codepoint in range(categorizer.MaxCodePoint() + 1)]
+ generated_character_set_header = GenerateCharacterSetHeader(category_list)
+
+ # Write the result.
+--- /src/base/gen_config_file_stream_data.py
++++ /src/base/gen_config_file_stream_data.py
+@@ -58,7 +58,7 @@
+ result = []
+ result.append(' { "%s", "' % os.path.basename(path))
+ with open(path, 'rb') as stream:
+- result.extend(r'\x%02X' % ord(byte) for byte in stream.read())
++ result.extend(r'\x%02X' % byte for byte in stream.read())
+ result.append('", %d }' % os.path.getsize(path))
+
+ return ''.join(result)
+@@ -93,8 +93,8 @@
+ def main():
+ (options, args) = ParseOptions()
+ if not options.output:
+- print >>sys.stderr, (
+- 'usage: gen_config_file_stream_data.py --output=filepath input ...')
++ print('usage: gen_config_file_stream_data.py --output=filepath input ...',
++ file=sys.stderr)
+ sys.exit(2)
+
+ with open(options.output, 'w') as output:
+--- /src/build_mozc.py
++++ /src/build_mozc.py
+@@ -943,7 +943,7 @@
+ logging.info('running %s...', binary)
+ try:
+ test_function(binary, gtest_report_dir, options)
+- except RunOrDieError, e:
++ except RunOrDieError as e:
+ logging.error(e)
+ failed_tests.append(binary)
+ else:
+@@ -1082,7 +1082,7 @@
+ # and '-c' and 'Release' are build options.
+ targets = []
+ build_options = []
+- for i in xrange(len(args)):
++ for i in range(len(args)):
+ if args[i].startswith('-'):
+ # starting with build options
+ build_options = args[i:]
+@@ -1190,14 +1190,14 @@
+
+ def ShowHelpAndExit():
+ """Shows the help message."""
+- print 'Usage: build_mozc.py COMMAND [ARGS]'
+- print 'Commands: '
+- print ' gyp Generate project files.'
+- print ' build Build the specified target.'
+- print ' runtests Build all tests and run them.'
+- print ' clean Clean all the build files and directories.'
+- print ''
+- print 'See also the comment in the script for typical usage.'
++ print('Usage: build_mozc.py COMMAND [ARGS]')
++ print('Commands: ')
++ print(' gyp Generate project files.')
++ print(' build Build the specified target.')
++ print(' runtests Build all tests and run them.')
++ print(' clean Clean all the build files and directories.')
++ print('')
++ print('See also the comment in the script for typical usage.')
+ sys.exit(1)
+
+
+--- /src/build_tools/android_util.py
++++ /src/build_tools/android_util.py
+@@ -548,7 +548,7 @@
+ (devices_result, _) = process.communicate()
+ used_ports = set(int(port) for port
+ in re.findall(r'emulator-(\d+)', devices_result))
+- return [port for port in xrange(5554, 5586, 2) if port not in used_ports]
++ return [port for port in range(5554, 5586, 2) if port not in used_ports]
+
+
+ def SetUpTestingSdkHomeDirectory(dest_android_sdk_home,
+@@ -575,7 +575,7 @@
+ 'create', 'avd',
+ '--force',
+ '--sdcard', '512M',]
+- for key, value in options.iteritems():
++ for key, value in options.items():
+ args.extend([key, value])
+ env = {'ANDROID_SDK_HOME': os.path.abspath(dest_android_sdk_home)}
+ logging.info('Creating AVD: %s', args)
+@@ -615,7 +615,7 @@
+ def main():
+ for arg in sys.argv[1:]:
+ for item in sorted(GetApkProperties(arg).items()):
+- print '%s: %s' % item
++ print('%s: %s' % item)
+
+
+ if __name__ == '__main__':
+--- /src/build_tools/binary_size_checker.py
++++ /src/build_tools/binary_size_checker.py
+@@ -70,12 +70,12 @@
+ actual_size = os.stat(filename).st_size
+ expected_size = EXPECTED_MAXIMUM_SIZES[basename]
+ if actual_size < expected_size * 1024 * 1024:
+- print 'Pass: %s (size: %d) is smaller than expected (%d MB)' % (
+- filename, actual_size, expected_size)
++ print('Pass: %s (size: %d) is smaller than expected (%d MB)' % (
++ filename, actual_size, expected_size))
+ return True
+ else:
+- print 'WARNING: %s (size: %d) is larger than expected (%d MB)' % (
+- filename, actual_size, expected_size)
++ print('WARNING: %s (size: %d) is larger than expected (%d MB)' % (
++ filename, actual_size, expected_size))
+ return False
+
+
+--- /src/build_tools/build_and_sign_pkg_mac.py
++++ /src/build_tools/build_and_sign_pkg_mac.py
+@@ -44,8 +44,8 @@
+ import shutil
+ import sys
+
+-from util import PrintErrorAndExit
+-from util import RunOrDie
++from .util import PrintErrorAndExit
++from .util import RunOrDie
+
+
+ def ParseOption():
+--- /src/build_tools/build_breakpad.py
++++ /src/build_tools/build_breakpad.py
+@@ -54,9 +54,9 @@
+ try:
+ subprocess.check_output(command)
+ except subprocess.CalledProcessError as e:
+- print e.output
++ print(e.output)
+ sys.exit(e.returncode)
+- print 'Done: %s' % ' '.join(command)
++ print('Done: %s' % ' '.join(command))
+
+
+ def Xcodebuild(projdir, target, arch, sdk, outdir):
+--- /src/build_tools/build_diskimage_mac.py
++++ /src/build_tools/build_diskimage_mac.py
+@@ -90,7 +90,7 @@
+ # setup volume directory
+ temp_dir = tempfile.mkdtemp()
+ CopyFile(path.join(build_dir, ".keystone_install"), temp_dir)
+- os.chmod(path.join(temp_dir, ".keystone_install"), 0755) # rwxr-xr-x
++ os.chmod(path.join(temp_dir, ".keystone_install"), 0o755) # rwxr-xr-x
+ for a in args:
+ CopyFile(path.join(build_dir, a), temp_dir)
+
+--- /src/build_tools/change_reference_mac.py
++++ /src/build_tools/change_reference_mac.py
+@@ -41,8 +41,8 @@
+ import optparse
+ import os
+
+-from util import PrintErrorAndExit
+-from util import RunOrDie
++from .util import PrintErrorAndExit
++from .util import RunOrDie
+
+
+ def ParseOption():
+--- /src/build_tools/code_generator_util.py
++++ /src/build_tools/code_generator_util.py
+@@ -33,27 +33,26 @@
+ __author__ = "hidehiko"
+
+ import struct
+-import types
+
+
+ def ToCppStringLiteral(s):
+ """Returns C-style string literal, or NULL if given s is None."""
+ if s is None:
+- return 'NULL'
++ return b'NULL'
+
+- if all(0x20 <= ord(c) <= 0x7E for c in s):
++ if all(0x20 <= c <= 0x7E for c in s):
+ # All characters are in ascii code.
+- return '"%s"' % s.replace('\\', r'\\').replace('"', r'\"')
++ return b'"%b"' % s.replace(b'\\', br'\\').replace(b'"', br'\"')
+ else:
+ # One or more characters are non-ascii.
+- return '"%s"' % ''.join(r'\x%02X' % ord(c) for c in s)
++ return b'"%b"' % b''.join(br'\x%02X' % c for c in s)
+
+
+ def FormatWithCppEscape(format_text, *args):
+ """Returns a string filling format with args."""
+ literal_list = []
+ for arg in args:
+- if isinstance(arg, (types.StringType, types.NoneType)):
++ if isinstance(arg, (bytes, type(None))):
+ arg = ToCppStringLiteral(arg)
+ literal_list.append(arg)
+
+@@ -95,7 +94,7 @@
+ if target_compiler and target_compiler.startswith('msvs'):
+ stream.write('const uint64 k%s_data_wordtype[] = {\n' % variable_name)
+
+- for word_index in xrange(0, len(data), 8):
++ for word_index in range(0, len(data), 8):
+ word_chunk = data[word_index:word_index + 8].ljust(8, '\x00')
+ stream.write('0x%016X, ' % struct.unpack('<Q', word_chunk))
+ if (word_index / 8) % 4 == 3:
+@@ -111,7 +110,7 @@
+ stream.write('const char k%s_data[] =\n' % variable_name)
+ # Output 16bytes per line.
+ chunk_size = 16
+- for index in xrange(0, len(data), chunk_size):
++ for index in range(0, len(data), chunk_size):
+ chunk = data[index:index + chunk_size]
+ stream.write('"')
+ stream.writelines(r'\x%02X' % ord(c) for c in chunk)
+@@ -126,36 +125,50 @@
+ if type(codepoint_list) is int:
+ codepoint_list = (codepoint_list,)
+ if codepoint_list is None or len(codepoint_list) == 0:
+- return 'null'
+- result = r'"'
++ return b'null'
++ result = b'"'
+ for codepoint in codepoint_list:
+- utf16_string = unichr(codepoint).encode('utf-16be')
++ utf16_string = chr(codepoint).encode('utf-16be')
+ if len(utf16_string) == 2:
+ (u0, l0) = utf16_string
+- result += r'\u%02X%02X' % (ord(u0), ord(l0))
++ result += br'\u%02X%02X' % (u0, l0)
+ else:
+ (u0, l0, u1, l1) = utf16_string
+- result += r'\u%02X%02X\u%02X%02X' % (ord(u0), ord(l0), ord(u1), ord(l1))
+- result += r'"'
++ result += br'\u%02X%02X\u%02X%02X' % (u0, l0, u1, l1)
++ result += b'"'
+ return result
+
+
+ def SkipLineComment(stream, comment_prefix='#'):
+ """Skips line comments from stream."""
+ for line in stream:
++ if isinstance(line, bytes):
++ if isinstance(comment_prefix, str):
++ comment_prefix = comment_prefix.encode('utf-8')
++ line_ending = b'\n'
++ else:
++ line_ending = '\n'
+ stripped_line = line.strip()
+ if stripped_line and not stripped_line.startswith(comment_prefix):
+- yield line.rstrip('\n')
++ yield line.rstrip(line_ending)
+
+
+ def ParseColumnStream(stream, num_column=None, delimiter=None):
+ """Returns parsed columns read from stream."""
+ if num_column is None:
+ for line in stream:
+- yield line.rstrip('\n').split(delimiter)
++ if isinstance(line, bytes):
++ line_ending = b'\n'
++ else:
++ line_ending = '\n'
++ yield line.rstrip(line_ending).split(delimiter)
+ else:
+ for line in stream:
+- yield line.rstrip('\n').split(delimiter)[:num_column]
++ if isinstance(line, bytes):
++ line_ending = b'\n'
++ else:
++ line_ending = '\n'
++ yield line.rstrip(line_ending).split(delimiter)[:num_column]
+
+
+ def SelectColumn(stream, column_index):
+@@ -172,5 +185,5 @@
+ grouper extends the last chunk to make it an n-element chunk by adding
+ appropriate value, but this returns truncated chunk.
+ """
+- for index in xrange(0, len(iterable), n):
++ for index in range(0, len(iterable), n):
+ yield iterable[index:index + n]
+--- /src/build_tools/codesign_mac.py
++++ /src/build_tools/codesign_mac.py
+@@ -46,17 +46,17 @@
+
+ def RunOrDie(command):
+ """Run the command, or die if it failed."""
+- print "Running: " + command
++ print("Running: " + command)
+ try:
+ output = subprocess.check_output(command, shell=True)
+- print >> sys.stderr, "=========="
+- print >> sys.stderr, "COMMAND: " + command
+- print >> sys.stderr, output
++ print("==========", file=sys.stderr)
++ print("COMMAND: " + command, file=sys.stderr)
++ print(output, file=sys.stderr)
+ except subprocess.CalledProcessError as e:
+- print >> sys.stderr, "=========="
+- print >> sys.stderr, "ERROR: " + command
+- print >> sys.stderr, e.output
+- print >> sys.stderr, "=========="
++ print("==========", file=sys.stderr)
++ print("ERROR: " + command, file=sys.stderr)
++ print(e.output, file=sys.stderr)
++ print("==========", file=sys.stderr)
+ sys.exit(1)
+
+
+@@ -119,18 +119,18 @@
+ (options, unused_args) = parser.parse_args()
+
+ if not options.target:
+- print "Error: --target should be specified."
+- print parser.print_help()
++ print("Error: --target should be specified.")
++ print(parser.print_help())
+ sys.exit(1)
+
+ return options
+
+
+ def DumpEnviron():
+- print "=== os.environ ==="
++ print("=== os.environ ===")
+ for key in sorted(os.environ):
+- print "%s = %s" % (key, os.getenv(key))
+- print "=================="
++ print("%s = %s" % (key, os.getenv(key)))
++ print("==================")
+
+
+ def main():
+--- /src/build_tools/copy_dll_and_symbol.py
++++ /src/build_tools/copy_dll_and_symbol.py
+@@ -38,7 +38,7 @@
+ import os
+ import shutil
+
+-from util import PrintErrorAndExit
++from .util import PrintErrorAndExit
+
+ def ParseOption():
+ """Parse command line options."""
+@@ -98,7 +98,7 @@
+ if _GetLastModifiedTime(src) <= target_file_mtime:
+ # Older file found. Ignore.
+ continue
+- print 'Copying %s to %s' % (src, target_file_abspath)
++ print('Copying %s to %s' % (src, target_file_abspath))
+ shutil.copy2(src, target_file_abspath)
+ break
+
+--- /src/build_tools/copy_file.py
++++ /src/build_tools/copy_file.py
+@@ -52,7 +52,7 @@
+ Args:
+ message: The error message to be printed to stderr.
+ """
+- print >>sys.stderr, message
++ print(message, file=sys.stderr)
+ sys.exit(1)
+
+
+--- /src/build_tools/copy_qt_frameworks_mac.py
++++ /src/build_tools/copy_qt_frameworks_mac.py
+@@ -41,9 +41,9 @@
+ import optparse
+ import os
+
+-from copy_file import CopyFiles
+-from util import PrintErrorAndExit
+-from util import RunOrDie
++from .copy_file import CopyFiles
++from .util import PrintErrorAndExit
++from .util import RunOrDie
+
+
+ def ParseOption():
+--- /src/build_tools/embed_file.py
++++ /src/build_tools/embed_file.py
+@@ -46,10 +46,10 @@
+
+ def _FormatAsUint64LittleEndian(s):
+ """Formats a string as uint64 value in little endian order."""
+- for _ in xrange(len(s), 8):
+- s += '\0'
++ for _ in range(len(s), 8):
++ s += b'\0'
+ s = s[::-1] # Reverse the string
+- return '0x%s' % binascii.b2a_hex(s)
++ return b'0x%b' % binascii.b2a_hex(s)
+
+
+ def main():
+@@ -57,30 +57,30 @@
+ with open(opts.input, 'rb') as infile:
+ with open(opts.output, 'wb') as outfile:
+ outfile.write(
+- '#ifdef MOZC_EMBEDDED_FILE_%(name)s\n'
+- '#error "%(name)s was already included or defined elsewhere"\n'
+- '#else\n'
+- '#define MOZC_EMBEDDED_FILE_%(name)s\n'
+- 'const uint64 %(name)s_data[] = {\n'
+- % {'name': opts.name})
++ b'#ifdef MOZC_EMBEDDED_FILE_%(name)b\n'
++ b'#error "%(name)b was already included or defined elsewhere"\n'
++ b'#else\n'
++ b'#define MOZC_EMBEDDED_FILE_%(name)b\n'
++ b'const uint64 %(name)b_data[] = {\n'
++ % {b'name': opts.name.encode('utf-8')})
+
+ while True:
+ chunk = infile.read(8)
+ if not chunk:
+ break
+- outfile.write(' ')
++ outfile.write(b' ')
+ outfile.write(_FormatAsUint64LittleEndian(chunk))
+- outfile.write(',\n')
++ outfile.write(b',\n')
+
+ outfile.write(
+- '};\n'
+- 'const EmbeddedFile %(name)s = {\n'
+- ' %(name)s_data,\n'
+- ' %(size)d,\n'
+- '};\n'
+- '#endif // MOZC_EMBEDDED_FILE_%(name)s\n'
+- % {'name': opts.name,
+- 'size': os.stat(opts.input).st_size})
++ b'};\n'
++ b'const EmbeddedFile %(name)b = {\n'
++ b' %(name)b_data,\n'
++ b' %(size)d,\n'
++ b'};\n'
++ b'#endif // MOZC_EMBEDDED_FILE_%(name)b\n'
++ % {b'name': opts.name.encode('utf-8'),
++ b'size': os.stat(opts.input).st_size})
+
+
+ if __name__ == '__main__':
+--- /src/build_tools/embed_pathname.py
++++ /src/build_tools/embed_pathname.py
+@@ -28,7 +28,7 @@
+ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+-"""A script to embed the given (relative) path name to C/C++ characters array.
++r"""A script to embed the given (relative) path name to C/C++ characters array.
+
+ Example:
+ ./embed_pathname.py --path_to_be_embedded=d:\data\mozc
+@@ -53,7 +53,7 @@
+
+ (options, unused_args) = parser.parse_args()
+ if not all(vars(options).values()):
+- print parser.print_help()
++ print(parser.print_help())
+ sys.exit(1)
+
+ return options
+@@ -63,7 +63,7 @@
+ opt = ParseOption()
+ path = os.path.abspath(opt.path_to_be_embedded)
+ # TODO(yukawa): Consider the case of non-ASCII characters.
+- escaped_path = path.encode('string-escape')
++ escaped_path = path.replace('\\', '\\\\')
+ with open(opt.output, 'w') as output_file:
+ output_file.write(
+ 'const char %s[] = "%s";\n' % (opt.constant_name, escaped_path))
+--- /src/build_tools/ensure_gyp_module_path.py
++++ /src/build_tools/ensure_gyp_module_path.py
+@@ -48,7 +48,7 @@
+
+ (options, _) = parser.parse_args()
+ if not options.expected:
+- print parser.print_help()
++ print(parser.print_help())
+ sys.exit(1)
+
+ return options
+@@ -59,20 +59,20 @@
+ opt = ParseOption()
+ expected_path = os.path.abspath(opt.expected)
+ if not os.path.exists(expected_path):
+- print '%s does not exist.' % expected_path
++ print('%s does not exist.' % expected_path)
+ sys.exit(1)
+
+ try:
+ import gyp # NOLINT
+ except ImportError as e:
+- print 'import gyp failed: %s' % e
++ print('import gyp failed: %s' % e)
+ sys.exit(1)
+
+ actual_path = os.path.abspath(gyp.__path__[0])
+ if expected_path != actual_path:
+- print 'Unexpected gyp module is loaded on this environment.'
+- print ' expected: %s' % expected_path
+- print ' actual : %s' % actual_path
++ print('Unexpected gyp module is loaded on this environment.')
++ print(' expected: %s' % expected_path)
++ print(' actual : %s' % actual_path)
+ sys.exit(1)
+
+ if __name__ == '__main__':
+--- /src/build_tools/gen_win32_resource_header.py
++++ /src/build_tools/gen_win32_resource_header.py
+@@ -39,7 +39,7 @@
+ __author__ = "yukawa"
+
+ import logging
+-import mozc_version
++from . import mozc_version
+ import optparse
+ import os
+ import sys
+--- /src/build_tools/mozc_version.py
++++ /src/build_tools/mozc_version.py
+@@ -94,7 +94,7 @@
+ last_digit = TARGET_PLATFORM_TO_DIGIT.get(target_platform, None)
+ if last_digit is None:
+ logging.critical('target_platform %s is invalid. Accetable ones are %s',
+- target_platform, TARGET_PLATFORM_TO_DIGIT.keys())
++ target_platform, list(TARGET_PLATFORM_TO_DIGIT.keys()))
+ sys.exit(1)
+
+ if not revision:
+@@ -314,13 +314,14 @@
+ self._properties = {}
+ if not os.path.isfile(path):
+ return
+- for line in open(path):
+- matchobj = re.match(r'(\w+)=(.*)', line.strip())
+- if matchobj:
+- var = matchobj.group(1)
+- val = matchobj.group(2)
+- if var not in self._properties:
+- self._properties[var] = val
++ with open(path) as file:
++ for line in file:
++ matchobj = re.match(r'(\w+)=(.*)', line.strip())
++ if matchobj:
++ var = matchobj.group(1)
++ val = matchobj.group(2)
++ if var not in self._properties:
++ self._properties[var] = val
+
+ # Check mandatory properties.
+ for key in VERSION_PROPERTIES:
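The hunks above come from the first of four patches that port mozc's Python 2 build scripts to Python 3: print statements become print() calls, xrange/iteritems become range/items, / on integers becomes //, tuple parameters disappear from lambdas, and octal literals gain the 0o prefix. A minimal standalone sketch of these idioms, with illustrative names not taken from the mozc tree:

    import itertools

    category_list = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H']

    # Python 2: itertools.groupby(..., lambda (codepoint, _): codepoint / 4)
    # Python 3: tuple parameters are gone and / is true division, so index
    # the tuple and use // for floor division instead.
    for key, group in itertools.groupby(enumerate(category_list),
                                        lambda x: x[0] // 4):
        print(key, [c for _, c in group])  # print is a function, not a statement

    # Python 2: bitmap.iteritems() and xrange(n).
    # Python 3: items() and range() replace them directly.
    bitmap = {'JISX0208': 1, 'JISX0212': 2}
    category_map = sorted((bits, cat) for cat, bits in bitmap.items())
    for codepoint in range(4):
        pass

    # Python 2: os.chmod(path, 0755). Python 3 requires the 0o octal prefix.
    MODE_RWXR_XR_X = 0o755

    # Python 2: iterating a file opened in 'rb' yields 1-byte strings, hence
    # ord(byte). Python 3: iterating bytes yields ints directly.
    data = b'\x01\x02'
    escaped = ''.join(r'\x%02X' % byte for byte in data)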
diff --git a/app-i18n/mozc/files/mozc-2.23.2815.102-python-3_2.patch b/app-i18n/mozc/files/mozc-2.23.2815.102-python-3_2.patch
new file mode 100644
index 000000000000..456e8368049a
--- /dev/null
+++ b/app-i18n/mozc/files/mozc-2.23.2815.102-python-3_2.patch
@@ -0,0 +1,600 @@
+https://github.com/google/mozc/issues/462
+
+--- /src/build_tools/redirect.py
++++ /src/build_tools/redirect.py
+@@ -58,14 +58,15 @@
+ process = subprocess.Popen(sys.argv, stdout=subprocess.PIPE,
+ universal_newlines=True)
+ except:
+- print '=========='
+- print ' ERROR: %s' % ' '.join(sys.argv)
+- print '=========='
++ print('==========')
++ print(' ERROR: %s' % ' '.join(sys.argv))
++ print('==========')
+ raise
+ (stdout_content, _) = process.communicate()
+ # Write the stdout content to the output file.
+ output_file = open(output_file_name, 'w')
+ output_file.write(stdout_content)
++ output_file.close()
+ return process.wait()
+
+ if __name__ == '__main__':
+--- /src/build_tools/run_after_chdir.py
++++ /src/build_tools/run_after_chdir.py
+@@ -57,7 +57,7 @@
+ sys.argv.insert(0, sys.executable) # Inject the python interpreter path.
+ # We don't capture stdout and stderr from Popen. The output will just
+ # be emitted to a terminal or console.
+- print sys.argv
++ print(sys.argv)
+ sys.exit(subprocess.call(sys.argv))
+
+ if __name__ == '__main__':
+--- /src/build_tools/serialized_string_array_builder.py
++++ /src/build_tools/serialized_string_array_builder.py
+@@ -58,11 +58,11 @@
+ f.write(struct.pack('<I', array_size))
+
+ # Offset and length array of (4 + 4) * array_size bytes.
+- for i in xrange(array_size):
++ for i in range(array_size):
+ f.write(struct.pack('<I', offsets[i]))
+ f.write(struct.pack('<I', lengths[i]))
+
+ # Strings chunk.
+- for i in xrange(array_size):
++ for i in range(array_size):
+ f.write(strings[i])
+- f.write('\0')
++ f.write(b'\0')
+--- /src/build_tools/test_tools/gtest_report.py
++++ /src/build_tools/test_tools/gtest_report.py
+@@ -36,9 +36,9 @@
+
+ __author__ = "nona"
+
+-import cStringIO as StringIO
++import io
+ import logging
+-from xml.etree import cElementTree as ElementTree
++from xml.etree import ElementTree
+
+
+ class Failure(object):
+@@ -87,13 +87,13 @@
+ """Returns summarized error report text."""
+ if self.fail_num == 0:
+ return ''
+- output = StringIO.StringIO()
++ output = io.StringIO()
+ for testcase in self.testcases:
+ if not testcase.failures:
+ continue
+- print >>output, '%s.%s:' % (self.name, testcase.name)
++ print('%s.%s:' % (self.name, testcase.name), file=output)
+ for failure in testcase.failures:
+- print >>output, failure.contents.encode('utf-8')
++ print(failure.contents.encode('utf-8'), file=output)
+ return output.getvalue()
+
+ @classmethod
+--- /src/build_tools/test_tools/test_launcher.py
++++ /src/build_tools/test_tools/test_launcher.py
+@@ -101,11 +101,11 @@
+ time.sleep(1)
+ try:
+ shutil.rmtree(self._path)
+- except OSError, e:
++ except OSError as e:
+ logging.error('Failed to remove %s. error: %s', self._path, e)
+
+
+-def _ExecuteTest((command, gtest_report_dir)):
++def _ExecuteTest(args):
+ """Executes tests with specified Test command.
+
+ Args:
+@@ -122,6 +122,7 @@
+ module, which is used in multiprocessing module.
+ (http://docs.python.org/library/pickle.html)
+ """
++ (command, gtest_report_dir) = args
+ binary = command[0]
+ binary_filename = os.path.basename(binary)
+ tmp_dir = tempfile.mkdtemp()
+--- /src/build_tools/tweak_data.py
++++ /src/build_tools/tweak_data.py
+@@ -55,7 +55,7 @@
+ The value for the variable if the variable is defined in the
+ environment. Otherwise original string is returned.
+ """
+- if environment.has_key(matchobj.group(1)):
++ if matchobj.group(1) in environment:
+ return environment[matchobj.group(1)]
+ return matchobj.group(0)
+
+--- /src/build_tools/tweak_info_plist.py
++++ /src/build_tools/tweak_info_plist.py
+@@ -42,8 +42,8 @@
+ import logging
+ import optparse
+ import sys
+-import mozc_version
+-import tweak_data
++from . import mozc_version
++from . import tweak_data
+
+ _COPYRIGHT_YEAR = datetime.date.today().year
+
+@@ -81,7 +81,7 @@
+
+ version = mozc_version.MozcVersion(options.version_file)
+
+- copyright_message = (u'© %d Google Inc.' % _COPYRIGHT_YEAR).encode('utf-8')
++ copyright_message = ('© %d Google Inc.' % _COPYRIGHT_YEAR).encode('utf-8')
+ long_version = version.GetVersionString()
+ short_version = version.GetVersionInFormat('@MAJOR@.@MINOR@.@BUILD@')
+
+--- /src/build_tools/tweak_info_plist_strings.py
++++ /src/build_tools/tweak_info_plist_strings.py
+@@ -40,7 +40,7 @@
+ import logging
+ import optparse
+ import sys
+-import tweak_data
++from . import tweak_data
+
+ _COPYRIGHT_YEAR = datetime.date.today().year
+
+@@ -77,7 +77,7 @@
+ if options.branding == 'GoogleJapaneseInput':
+ variables = {
+ 'CF_BUNDLE_NAME_EN': 'Google Japanese Input',
+- 'CF_BUNDLE_NAME_JA': u'Google 日本語入力'.encode('utf-8'),
++ 'CF_BUNDLE_NAME_JA': 'Google 日本語入力'.encode('utf-8'),
+ 'NS_HUMAN_READABLE_COPYRIGHT': copyright_message,
+ 'INPUT_MODE_ANNOTATION': 'Google',
+ }
+--- /src/build_tools/tweak_macinstaller_script.py
++++ /src/build_tools/tweak_macinstaller_script.py
+@@ -39,7 +39,7 @@
+ import logging
+ import optparse
+
+-import mozc_version
++from . import mozc_version
+
+
+ def _ReplaceVariables(data, environment):
+--- /src/build_tools/tweak_pkgproj.py
++++ /src/build_tools/tweak_pkgproj.py
+@@ -45,7 +45,7 @@
+ import os
+ import plistlib
+ import re
+-import mozc_version
++from . import mozc_version
+
+ from os import path
+
+@@ -71,7 +71,7 @@
+ The value for the variable if the variable is defined in the
+ environment. Otherwise original string is returned.
+ """
+- if environment.has_key(matchobj.group(1)):
++ if matchobj.group(1) in environment:
+ return environment[matchobj.group(1)]
+ return matchobj.group(0)
+
+--- /src/build_tools/util.py
++++ /src/build_tools/util.py
+@@ -73,11 +73,11 @@
+ return 1
+
+
+-class RunOrDieError(StandardError):
++class RunOrDieError(Exception):
+ """The exception class for RunOrDie."""
+
+ def __init__(self, message):
+- StandardError.__init__(self, message)
++ Exception.__init__(self, message)
+
+
+ def RunOrDie(argv):
+@@ -105,7 +105,7 @@
+ return # Do nothing if not exist.
+ if IsWindows():
+ # Read-only files cannot be deleted on Windows.
+- os.chmod(file_name, 0700)
++ os.chmod(file_name, 0o700)
+ logging.debug('Removing file: %s', file_name)
+ os.unlink(file_name)
+
+--- /src/build_tools/zlib_util.py
++++ /src/build_tools/zlib_util.py
+@@ -58,7 +58,7 @@
+
+ def main():
+ if len(sys.argv) != 4:
+- print >>sys.stderr, 'Invalid arguments'
++ print('Invalid arguments', file=sys.stderr)
+ return
+ if sys.argv[1] == 'compress':
+ Compress(sys.argv[2], sys.argv[3])
+@@ -66,7 +66,7 @@
+ if sys.argv[1] == 'decompress':
+ Decompress(sys.argv[2], sys.argv[3])
+ return
+- print >>sys.stderr, 'Unknown command:', sys.argv[1]
++ print('Unknown command:', sys.argv[1], file=sys.stderr)
+
+
+ if __name__ == '__main__':
+--- /src/composer/internal/gen_typing_model.py
++++ /src/composer/internal/gen_typing_model.py
+@@ -54,14 +54,13 @@
+ __author__ = "noriyukit"
+
+ import bisect
+-import codecs
+ import collections
+ import optparse
+ import struct
+
+ UNDEFINED_COST = -1
+-MAX_UINT16 = struct.unpack('H', '\xFF\xFF')[0]
+-MAX_UINT8 = struct.unpack('B', '\xFF')[0]
++MAX_UINT16 = struct.unpack('H', b'\xFF\xFF')[0]
++MAX_UINT8 = struct.unpack('B', b'\xFF')[0]
+
+
+ def ParseArgs():
+@@ -113,7 +112,7 @@
+ sorted_values = list(sorted(set(values)))
+ mapping_table = sorted_values[0]
+ mapping_table_size_without_special_value = mapping_table_size - 1
+- span = len(sorted_values) / (mapping_table_size_without_special_value - 1)
++ span = len(sorted_values) // (mapping_table_size_without_special_value - 1)
+ mapping_table = [sorted_values[i * span]
+ for i
+ in range(0, mapping_table_size_without_special_value - 1)]
+@@ -150,7 +149,7 @@
+
+ def GetValueTable(unique_characters, mapping_table, dictionary):
+ result = []
+- for key, value in dictionary.iteritems():
++ for key, value in dictionary.items():
+ index = GetIndexFromKey(unique_characters, key)
+ while len(result) <= index:
+ result.append(len(mapping_table) - 1)
+@@ -167,13 +166,13 @@
+ romaji_transition_cost)
+ with open(output_path, 'wb') as f:
+ f.write(struct.pack('<I', len(unique_characters)))
+- f.write(''.join(unique_characters))
++ f.write(''.join(unique_characters).encode('utf-8'))
+ offset = 4 + len(unique_characters)
+
+ # Add padding to place value list size at 4-byte boundary.
+ if offset % 4:
+ padding_size = 4 - offset % 4
+- f.write('\x00' * padding_size)
++ f.write(b'\x00' * padding_size)
+ offset += padding_size
+
+ f.write(struct.pack('<I', len(value_list)))
+@@ -184,7 +183,7 @@
+ # Add padding to place mapping_table at 4-byte boundary.
+ if offset % 4:
+ padding_size = 4 - offset % 4
+- f.write('\x00' * padding_size)
++ f.write(b'\x00' * padding_size)
+ offset += padding_size
+
+ for v in mapping_table:
+@@ -198,7 +197,8 @@
+ # - trigram['vw']['x'] = -500 * log(P(x | 'vw'))
+ unigram = {}
+ trigram = collections.defaultdict(dict)
+- for line in codecs.open(options.input_path, 'r', encoding='utf-8'):
++ input_file = open(options.input_path, 'r', encoding='utf-8')
++ for line in input_file:
+ line = line.rstrip()
+ ngram, cost = line.split('\t')
+ cost = int(cost)
+@@ -206,6 +206,7 @@
+ unigram[ngram] = cost
+ else:
+ trigram[ngram[:-1]][ngram[-1]] = cost
++ input_file.close()
+
+ # Calculate ngram-related cost for each 'vw' and 'x':
+ # -500 * log( P('x' | 'vw') / P('x') )
+--- /src/converter/gen_boundary_data.py
++++ /src/converter/gen_boundary_data.py
+@@ -70,7 +70,8 @@
+ def LoadPatterns(file):
+ prefix = []
+ suffix = []
+- for line in open(file, 'r'):
++ fh = open(file, 'r')
++ for line in fh:
+ if len(line) <= 1 or line[0] == '#':
+ continue
+ fields = line.split()
+@@ -84,8 +85,9 @@
+ elif label == 'SUFFIX':
+ suffix.append([re.compile(PatternToRegexp(feature)), cost])
+ else:
+- print 'format error %s' % (line)
++ print('format error %s' % (line))
+ sys.exit(0)
++ fh.close()
+ return (prefix, suffix)
+
+
+@@ -100,19 +102,23 @@
+
+ def LoadFeatures(filename):
+ features = []
+- for line in open(filename, 'r'):
++ fh = open(filename, 'r')
++ for line in fh:
+ fields = line.split()
+ features.append(fields[1])
++ fh.close()
+ return features
+
+
+ def CountSpecialPos(filename):
+ count = 0
+- for line in open(filename, 'r'):
++ fh = open(filename, 'r')
++ for line in fh:
+ line = line.rstrip()
+ if not line or line[0] == '#':
+ continue
+ count += 1
++ fh.close()
+ return count
+
+
+@@ -141,7 +147,7 @@
+ f.write(struct.pack('<H', GetCost(prefix, feature)))
+ f.write(struct.pack('<H', GetCost(suffix, feature)))
+
+- for _ in xrange(num_special_pos):
++ for _ in range(num_special_pos):
+ f.write(struct.pack('<H', 0))
+ f.write(struct.pack('<H', 0))
+
+--- /src/converter/gen_quality_regression_test_data.py
++++ /src/converter/gen_quality_regression_test_data.py
+@@ -84,7 +84,7 @@
+ else _ENABLED)
+ id = issue.attributes['id'].value
+ target = GetText(issue.getElementsByTagName('target'))
+- for detail in issue.getElementsByTagName(u'detail'):
++ for detail in issue.getElementsByTagName('detail'):
+ fields = []
+ fields.append('mozcsu_%s' % id)
+ for key in ('reading', 'output', 'actionStatus', 'rank', 'accuracy'):
+@@ -104,19 +104,19 @@
+
+ def GenerateHeader(files):
+ try:
+- print 'namespace mozc{'
+- print 'struct TestCase {'
+- print ' const bool enabled;'
+- print ' const char *tsv;'
+- print '} kTestData[] = {'
++ print('namespace mozc{')
++ print('struct TestCase {')
++ print(' const bool enabled;')
++ print(' const char *tsv;')
++ print('} kTestData[] = {')
+ for file in files:
+ for enabled, line in ParseFile(file):
+- print ' {%s, "%s"},' % (enabled, EscapeString(line))
+- print ' {false, nullptr},'
+- print '};'
+- print '} // namespace mozc'
++ print(' {%s, "%s"},' % (enabled, EscapeString(line)))
++ print(' {false, nullptr},')
++ print('};')
++ print('} // namespace mozc')
+ except:
+- print 'cannot open %s' % (file)
++ print('cannot open %s' % (file))
+ sys.exit(1)
+
+
+--- /src/converter/gen_segmenter_code.py
++++ /src/converter/gen_segmenter_code.py
+@@ -54,18 +54,22 @@
+ pos = {}
+ max_id = 0
+
+- for line in open(id_file, "r"):
++ fh = open(id_file, "r")
++ for line in fh:
+ fields = line.split()
+ pos[fields[1]] = fields[0]
+ max_id = max(int(fields[0]), max_id)
++ fh.close()
+
+ max_id = max_id + 1
+- for line in open(special_pos_file, "r"):
++ fh = open(special_pos_file, "r")
++ for line in fh:
+ if len(line) <= 1 or line[0] == '#':
+ continue
+ fields = line.split()
+ pos[fields[0]] = ("%d" % max_id)
+ max_id = max_id + 1
++ fh.close()
+
+ return pos
+
+@@ -79,8 +83,7 @@
+ pat = re.compile(PatternToRegexp(pattern))
+ min = -1;
+ max = -1;
+- keys = pos.keys()
+- keys.sort()
++ keys = sorted(pos.keys())
+
+ range = []
+
+@@ -107,7 +110,7 @@
+ tmp.append("(%s >= %s && %s <= %s)" % (name, r[0], name, r[1]))
+
+ if len(tmp) == 0:
+- print "FATAL: No rule fiind %s" % (pattern)
++ print("FATAL: No rule fiind %s" % (pattern))
+ sys.exit(-1)
+
+ return " || ".join(tmp)
+@@ -115,19 +118,21 @@
+ def main():
+ pos = ReadPOSID(sys.argv[1], sys.argv[2])
+
+- print HEADER % (len(pos.keys()), len(pos.keys()))
++ print(HEADER % (len(pos.keys()), len(pos.keys())))
+
+- for line in open(sys.argv[3], "r"):
++ fh = open(sys.argv[3], "r")
++ for line in fh:
+ if len(line) <= 1 or line[0] == '#':
+ continue
+ (l, r, result) = line.split()
+ result = result.lower()
+ lcond = GetRange(pos, l, "rid") or "true";
+ rcond = GetRange(pos, r, "lid") or "true";
+- print " // %s %s %s" % (l, r, result)
+- print " if ((%s) && (%s)) { return %s; }" % (lcond, rcond, result)
++ print(" // %s %s %s" % (l, r, result))
++ print(" if ((%s) && (%s)) { return %s; }" % (lcond, rcond, result))
++ fh.close()
+
+- print FOOTER
++ print(FOOTER)
+
+ if __name__ == "__main__":
+ main()
+--- /src/data_manager/gen_connection_data.py
++++ /src/data_manager/gen_connection_data.py
+@@ -32,8 +32,7 @@
+
+ __author__ = "hidehiko"
+
+-import cStringIO as StringIO
+-import itertools
++import io
+ import logging
+ import optparse
+ import os
+@@ -45,7 +44,7 @@
+ INVALID_COST = 30000
+ INVALID_1BYTE_COST = 255
+ RESOLUTION_FOR_1BYTE = 64
+-FILE_MAGIC = '\xAB\xCD'
++FILE_MAGIC = b'\xAB\xCD'
+
+ FALSE_VALUES = ['f', 'false', '0']
+ TRUE_VALUES = ['t', 'true', '1']
+@@ -79,28 +78,28 @@
+ # The result is a square matrix.
+ mat_size = pos_size + special_pos_size
+
+- matrix = [[0] * mat_size for _ in xrange(mat_size)]
++ matrix = [[0] * mat_size for _ in range(mat_size)]
+ with open(text_connection_file) as stream:
+ stream = code_generator_util.SkipLineComment(stream)
+ # The first line contains the matrix column/row size.
+- size = stream.next().rstrip()
++ size = next(stream).rstrip()
+ assert (int(size) == pos_size), '%s != %d' % (size, pos_size)
+
+ for array_index, cost in enumerate(stream):
+ cost = int(cost.rstrip())
+- rid = array_index / pos_size
++ rid = array_index // pos_size
+ lid = array_index % pos_size
+ if rid == 0 and lid == 0:
+ cost = 0
+ matrix[rid][lid] = cost
+
+ # Fill INVALID_COST in matrix elements for special POS.
+- for rid in xrange(pos_size, mat_size):
+- for lid in xrange(1, mat_size): # Skip EOS
++ for rid in range(pos_size, mat_size):
++ for lid in range(1, mat_size): # Skip EOS
+ matrix[rid][lid] = INVALID_COST
+
+- for lid in xrange(pos_size, mat_size):
+- for rid in xrange(1, mat_size): # Skip BOS
++ for lid in range(pos_size, mat_size):
++ for rid in range(1, mat_size): # Skip BOS
+ matrix[rid][lid] = INVALID_COST
+
+ return matrix
+@@ -116,7 +115,7 @@
+ # Heuristically, we do not compress INVALID_COST.
+ continue
+ m[cost] = m.get(cost, 0) + 1
+- mode_value = max(m.iteritems(), key=lambda (_, count): count)[0]
++ mode_value = max(m.items(), key=lambda x: x[1])[0]
+ result.append(mode_value)
+ return result
+
+@@ -126,8 +125,8 @@
+ # list, and fill None into the matrix if it equals to the corresponding
+ # mode value.
+ assert len(matrix) == len(mode_value_list)
+- for row, mode_value in itertools.izip(matrix, mode_value_list):
+- for index in xrange(len(row)):
++ for row, mode_value in zip(matrix, mode_value_list):
++ for index in range(len(row)):
+ if row[index] == mode_value:
+ row[index] = None
+
+@@ -179,7 +178,7 @@
+ resolution = RESOLUTION_FOR_1BYTE
+ else:
+ resolution = 1
+- stream = StringIO.StringIO()
++ stream = io.BytesIO()
+
+ # Output header.
+ stream.write(FILE_MAGIC)
+@@ -194,7 +193,7 @@
+
+ # 4 bytes alignment.
+ if len(mode_value_list) % 2:
+- stream.write('\x00\x00')
++ stream.write(b'\x00\x00')
+
+ # Process each row:
+ for row in matrix:
+@@ -218,7 +217,7 @@
+ if cost == INVALID_COST:
+ cost = INVALID_1BYTE_COST
+ else:
+- cost /= resolution
++ cost //= resolution
+ assert cost != INVALID_1BYTE_COST
+ values.append(cost)
+
+@@ -237,7 +236,7 @@
+ values_size = len(values) * 2
+
+ # Output the bits for a row.
+- stream.write(struct.pack('<HH', len(compact_bits) / 8, values_size))
++ stream.write(struct.pack('<HH', len(compact_bits) // 8, values_size))
+ OutputBitList(chunk_bits, stream)
+ OutputBitList(compact_bits, stream)
+ if use_1byte_cost:
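The second patch continues the same port with the remaining conversion patterns: except-as syntax, the removal of tuple parameters from function definitions, io.BytesIO in place of cStringIO, and explicit bytes literals for binary output. A short self-contained sketch of these idioms (illustrative names only, not mozc code):

    import io
    import struct

    # Python 2: "except OSError, e:"  ->  Python 3: "except OSError as e:"
    try:
        raise OSError('demo')
    except OSError as e:
        message = str(e)

    # Python 2 allowed tuple parameters, e.g. def _ExecuteTest((command, dir)).
    # Python 3 drops them; take one argument and unpack it in the body, which
    # also keeps the function usable with multiprocessing.Pool.map().
    def execute_test(args):
        command, report_dir = args
        return command, report_dir

    # Python 2: cStringIO.StringIO() held byte strings. Python 3 separates
    # text from binary, so binary data goes through io.BytesIO as bytes.
    stream = io.BytesIO()
    stream.write(b'\xAB\xCD')            # a FILE_MAGIC-style bytes literal
    stream.write(struct.pack('<H', 42))  # struct.pack() already returns bytes

    # Python 2: environment.has_key(key)  ->  Python 3: key in environment
    environment = {'GEN_OUT_DIR': '/tmp'}
    generate = 'GEN_OUT_DIR' in environment

    # Python 2: max(m.iteritems(), key=lambda (_, count): count)
    # Python 3: index the tuple instead of unpacking it in the lambda.
    m = {10: 3, 20: 7}
    mode_value = max(m.items(), key=lambda x: x[1])[0]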
diff --git a/app-i18n/mozc/files/mozc-2.23.2815.102-python-3_3.patch b/app-i18n/mozc/files/mozc-2.23.2815.102-python-3_3.patch
new file mode 100644
index 000000000000..a5c5a2dc8038
--- /dev/null
+++ b/app-i18n/mozc/files/mozc-2.23.2815.102-python-3_3.patch
@@ -0,0 +1,583 @@
+https://github.com/google/mozc/issues/462
+
+--- /src/dictionary/gen_pos_map.py
++++ /src/dictionary/gen_pos_map.py
+@@ -39,7 +39,7 @@
+ from build_tools import code_generator_util
+
+
+-HEADER = """// Copyright 2009 Google Inc. All Rights Reserved.
++HEADER = b"""// Copyright 2009 Google Inc. All Rights Reserved.
+ // Author: keni
+
+ #ifndef MOZC_DICTIONARY_POS_MAP_H_
+@@ -48,13 +48,13 @@
+ // POS conversion rules
+ const POSMap kPOSMap[] = {
+ """
+-FOOTER = """};
++FOOTER = b"""};
+
+ #endif // MOZC_DICTIONARY_POS_MAP_H_
+ """
+
+ def ParseUserPos(user_pos_file):
+- with open(user_pos_file, 'r') as stream:
++ with open(user_pos_file, 'rb') as stream:
+ stream = code_generator_util.SkipLineComment(stream)
+ stream = code_generator_util.ParseColumnStream(stream, num_column=2)
+ return dict((key, enum_value) for key, enum_value in stream)
+@@ -64,7 +64,7 @@
+ user_pos_map = ParseUserPos(user_pos_file)
+
+ result = {}
+- with open(third_party_pos_map_file, 'r') as stream:
++ with open(third_party_pos_map_file, 'rb') as stream:
+ stream = code_generator_util.SkipLineComment(stream)
+ for columns in code_generator_util.ParseColumnStream(stream, num_column=2):
+ third_party_pos_name, mozc_pos = (columns + [None])[:2]
+@@ -78,7 +78,7 @@
+ result[third_party_pos_name] = mozc_pos
+
+ # Create mozc_pos to mozc_pos map.
+- for key, value in user_pos_map.iteritems():
++ for key, value in user_pos_map.items():
+ if key in result:
+ assert (result[key] == value)
+ continue
+@@ -94,10 +94,10 @@
+ if value is None:
+ # Invalid PosType.
+ value = (
+- 'static_cast< ::mozc::user_dictionary::UserDictionary::PosType>(-1)')
++ b'static_cast< ::mozc::user_dictionary::UserDictionary::PosType>(-1)')
+ else:
+- value = '::mozc::user_dictionary::UserDictionary::' + value
+- output.write(' { %s, %s },\n' % (key, value))
++ value = b'::mozc::user_dictionary::UserDictionary::' + value
++ output.write(b' { %s, %s },\n' % (key, value))
+ output.write(FOOTER)
+
+
+@@ -121,7 +121,7 @@
+ pos_map = GeneratePosMap(options.third_party_pos_map_file,
+ options.user_pos_file)
+
+- with open(options.output, 'w') as stream:
++ with open(options.output, 'wb') as stream:
+ OutputPosMap(pos_map, stream)
+
+
+--- /src/dictionary/gen_pos_rewrite_rule.py
++++ /src/dictionary/gen_pos_rewrite_rule.py
+@@ -46,29 +46,34 @@
+
+
+ def LoadRewriteMapRule(filename):
+- fh = open(filename)
++ fh = open(filename, 'rb')
+ rule = []
+ for line in fh:
+- line = line.rstrip('\n')
+- if not line or line.startswith('#'):
++ line = line.rstrip(b'\n')
++ if not line or line.startswith(b'#'):
+ continue
+ fields = line.split()
+ rule.append([fields[0], fields[1]])
++ fh.close()
+ return rule
+
+
+ def ReadPOSID(id_file, special_pos_file):
+ pos_list = []
+
+- for line in open(id_file, 'r'):
++ fh = open(id_file, 'rb')
++ for line in fh:
+ fields = line.split()
+ pos_list.append(fields[1])
++ fh.close()
+
+- for line in open(special_pos_file, 'r'):
+- if len(line) <= 1 or line[0] == '#':
++ fh = open(special_pos_file, 'rb')
++ for line in fh:
++ if len(line) <= 1 or line[0:1] == b'#':
+ continue
+ fields = line.split()
+ pos_list.append(fields[0])
++ fh.close()
+
+ return pos_list
+
+@@ -112,7 +117,7 @@
+ ids.append(id)
+
+ with open(opts.output, 'wb') as f:
+- f.write(''.join(chr(id) for id in ids))
++ f.write(bytes(ids))
+
+
+ if __name__ == '__main__':
+--- /src/dictionary/gen_suffix_data.py
++++ /src/dictionary/gen_suffix_data.py
+@@ -52,10 +52,10 @@
+ opts = _ParseOptions()
+
+ result = []
+- with open(opts.input, 'r') as stream:
++ with open(opts.input, 'rb') as stream:
+ for line in stream:
+- line = line.rstrip('\r\n')
+- fields = line.split('\t')
++ line = line.rstrip(b'\r\n')
++ fields = line.split(b'\t')
+ key = fields[0]
+ lid = int(fields[1])
+ rid = int(fields[2])
+@@ -63,7 +63,7 @@
+ value = fields[4]
+
+ if key == value:
+- value = ''
++ value = b''
+
+ result.append((key, value, lid, rid, cost))
+
+--- /src/dictionary/gen_user_pos_data.py
++++ /src/dictionary/gen_user_pos_data.py
+@@ -64,7 +64,7 @@
+ f.write(struct.pack('<H', conjugation_id))
+
+ serialized_string_array_builder.SerializeToFile(
+- sorted(string_index.iterkeys()), output_string_array)
++ sorted(x.encode('utf-8') for x in string_index.keys()), output_string_array)
+
+
+ def ParseOptions():
+@@ -100,7 +100,7 @@
+
+ if options.output_pos_list:
+ serialized_string_array_builder.SerializeToFile(
+- [pos for (pos, _) in user_pos.data], options.output_pos_list)
++ [pos.encode('utf-8') for (pos, _) in user_pos.data], options.output_pos_list)
+
+
+ if __name__ == '__main__':
+--- /src/dictionary/gen_zip_code_seed.py
++++ /src/dictionary/gen_zip_code_seed.py
+@@ -83,7 +83,7 @@
+ address = unicodedata.normalize('NFKC', self.address)
+ line = '\t'.join([zip_code, '0', '0', str(ZIP_CODE_COST),
+ address, ZIP_CODE_LABEL])
+- print line.encode('utf-8')
++ print(line)
+
+
+ def ProcessZipCodeCSV(file_name):
+@@ -105,26 +105,26 @@
+
+ def ReadZipCodeEntries(zip_code, level1, level2, level3):
+ """Read zip code entries."""
+- return [ZipEntry(zip_code, u''.join([level1, level2, town]))
++ return [ZipEntry(zip_code, ''.join([level1, level2, town]))
+ for town in ParseTownName(level3)]
+
+
+ def ReadJigyosyoEntry(zip_code, level1, level2, level3, name):
+ """Read jigyosyo entry."""
+ return ZipEntry(zip_code,
+- u''.join([level1, level2, level3, u' ', name]))
++ ''.join([level1, level2, level3, ' ', name]))
+
+
+ def ParseTownName(level3):
+ """Parse town name."""
+- if level3.find(u'以下に掲載がない場合') != -1:
++ if level3.find('以下に掲載がない場合') != -1:
+ return ['']
+
+ assert CanParseAddress(level3), ('failed to be merged %s'
+ % level3.encode('utf-8'))
+
+ # We ignore additional information here.
+- level3 = re.sub(u'（.*）', u'', level3, re.U)
++ level3 = re.sub('（.*）', '', level3, re.U)
+
+ # For 地割, we have these cases.
+ # XX1地割
+@@ -134,7 +134,7 @@
+ # XX第1地割、XX第2地割、
+ # XX第1地割〜XX第2地割、
+ # We simply use XX for them.
+- chiwari_match = re.match(u'(\D*?)第?\d+地割.*', level3, re.U)
++ chiwari_match = re.match('(\D*?)第?\d+地割.*', level3, re.U)
+ if chiwari_match:
+ town = chiwari_match.group(1)
+ return [town]
+@@ -144,21 +144,21 @@
+ # -> XX町YY and (XX町)ZZ
+ # YY、ZZ
+ # -> YY and ZZ
+- chou_match = re.match(u'(.*町)?(.*)', level3, re.U)
++ chou_match = re.match('(.*町)?(.*)', level3, re.U)
+ if chou_match:
+- chou = u''
++ chou = ''
+ if chou_match.group(1):
+ chou = chou_match.group(1)
+ rests = chou_match.group(2)
+- return [chou + rest for rest in rests.split(u'、')]
++ return [chou + rest for rest in rests.split('、')]
+
+ return [level3]
+
+
+ def CanParseAddress(address):
+ """Return true for valid address."""
+- return (address.find(u'（') == -1 or
+- address.find(u'）') != -1)
++ return (address.find('（') == -1 or
++ address.find('）') != -1)
+
+
+ def ParseOptions():
+--- /src/dictionary/zip_code_util.py
++++ /src/dictionary/zip_code_util.py
+@@ -86,11 +86,11 @@
+
+
+ _SPECIAL_CASES = [
+- SpecialMergeZip(u'5900111', u'大阪府', u'堺市中区', [u'三原台']),
+- SpecialMergeZip(u'8710046', u'大分県', u'中津市',
+- [u'金谷', u'西堀端', u'東堀端', u'古金谷']),
+- SpecialMergeZip(u'9218046', u'石川県', u'金沢市',
+- [u'大桑町', u'三小牛町']),
++ SpecialMergeZip('5900111', '大阪府', '堺市中区', ['三原台']),
++ SpecialMergeZip('8710046', '大分県', '中津市',
++ ['金谷', '西堀端', '東堀端', '古金谷']),
++ SpecialMergeZip('9218046', '石川県', '金沢市',
++ ['大桑町', '三小牛町']),
+ ]
+
+
+--- /src/gui/character_pad/data/gen_cp932_map.py
++++ /src/gui/character_pad/data/gen_cp932_map.py
+@@ -32,7 +32,6 @@
+
+ import re
+ import sys
+-import string
+
+ kUnicodePat = re.compile(r'0x[0-9A-Fa-f]{2,4}')
+ def IsValidUnicode(n):
+@@ -42,28 +41,29 @@
+ fh = open(sys.argv[1])
+ result = {}
+ for line in fh.readlines():
+- if line[0] is '#':
++ if line[0] == '#':
+ continue
+- array = string.split(line)
++ array = line.split()
+ sjis = array[0]
+ ucs2 = array[1]
+ if eval(sjis) < 32 or not IsValidUnicode(ucs2):
+ continue
+ result.setdefault(ucs2, sjis)
++ fh.close()
+
+ keys = sorted(result.keys())
+
+- print "struct CP932MapData {"
+- print " unsigned int ucs4;"
+- print " unsigned short int sjis;"
+- print "};"
+- print ""
+- print "static const size_t kCP932MapDataSize = %d;" % (len(keys))
+- print "static const CP932MapData kCP932MapData[] = {"
++ print("struct CP932MapData {")
++ print(" unsigned int ucs4;")
++ print(" unsigned short int sjis;")
++ print("};")
++ print("")
++ print("static const size_t kCP932MapDataSize = %d;" % (len(keys)))
++ print("static const CP932MapData kCP932MapData[] = {")
+ for n in keys:
+- print " { %s, %s }," % (n ,result[n])
+- print " { 0, 0 }";
+- print "};"
++ print(" { %s, %s }," % (n ,result[n]))
++ print(" { 0, 0 }");
++ print("};")
+
+ if __name__ == "__main__":
+ main()
+--- /src/gui/character_pad/data/gen_local_character_map.py
++++ /src/gui/character_pad/data/gen_local_character_map.py
+@@ -30,7 +30,6 @@
+
+ __author__ = "taku"
+
+-import string
+ import re
+ import sys
+
+@@ -43,9 +42,9 @@
+ fh = open(filename)
+ result = []
+ for line in fh.readlines():
+- if line[0] is '#':
++ if line[0] == '#':
+ continue
+- array = string.split(line)
++ array = line.split()
+ jis = array[0].replace('0x', '')
+ ucs2 = array[1].replace('0x', '')
+ if len(jis) == 2:
+@@ -53,6 +52,7 @@
+
+ if IsValidUnicode(ucs2):
+ result.append([jis, ucs2])
++ fh.close()
+
+ return ["JISX0201", result]
+
+@@ -60,13 +60,14 @@
+ fh = open(filename)
+ result = []
+ for line in fh.readlines():
+- if line[0] is '#':
++ if line[0] == '#':
+ continue
+ array = line.split()
+ jis = array[1].replace('0x', '')
+ ucs2 = array[2].replace('0x', '')
+ if IsValidUnicode(ucs2):
+ result.append([jis, ucs2])
++ fh.close()
+
+ return ["JISX0208", result]
+
+@@ -74,13 +75,14 @@
+ fh = open(filename)
+ result = []
+ for line in fh.readlines():
+- if line[0] is '#':
++ if line[0] == '#':
+ continue
+ array = line.split()
+ jis = array[0].replace('0x', '')
+ ucs2 = array[1].replace('0x', '')
+ if IsValidUnicode(ucs2):
+ result.append([jis, ucs2])
++ fh.close()
+
+ return ["JISX0212", result]
+
+@@ -88,7 +90,7 @@
+ fh = open(filename)
+ result = []
+ for line in fh.readlines():
+- if line[0] is '#':
++ if line[0] == '#':
+ continue
+ array = line.split()
+ sjis = array[0].replace('0x', '')
+@@ -100,19 +102,20 @@
+
+ if IsValidUnicode(ucs2):
+ result.append([sjis, ucs2])
++ fh.close()
+
+ return ["CP932", result]
+
+ def Output(arg):
+ name = arg[0]
+ result = arg[1]
+- print "static const size_t k%sMapSize = %d;" % (name, len(result))
+- print "static const mozc::gui::CharacterPalette::LocalCharacterMap k%sMap[] = {" % (name)
++ print("static const size_t k%sMapSize = %d;" % (name, len(result)))
++ print("static const mozc::gui::CharacterPalette::LocalCharacterMap k%sMap[] = {" % (name))
+ for n in result:
+- print " { 0x%s, 0x%s }," % (n[0] ,n[1])
+- print " { 0, 0 }";
+- print "};"
+- print ""
++ print(" { 0x%s, 0x%s }," % (n[0] ,n[1]))
++ print(" { 0, 0 }");
++ print("};")
++ print("")
+
+ if __name__ == "__main__":
+ Output(LoadJISX0201(sys.argv[1]))
+--- /src/gui/character_pad/data/gen_unicode_blocks.py
++++ /src/gui/character_pad/data/gen_unicode_blocks.py
+@@ -33,13 +33,13 @@
+ import sys
+ import re
+
+-re = re.compile('^(.....?)\.\.(.....?); (.+)')
++re = re.compile(r'^(.....?)\.\.(.....?); (.+)')
+
+ def main():
+- print "static const mozc::gui::CharacterPalette::UnicodeBlock kUnicodeBlockTable[] = {"
++ print("static const mozc::gui::CharacterPalette::UnicodeBlock kUnicodeBlockTable[] = {")
+ fh = open(sys.argv[1])
+ for line in fh.readlines():
+- if line[0] is '#':
++ if line[0] == '#':
+ continue
+ m = re.match(line)
+ if m is not None:
+@@ -47,11 +47,12 @@
+ end = int(m.group(2), 16)
+ name = m.group(3)
+ if start <= 0x2FFFF and end <= 0x2FFFF:
+- print " { \"%s\", { %d, %d } }," % (name, start, end)
++ print(" { \"%s\", { %d, %d } }," % (name, start, end))
++ fh.close()
+
+- print " { NULL, { 0, 0 } }"
+- print "};"
+- print ""
++ print(" { NULL, { 0, 0 } }")
++ print("};")
++ print("")
+
+ if __name__ == "__main__":
+ main()
+--- /src/gui/character_pad/data/gen_unicode_data.py
++++ /src/gui/character_pad/data/gen_unicode_data.py
+@@ -46,18 +46,19 @@
+ code = int(code, 16)
+ if code < 0x2FFFF:
+ results.append(" { %d, \"%s\" }," % (code, desc))
++ fh.close()
+
+- print "struct UnicodeData {";
+- print " char32 ucs4;";
+- print " const char *description;";
+- print "};";
+- print ""
+- print "static const size_t kUnicodeDataSize = %d;" % (len(results))
+- print "static const UnicodeData kUnicodeData[] = {";
++ print("struct UnicodeData {");
++ print(" char32 ucs4;");
++ print(" const char *description;");
++ print("};");
++ print("")
++ print("static const size_t kUnicodeDataSize = %d;" % (len(results)))
++ print("static const UnicodeData kUnicodeData[] = {");
+ for line in results:
+- print line;
+- print " { 0, NULL }";
+- print "};";
++ print(line);
++ print(" { 0, NULL }");
++ print("};");
+
+ if __name__ == "__main__":
+ main()
+--- /src/gui/character_pad/data/gen_unihan_data.py
++++ /src/gui/character_pad/data/gen_unihan_data.py
+@@ -31,35 +31,34 @@
+ __author__ = "taku"
+
+ import re
+-import string
+ import sys
+ rs = {}
+
+ def Escape(n):
+- if n is not "NULL":
++ if n != "NULL":
+ return "\"%s\"" % (n)
+ else:
+ return "NULL"
+
+ def GetCode(n):
+- if n is not "NULL":
+- n = string.replace(n, '0-', 'JIS X 0208: 0x')
+- n = string.replace(n, '1-', 'JIS X 0212: 0x')
+- n = string.replace(n, '3-', 'JIS X 0213: 0x')
+- n = string.replace(n, '4-', 'JIS X 0213: 0x')
+- n = string.replace(n, 'A-', 'Vendors Ideographs: 0x')
+- n = string.replace(n, '3A', 'JIS X 0213 2000: 0x')
++ if n != "NULL":
++ n = n.replace('0-', 'JIS X 0208: 0x')
++ n = n.replace('1-', 'JIS X 0212: 0x')
++ n = n.replace('3-', 'JIS X 0213: 0x')
++ n = n.replace('4-', 'JIS X 0213: 0x')
++ n = n.replace('A-', 'Vendors Ideographs: 0x')
++ n = n.replace('3A', 'JIS X 0213 2000: 0x')
+ return "\"%s\"" % n
+ else:
+ return "NULL"
+
+ def GetRadical(n):
+ pat = re.compile(r'^(\d+)\.')
+- if n is not "NULL":
++ if n != "NULL":
+ m = pat.match(n)
+ if m:
+ result = rs[m.group(1)]
+- return "\"%s\"" % (result.encode('string_escape'))
++ return "\"%s\"" % result
+ else:
+ return "NULL"
+ else:
+@@ -73,6 +72,7 @@
+ id = array[1]
+ radical = array[2]
+ rs[id] = radical
++ fh.close()
+
+ dic = {}
+ pat = re.compile(r'^U\+(\S+)\s+(kTotalStrokes|kJapaneseKun|kJapaneseOn|kRSUnicode|kIRG_JSource)\t(.+)')
+@@ -86,23 +86,24 @@
+ n = int(m.group(1), 16)
+ if n <= 65536:
+ dic.setdefault(key, {}).setdefault(field, value)
++ fh.close()
+
+ keys = sorted(dic.keys())
+
+- print "struct UnihanData {";
+- print " unsigned int ucs4;";
++ print("struct UnihanData {");
++ print(" unsigned int ucs4;");
+ # Since the total strokes defined in Unihan data is Chinese-based
+ # number, we can't use it.
+ # print " unsigned char total_strokes;";
+- print " const char *japanese_kun;";
+- print " const char *japanese_on;";
++ print(" const char *japanese_kun;");
++ print(" const char *japanese_on;");
+ # Since the radical information defined in Unihan data is Chinese-based
+ # number, we can't use it.
+ # print " const char *radical;";
+- print " const char *IRG_jsource;";
+- print "};"
+- print "static const size_t kUnihanDataSize = %d;" % (len(keys))
+- print "static const UnihanData kUnihanData[] = {"
++ print(" const char *IRG_jsource;");
++ print("};")
++ print("static const size_t kUnihanDataSize = %d;" % (len(keys)))
++ print("static const UnihanData kUnihanData[] = {")
+
+ for key in keys:
+ total_strokes = dic[key].get("kTotalStrokes", "0")
+@@ -111,9 +112,9 @@
+ rad = GetRadical(dic[key].get("kRSUnicode", "NULL"))
+ code = GetCode(dic[key].get("kIRG_JSource", "NULL"))
+ # print " { 0x%s, %s, %s, %s, %s, %s }," % (key, total_strokes, kun, on, rad, code)
+- print " { 0x%s, %s, %s, %s }," % (key, kun, on, code)
++ print(" { 0x%s, %s, %s, %s }," % (key, kun, on, code))
+
+- print "};"
++ print("};")
+
+ if __name__ == "__main__":
+ main()
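Note: the hunks above share a single porting idiom. Data files that feed the code generators are reopened in binary mode, and every literal they are compared or joined with becomes a bytes literal, so the generated C++ tables stay byte-for-byte identical under Python 3 regardless of the build locale. Single-byte tests are rewritten as one-element slices (line[0:1] == b'#') because indexing a bytes object in Python 3 yields an int, not a length-1 string. A minimal sketch of the idiom, using a hypothetical file name and column layout rather than anything from the patch:

    def read_rules(filename):
        # 'rb' keeps every token as bytes; no implicit decoding happens.
        rules = []
        with open(filename, 'rb') as stream:
            for line in stream:
                line = line.rstrip(b'\r\n')
                # Slice instead of index: line[0] would be an int in Python 3.
                if not line or line[0:1] == b'#':
                    continue
                rules.append(line.split(b'\t'))
        return rules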
diff --git a/app-i18n/mozc/files/mozc-2.23.2815.102-python-3_4.patch b/app-i18n/mozc/files/mozc-2.23.2815.102-python-3_4.patch
new file mode 100644
index 000000000000..41d2bf9eeb90
--- /dev/null
+++ b/app-i18n/mozc/files/mozc-2.23.2815.102-python-3_4.patch
@@ -0,0 +1,537 @@
+https://github.com/google/mozc/issues/462
+
+--- /src/prediction/gen_zero_query_data.py
++++ /src/prediction/gen_zero_query_data.py
+@@ -59,20 +59,20 @@
+ Returns:
+ A integer indicating parsed pua.
+ """
+- if not s or s[0] == '>':
++ if not s or s[0:1] == b'>':
+ return 0
+ return int(s, 16)
+
+
+ def NormalizeString(string):
+ return unicodedata.normalize(
+- 'NFKC', string.decode('utf-8')).encode('utf-8').replace('~', '〜')
++ 'NFKC', string.decode('utf-8')).replace('~', '〜').encode('utf-8')
+
+
+ def RemoveTrailingNumber(string):
+ if not string:
+- return ''
+- return re.sub(r'^([^0-9]+)[0-9]+$', r'\1', string)
++ return b''
++ return re.sub(br'^([^0-9]+)[0-9]+$', br'\1', string)
+
+
+ def GetReadingsFromDescription(description):
+@@ -84,19 +84,19 @@
+ # - ビル・建物
+ # \xE3\x83\xBB : "・"
+ return [RemoveTrailingNumber(token) for token
+- in re.split(r'(?:\(|\)|/|\xE3\x83\xBB)+', normalized)]
++ in re.split(br'(?:\(|\)|/|\xE3\x83\xBB)+', normalized)]
+
+
+ def ReadEmojiTsv(stream):
+ """Reads emoji data from stream and returns zero query data."""
+ zero_query_dict = defaultdict(list)
+ stream = code_generator_util.SkipLineComment(stream)
+- for columns in code_generator_util.ParseColumnStream(stream, delimiter='\t'):
++ for columns in code_generator_util.ParseColumnStream(stream, delimiter=b'\t'):
+ if len(columns) != 13:
+- logging.critical('format error: %s', '\t'.join(columns))
++ logging.critical('format error: %s', b'\t'.join(columns))
+ sys.exit(1)
+
+- code_points = columns[0].split(' ')
++ code_points = columns[0].split(b' ')
+
+ # Emoji code point.
+ emoji = columns[1]
+@@ -114,12 +114,12 @@
+ # - Composite emoji which has multiple code point.
+ # NOTE: Some Unicode 6.0 emoji don't have PUA, and it is also omitted.
+ # TODO(hsumita): Check the availability of such emoji and enable it.
+- logging.info('Skip %s', ' '.join(code_points))
++ logging.info('Skip %s', b' '.join(code_points))
+ continue
+
+ reading_list = []
+ # \xe3\x80\x80 is a full-width space
+- for reading in re.split(r'(?: |\xe3\x80\x80)+', NormalizeString(readings)):
++ for reading in re.split(br'(?: |\xe3\x80\x80)+', NormalizeString(readings)):
+ if not reading:
+ continue
+ reading_list.append(reading)
+@@ -158,15 +158,15 @@
+ zero_query_dict = defaultdict(list)
+
+ for line in input_stream:
+- if line.startswith('#'):
++ if line.startswith(b'#'):
+ continue
+- line = line.rstrip('\r\n')
++ line = line.rstrip(b'\r\n')
+ if not line:
+ continue
+
+- tokens = line.split('\t')
++ tokens = line.split(b'\t')
+ key = tokens[0]
+- values = tokens[1].split(',')
++ values = tokens[1].split(b',')
+
+ for value in values:
+ zero_query_dict[key].append(
+@@ -179,16 +179,16 @@
+ """Reads emoticon data from stream and returns zero query data."""
+ zero_query_dict = defaultdict(list)
+ stream = code_generator_util.SkipLineComment(stream)
+- for columns in code_generator_util.ParseColumnStream(stream, delimiter='\t'):
++ for columns in code_generator_util.ParseColumnStream(stream, delimiter=b'\t'):
+ if len(columns) != 3:
+- logging.critical('format error: %s', '\t'.join(columns))
++ logging.critical('format error: %s', b'\t'.join(columns))
+ sys.exit(1)
+
+ emoticon = columns[0]
+ readings = columns[2]
+
+ # \xe3\x80\x80 is a full-width space
+- for reading in re.split(r'(?: |\xe3\x80\x80)+', readings.strip()):
++ for reading in re.split(br'(?: |\xe3\x80\x80)+', readings.strip()):
+ if not reading:
+ continue
+ zero_query_dict[reading].append(
+@@ -202,9 +202,9 @@
+ """Reads emoji data from stream and returns zero query data."""
+ zero_query_dict = defaultdict(list)
+ stream = code_generator_util.SkipLineComment(stream)
+- for columns in code_generator_util.ParseColumnStream(stream, delimiter='\t'):
++ for columns in code_generator_util.ParseColumnStream(stream, delimiter=b'\t'):
+ if len(columns) < 3:
+- logging.warning('format error: %s', '\t'.join(columns))
++ logging.warning('format error: %s', b'\t'.join(columns))
+ continue
+
+ symbol = columns[1]
+@@ -222,7 +222,7 @@
+ continue
+
+ # \xe3\x80\x80 is a full-width space
+- for reading in re.split(r'(?: |\xe3\x80\x80)+', readings.strip()):
++ for reading in re.split(br'(?: |\xe3\x80\x80)+', readings.strip()):
+ if not reading:
+ continue
+ zero_query_dict[reading].append(
+@@ -247,7 +247,7 @@
+
+ def IsValidKeyForZeroQuery(key):
+ """Returns if the key is valid for zero query trigger."""
+- is_ascii = all(ord(char) < 128 for char in key)
++ is_ascii = all(char < 128 for char in key)
+ return not is_ascii
+
+
+@@ -301,13 +301,13 @@
+
+ def main():
+ options = ParseOptions()
+- with open(options.input_rule, 'r') as input_stream:
++ with open(options.input_rule, 'rb') as input_stream:
+ zero_query_rule_dict = ReadZeroQueryRuleData(input_stream)
+- with open(options.input_symbol, 'r') as input_stream:
++ with open(options.input_symbol, 'rb') as input_stream:
+ zero_query_symbol_dict = ReadSymbolTsv(input_stream)
+- with open(options.input_emoji, 'r') as input_stream:
++ with open(options.input_emoji, 'rb') as input_stream:
+ zero_query_emoji_dict = ReadEmojiTsv(input_stream)
+- with open(options.input_emoticon, 'r') as input_stream:
++ with open(options.input_emoticon, 'rb') as input_stream:
+ zero_query_emoticon_dict = ReadEmoticonTsv(input_stream)
+
+ merged_zero_query_dict = MergeZeroQueryData(
+--- /src/prediction/gen_zero_query_number_data.py
++++ /src/prediction/gen_zero_query_number_data.py
+@@ -41,15 +41,15 @@
+ zero_query_dict = defaultdict(list)
+
+ for line in input_stream:
+- if line.startswith('#'):
++ if line.startswith(b'#'):
+ continue
+- line = line.rstrip('\r\n')
++ line = line.rstrip(b'\r\n')
+ if not line:
+ continue
+
+- tokens = line.split('\t')
++ tokens = line.split(b'\t')
+ key = tokens[0]
+- values = tokens[1].split(',')
++ values = tokens[1].split(b',')
+
+ for value in values:
+ zero_query_dict[key].append(
+@@ -71,7 +71,7 @@
+
+ def main():
+ options = ParseOption()
+- with open(options.input, 'r') as input_stream:
++ with open(options.input, 'rb') as input_stream:
+ zero_query_dict = ReadZeroQueryNumberData(input_stream)
+ util.WriteZeroQueryData(zero_query_dict,
+ options.output_token_array,
+--- /src/prediction/gen_zero_query_util.py
++++ /src/prediction/gen_zero_query_util.py
+@@ -69,7 +69,7 @@
+ output_string_array):
+ # Collect all the strings and assing index in ascending order
+ string_index = {}
+- for key, entry_list in zero_query_dict.iteritems():
++ for key, entry_list in zero_query_dict.items():
+ string_index[key] = 0
+ for entry in entry_list:
+ string_index[entry.value] = 0
+--- /src/rewriter/gen_counter_suffix_array.py
++++ /src/rewriter/gen_counter_suffix_array.py
+@@ -43,7 +43,7 @@
+ with codecs.open(id_file, 'r', encoding='utf-8') as stream:
+ stream = code_generator_util.ParseColumnStream(stream, num_column=2)
+ for pos_id, pos_name in stream:
+- if pos_name.startswith(u'名詞,接尾,助数詞'):
++ if pos_name.startswith('名詞,接尾,助数詞'):
+ pos_ids.add(pos_id)
+ return pos_ids
+
+--- /src/rewriter/gen_emoji_rewriter_data.py
++++ /src/rewriter/gen_emoji_rewriter_data.py
+@@ -74,19 +74,19 @@
+ the glyph (in other words, it has alternative (primary) code point, which
+ doesn't lead '>' and that's why we'll ignore it).
+ """
+- if not s or s[0] == '>':
++ if not s or s[0:1] == b'>':
+ return None
+ return int(s, 16)
+
+
+-_FULLWIDTH_RE = re.compile(ur'[！-～]') # U+FF01 - U+FF5E
++_FULLWIDTH_RE = re.compile(r'[！-～]') # U+FF01 - U+FF5E
+
+
+ def NormalizeString(string):
+ """Normalize full width ascii characters to half width characters."""
+- offset = ord(u'Ａ') - ord(u'A')
+- return _FULLWIDTH_RE.sub(lambda x: unichr(ord(x.group(0)) - offset),
+- unicode(string, 'utf-8')).encode('utf-8')
++ offset = ord('Ａ') - ord('A')
++ return _FULLWIDTH_RE.sub(lambda x: chr(ord(x.group(0)) - offset),
++ string.decode('utf-8')).encode('utf-8')
+
+
+ def ReadEmojiTsv(stream):
+@@ -96,14 +96,14 @@
+ token_dict = defaultdict(list)
+
+ stream = code_generator_util.SkipLineComment(stream)
+- for columns in code_generator_util.ParseColumnStream(stream, delimiter='\t'):
++ for columns in code_generator_util.ParseColumnStream(stream, delimiter=b'\t'):
+ if len(columns) != 13:
+- logging.critical('format error: %s', '\t'.join(columns))
++ logging.critical('format error: %s', b'\t'.join(columns))
+ sys.exit(1)
+
+- code_points = columns[0].split(' ')
++ code_points = columns[0].split(b' ')
+ # Emoji code point.
+- emoji = columns[1] if columns[1] else ''
++ emoji = columns[1] if columns[1] else b''
+ android_pua = ParseCodePoint(columns[2])
+ docomo_pua = ParseCodePoint(columns[3])
+ softbank_pua = ParseCodePoint(columns[4])
+@@ -112,10 +112,10 @@
+ readings = columns[6]
+
+ # [7]: Name defined in Unicode. It is ignored in current implementation.
+- utf8_description = columns[8] if columns[8] else ''
+- docomo_description = columns[9] if columns[9] else ''
+- softbank_description = columns[10] if columns[10] else ''
+- kddi_description = columns[11] if columns[11] else ''
++ utf8_description = columns[8] if columns[8] else b''
++ docomo_description = columns[9] if columns[9] else b''
++ softbank_description = columns[10] if columns[10] else b''
++ kddi_description = columns[11] if columns[11] else b''
+
+ if not android_pua or len(code_points) > 1:
+ # Skip some emoji, which is not supported on old devices.
+@@ -123,7 +123,7 @@
+ # - Composite emoji which has multiple code point.
+ # NOTE: Some Unicode 6.0 emoji don't have PUA, and it is also omitted.
+ # TODO(hsumita): Check the availability of such emoji and enable it.
+- logging.info('Skip %s', ' '.join(code_points))
++ logging.info('Skip %s', b' '.join(code_points))
+ continue
+
+ # Check consistency between carrier PUA codes and descriptions for Android
+@@ -132,7 +132,7 @@
+ (bool(softbank_pua) != bool(softbank_description)) or
+ (bool(kddi_pua) != bool(kddi_description))):
+ logging.warning('carrier PUA and description conflict: %s',
+- '\t'.join(columns))
++ b'\t'.join(columns))
+ continue
+
+ # Check if the character is usable on Android.
+@@ -140,7 +140,7 @@
+ android_pua = 0 # Replace None with 0.
+
+ if not emoji and not android_pua:
+- logging.info('Skip: %s', '\t'.join(columns))
++ logging.info('Skip: %s', b'\t'.join(columns))
+ continue
+
+ index = len(emoji_data_list)
+@@ -149,7 +149,7 @@
+ kddi_description))
+
+ # \xe3\x80\x80 is a full-width space
+- for reading in re.split(r'(?: |\xe3\x80\x80)+', readings.strip()):
++ for reading in re.split(br'(?: |\xe3\x80\x80)+', readings.strip()):
+ if reading:
+ token_dict[NormalizeString(reading)].append(index)
+
+@@ -159,7 +159,7 @@
+ def OutputData(emoji_data_list, token_dict,
+ token_array_file, string_array_file):
+ """Output token and string arrays to files."""
+- sorted_token_dict = sorted(token_dict.iteritems())
++ sorted_token_dict = sorted(token_dict.items())
+
+ strings = {}
+ for reading, _ in sorted_token_dict:
+@@ -171,7 +171,7 @@
+ strings[docomo_description] = 0
+ strings[softbank_description] = 0
+ strings[kddi_description] = 0
+- sorted_strings = sorted(strings.iterkeys())
++ sorted_strings = sorted(strings.keys())
+ for index, s in enumerate(sorted_strings):
+ strings[s] = index
+
+@@ -205,7 +205,7 @@
+
+ def main():
+ options = ParseOptions()
+- with open(options.input, 'r') as input_stream:
++ with open(options.input, 'rb') as input_stream:
+ (emoji_data_list, token_dict) = ReadEmojiTsv(input_stream)
+
+ OutputData(emoji_data_list, token_dict,
+--- /src/rewriter/gen_reading_correction_data.py
++++ /src/rewriter/gen_reading_correction_data.py
+@@ -63,7 +63,7 @@
+ def WriteData(input_path, output_value_array_path, output_error_array_path,
+ output_correction_array_path):
+ outputs = []
+- with open(input_path) as input_stream:
++ with open(input_path, 'rb') as input_stream:
+ input_stream = code_generator_util.SkipLineComment(input_stream)
+ input_stream = code_generator_util.ParseColumnStream(input_stream,
+ num_column=3)
+@@ -73,7 +73,7 @@
+
+ # In order to lookup the entries via |error| with binary search,
+ # sort outputs here.
+- outputs.sort(lambda x, y: cmp(x[1], y[1]) or cmp(x[0], y[0]))
++ outputs.sort(key=lambda x: (x[1], x[0]))
+
+ serialized_string_array_builder.SerializeToFile(
+ [value for (value, _, _) in outputs], output_value_array_path)
+--- /src/rewriter/gen_single_kanji_rewriter_data.py
++++ /src/rewriter/gen_single_kanji_rewriter_data.py
+@@ -52,7 +52,7 @@
+ stream = code_generator_util.ParseColumnStream(stream, num_column=2)
+ outputs = list(stream)
+ # For binary search by |key|, sort outputs here.
+- outputs.sort(lambda x, y: cmp(x[0], y[0]))
++ outputs.sort(key=lambda x: x[0])
+
+ return outputs
+
+@@ -72,7 +72,7 @@
+ variant_items.append([target, original, len(variant_types) - 1])
+
+ # For binary search by |target|, sort variant items here.
+- variant_items.sort(lambda x, y: cmp(x[0], y[0]))
++ variant_items.sort(key=lambda x: x[0])
+
+ return (variant_types, variant_items)
+
+@@ -151,10 +151,10 @@
+ def main():
+ options = _ParseOptions()
+
+- with open(options.single_kanji_file, 'r') as single_kanji_stream:
++ with open(options.single_kanji_file, 'rb') as single_kanji_stream:
+ single_kanji = ReadSingleKanji(single_kanji_stream)
+
+- with open(options.variant_file, 'r') as variant_stream:
++ with open(options.variant_file, 'rb') as variant_stream:
+ variant_info = ReadVariant(variant_stream)
+
+ WriteSingleKanji(single_kanji,
+--- /src/session/gen_session_stress_test_data.py
++++ /src/session/gen_session_stress_test_data.py
+@@ -50,24 +50,26 @@
+ """
+ result = ''
+ for c in s:
+- hexstr = hex(ord(c))
++ hexstr = hex(c)
+ # because hexstr contains '0x', remove the prefix and add our prefix
+ result += '\\x' + hexstr[2:]
+ return result
+
+ def GenerateHeader(file):
+ try:
+- print "const char *kTestSentences[] = {"
+- for line in open(file, "r"):
+- if line.startswith('#'):
++ print("const char *kTestSentences[] = {")
++ fh = open(file, "rb")
++ for line in fh:
++ if line.startswith(b'#'):
+ continue
+- line = line.rstrip('\r\n')
++ line = line.rstrip(b'\r\n')
+ if not line:
+ continue
+- print " \"%s\"," % escape_string(line)
+- print "};"
++ print(" \"%s\"," % escape_string(line))
++ fh.close()
++ print("};")
+ except:
+- print "cannot open %s" % (file)
++ print("cannot open %s" % (file))
+ sys.exit(1)
+
+ def main():
+--- /src/unix/ibus/gen_mozc_xml.py
++++ /src/unix/ibus/gen_mozc_xml.py
+@@ -74,7 +74,7 @@
+
+
+ def OutputXmlElement(param_dict, element_name, value):
+- print ' <%s>%s</%s>' % (element_name, (value % param_dict), element_name)
++ print(' <%s>%s</%s>' % (element_name, (value % param_dict), element_name))
+
+
+ def OutputXml(param_dict, component, engine_common, engines, setup_arg):
+@@ -90,26 +90,26 @@
+ engines: A dictionary from a property name to a list of property values of
+ engines. For example, {'name': ['mozc-jp', 'mozc', 'mozc-dv']}.
+ """
+- print '<component>'
+- for key in component:
++ print('<component>')
++ for key in sorted(component):
+ OutputXmlElement(param_dict, key, component[key])
+- print '<engines>'
++ print('<engines>')
+ for i in range(len(engines['name'])):
+- print '<engine>'
+- for key in engine_common:
++ print('<engine>')
++ for key in sorted(engine_common):
+ OutputXmlElement(param_dict, key, engine_common[key])
+ if setup_arg:
+ OutputXmlElement(param_dict, 'setup', ' '.join(setup_arg))
+- for key in engines:
++ for key in sorted(engines):
+ OutputXmlElement(param_dict, key, engines[key][i])
+- print '</engine>'
+- print '</engines>'
+- print '</component>'
++ print('</engine>')
++ print('</engines>')
++ print('</component>')
+
+
+ def OutputCppVariable(param_dict, prefix, variable_name, value):
+- print 'const char k%s%s[] = "%s";' % (prefix, variable_name.capitalize(),
+- (value % param_dict))
++ print('const char k%s%s[] = "%s";' % (prefix, variable_name.capitalize(),
++ (value % param_dict)))
+
+
+ def OutputCpp(param_dict, component, engine_common, engines):
+@@ -122,18 +122,18 @@
+ engines: ditto.
+ """
+ guard_name = 'MOZC_UNIX_IBUS_MAIN_H_'
+- print CPP_HEADER % (guard_name, guard_name)
+- for key in component:
++ print(CPP_HEADER % (guard_name, guard_name))
++ for key in sorted(component):
+ OutputCppVariable(param_dict, 'Component', key, component[key])
+- for key in engine_common:
++ for key in sorted(engine_common):
+ OutputCppVariable(param_dict, 'Engine', key, engine_common[key])
+- for key in engines:
+- print 'const char* kEngine%sArray[] = {' % key.capitalize()
++ for key in sorted(engines):
++ print('const char* kEngine%sArray[] = {' % key.capitalize())
+ for i in range(len(engines[key])):
+- print '"%s",' % (engines[key][i] % param_dict)
+- print '};'
+- print 'const size_t kEngineArrayLen = %s;' % len(engines['name'])
+- print CPP_FOOTER % guard_name
++ print('"%s",' % (engines[key][i] % param_dict))
++ print('};')
++ print('const size_t kEngineArrayLen = %s;' % len(engines['name']))
++ print(CPP_FOOTER % guard_name)
+
+
+ def CheckIBusVersion(options, minimum_version):
+--- /src/usage_stats/gen_stats_list.py
++++ /src/usage_stats/gen_stats_list.py
+@@ -37,23 +37,24 @@
+
+ def GetStatsNameList(filename):
+ stats = []
+- for line in open(filename, 'r'):
+- stat = line.strip()
+- if not stat or stat[0] == '#':
+- continue
+- stats.append(stat)
++ with open(filename, 'r') as file:
++ for line in file:
++ stat = line.strip()
++ if not stat or stat[0] == '#':
++ continue
++ stats.append(stat)
+ return stats
+
+
+ def main():
+ stats_list = GetStatsNameList(sys.argv[1])
+- print '// This header file is generated by gen_stats_list.py'
++ print('// This header file is generated by gen_stats_list.py')
+ for stats in stats_list:
+- print 'const char k%s[] = "%s";' % (stats, stats)
+- print 'const char *kStatsList[] = {'
++ print('const char k%s[] = "%s";' % (stats, stats))
++ print('const char *kStatsList[] = {')
+ for stats in stats_list:
+- print ' k%s,' % (stats)
+- print '};'
++ print(' k%s,' % (stats))
++ print('};')
+
+
+ if __name__ == '__main__':
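Note: besides the bytes conversions, this patch applies two standard Python 3 migrations — dict.iteritems()/iterkeys() become items()/keys(), and list.sort() no longer accepts a comparison function, so chained cmp() calls become tuple keys. A small self-contained sketch with made-up data, showing that the tuple key reproduces the old cmp ordering:

    # Python 2: outputs.sort(lambda x, y: cmp(x[1], y[1]) or cmp(x[0], y[0]))
    outputs = [(b'v2', b'err-b'), (b'v1', b'err-a'), (b'v0', b'err-a')]
    # Sort by the second field, breaking ties with the first.
    outputs.sort(key=lambda x: (x[1], x[0]))
    assert outputs == [(b'v0', b'err-a'), (b'v1', b'err-a'), (b'v2', b'err-b')]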
diff --git a/app-i18n/mozc/mozc-2.23.2815.102.ebuild b/app-i18n/mozc/mozc-2.23.2815.102.ebuild
index 425b785cbfab..8a55204d5db9 100644
--- a/app-i18n/mozc/mozc-2.23.2815.102.ebuild
+++ b/app-i18n/mozc/mozc-2.23.2815.102.ebuild
@@ -1,8 +1,8 @@
-# Copyright 2010-2019 Gentoo Authors
+# Copyright 2010-2020 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
EAPI="7"
-PYTHON_COMPAT=(python2_7)
+PYTHON_COMPAT=(python{3_6,3_7,3_8})
inherit elisp-common multiprocessing python-any-r1 toolchain-funcs
@@ -105,6 +105,10 @@ src_unpack() {
}
src_prepare() {
+ eapply -p2 "${FILESDIR}/${PN}-2.23.2815.102-python-3_1.patch"
+ eapply -p2 "${FILESDIR}/${PN}-2.23.2815.102-python-3_2.patch"
+ eapply -p2 "${FILESDIR}/${PN}-2.23.2815.102-python-3_3.patch"
+ eapply -p2 "${FILESDIR}/${PN}-2.23.2815.102-python-3_4.patch"
eapply -p2 "${FILESDIR}/${PN}-2.23.2815.102-system_libraries.patch"
eapply -p2 "${FILESDIR}/${PN}-2.23.2815.102-gcc-8.patch"
eapply -p2 "${FILESDIR}/${PN}-2.23.2815.102-protobuf_generated_classes_no_inheritance.patch"
diff --git a/app-i18n/mozc/mozc-9999.ebuild b/app-i18n/mozc/mozc-9999.ebuild
index d7c66b558d92..e82d56df3360 100644
--- a/app-i18n/mozc/mozc-9999.ebuild
+++ b/app-i18n/mozc/mozc-9999.ebuild
@@ -1,8 +1,8 @@
-# Copyright 2010-2019 Gentoo Authors
+# Copyright 2010-2020 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
EAPI="7"
-PYTHON_COMPAT=(python2_7)
+PYTHON_COMPAT=(python{3_6,3_7,3_8})
inherit elisp-common multiprocessing python-any-r1 toolchain-funcs
@@ -105,6 +105,10 @@ src_unpack() {
}
src_prepare() {
+ eapply -p2 "${FILESDIR}/${PN}-2.23.2815.102-python-3_1.patch"
+ eapply -p2 "${FILESDIR}/${PN}-2.23.2815.102-python-3_2.patch"
+ eapply -p2 "${FILESDIR}/${PN}-2.23.2815.102-python-3_3.patch"
+ eapply -p2 "${FILESDIR}/${PN}-2.23.2815.102-python-3_4.patch"
eapply -p2 "${FILESDIR}/${PN}-2.23.2815.102-system_libraries.patch"
eapply -p2 "${FILESDIR}/${PN}-2.23.2815.102-gcc-8.patch"
eapply -p2 "${FILESDIR}/${PN}-2.23.2815.102-protobuf_generated_classes_no_inheritance.patch"
diff --git a/app-i18n/tagainijisho/Manifest b/app-i18n/tagainijisho/Manifest
index b91fbe4731db..6aa6fe5974fc 100644
--- a/app-i18n/tagainijisho/Manifest
+++ b/app-i18n/tagainijisho/Manifest
@@ -1,15 +1,7 @@
-DIST JMdict-2018-06-12.gz 18306650 BLAKE2B 3ddb29bf631567717085912b1fa21c701e6183c94907f64121daedf358303df09a754bcdf51f24f0d7c95bf1bfe4ea13779f5e3532bd19a3f99a33751259a893 SHA512 cd8e0a737ccd1ccbe7c53ab0affb5d4ca068656966c75f0abbc184ffcdc69d8a1dbe0f096a16f0b59ecaed40371a29e566ee5076a7e80e3f51796e32798bed3e
-DIST JMdict-2019-10-01.gz 19418133 BLAKE2B a7f3fb45fafaa0d5fdaa7366d2d299cd872edfc6834743529604cb2803f60112daccf82b6c48db0e100b52fb31b1fe2705369fb8a1c2b83c0eba65176ef59d90 SHA512 033968dfc3504611db23269361d72564fbf81a59d7f1ed94f4303c51748f4f31a2b38b3c2a6d846941a886f357d5dd320bc0f043a151ac1c70e43df9c594a599
DIST JMdict-2020-01-01.gz 19493878 BLAKE2B a797cbabffaafdcc967078ebf91ba27280d9a2ba040021fa605e674246ad3837774b2f17a7f4386cec3a2f53afb742fcd9f0bb6784b2014659d8785d59e321cd SHA512 691077030421e909dc171c217e6951edd48b567bf0821c7b68da7a2fc410e3f55d73db1943a9b99128763655d03e175e431590d869032645ae32d69dae5acdc1
-DIST kanjidic2-2018-06-12.xml.gz 1482391 BLAKE2B c6f1d18f17b2107937fe07b77d6aa297d156d9c250cdc480000aeb7f8c9cd27a94e2cce1df80e4612d1bcadc94d92d5d013cfc8d923c5ceaba9ed5b0a692ae78 SHA512 700aba18e18ab271dbc583f1a1cc16dd7209e4aa48417201f0b51723d35f5aedcff299ed1503cb743151abce37c94c0facbab09dd0a46126131bb54f2004aa23
-DIST kanjidic2-2019-10-01.xml.gz 1482675 BLAKE2B faf2ce4367796b18c3e858ef05203610810e38e44cf3b6e06fceda4204809fd366f6c16604618bf8b668761c9ac2045ed18484a374f2648cc88ba43fa5e5eb7b SHA512 aff301e824a1c31ff4850f709ee6c4ee6a0a54aa4823a5fcb0c5419d7b96239c16bd091f5557f02e2b7c3d0ced8cb2ae19805723c46f9a3a906adecc20ec631c
DIST kanjidic2-2020-01-01.xml.gz 1482692 BLAKE2B f50f45e982842b1fc161551640aa7bb8bc13e858c64576af25101c0bbdcb5d5734ebba86be180434781ba62d37370eae9b53bad7a30eb06bcffb86233f85f3fe SHA512 df0d0827525ff0406b33811f9b0eb24f7ab99288879dc316af8125c24448b9919224d6cc7c20648791dbbe8f29d05d977e77c9316a9aeb69513e29cf2bc40e92
DIST kanjivg-20160426.xml.gz 3587660 BLAKE2B c85291f3671f0980186e8deb296cfa743322df438d6f09fd69a4f5a30c3db6f097d6ff0a817b88c5c9a2c8d4a40c90b31c0a54cb71d681849b557ee364707c48 SHA512 1d35a8cc5d4947f6bedfc71a2f08e2d1fda832d7a02588c00953e1746812b0e51d4d1c39dff49999b4f65ec850788359f6e0d1465f0e688b1bf753c4c67b3c54
-DIST tagainijisho-1.2.0_pre20180610092832.tar.gz 3634107 BLAKE2B f5cb75f45fced30c64f615147c3eb7e4d473e81dd734517710bcfdf390d1a34a1780196b6dbbb7a6d297df0bb8d73933d09a76eb6b8dd13d1c0759c562e81695 SHA512 3b17ff6972e3cfd35f4f9e8c575153a4c2021a66a238f631616452a16a151d604106586d438cdc574211bff54118d89e492f9460da4b25fef88a2354b0bd8555
-DIST tagainijisho-1.2.0_pre20190507124027.tar.gz 3634210 BLAKE2B ec11eba3e20f2a1d55b24c67b95bd98d9ec96a7476533bc42d56866c87f03fd8d58133b5f6aa4e6f37a7ca4dd8b447a0414f0b9651adaa6fb0141c6df4b61073 SHA512 ace6fd4ff46d1bea329f3c85ea6df63583f256d5a26b78e3d64d47be2a37f74b262c0b6254fad4b258b6bb1cb3eff3bb4a4ed712f9ee72f21810717ce1a8f3d8
DIST tagainijisho-1.2.0_pre20191230120610.tar.gz 3635034 BLAKE2B 4f86febae1dfe1de236668bd405294c1e11510b8a942abfcb378d0e46c38cc179d16f5365dbb8322c7ad47cc333a816ce883af5993e0be90f11a43f2710cd45d SHA512 4fa93b7a51415c57f1d81748f15f23e90e4d9a1f91c3f0d96447d08a5bf1bbfc6bc2dbd4083e0b9328e32795f01de94bae834b4e7ff1830cfbe0ec215f20d7f8
-EBUILD tagainijisho-1.2.0_pre20180610092832.ebuild 6180 BLAKE2B ac53048f5508366e943275fdf19ce7dfc7e5ccc4d228608d57828098911a5219ac89b0f4f5450cfba87dadd06fcde209438834e4264286b257cf98dba6e26bad SHA512 8ed6439eae16cea8e714fe8a5de659286414bbaffccc6b3c9a0c79d33a36534039b6a56a5e8fa936556a9264c2e4b164841ced3c3805f21da82928621198baab
-EBUILD tagainijisho-1.2.0_pre20190507124027_p20191001_p20191001.ebuild 6662 BLAKE2B 08869518725b5f883b75e2fbb1520c1db6c77ecafcd18dfb702b4c887103583f0def09895a0c62b2b64afb9597ec10bab9220d5ed1276c245434d4bd217564f4 SHA512 cf8efd353a6e9df25b4f198026c83b788a66f8b4a1b61dfddd3b77eb474a821fc4361a49c7b69905e268dedf59427cfe609fd97b690eaea67dfab230232aeb85
EBUILD tagainijisho-1.2.0_pre20191230120610_p20200101_p20200101.ebuild 6662 BLAKE2B b55c84d61fef76786a82f67904b1b4b058a430023f05a9521d0c1783e4834d0333362de53fb791e907451fb73e593ade756fad600363d2619774c9d18a69a70b SHA512 b3055cbbbd23ae22e827181a6c11e17943d3c604a2453afb6fb389c0d4a877f77585242d39c747e3a9cee81df52ce1754aac3e7616051dbfeab84e07845cf09c
EBUILD tagainijisho-9999.ebuild 6611 BLAKE2B 49eb2ad3a99a2cae8097c3fbe54f99b7d63fe1ba2b88793bfe9a57f38afc04ae4ccd5297642603cc55203ec461d2b56885cf0a504c75b9319194df878d27b752 SHA512 ff98dc941da2ae88e664564a5fbf674e75f5da2e8d1501dac23c294bc1ed718f6b8e7252c9e889cc9b3519217b9a2b37a5e6f38dd828dce7f269296af316311d
MISC metadata.xml 1133 BLAKE2B 863fcd0cbb85b715cf7666210c74a9e88423b0e982b3e05b4333036b607e4d2cf73b35dc6b8f5e549a35a6d8e2cc107f110f3500762d49065bd1ed2097dd6338 SHA512 cde2818f44f02091ac994ca049a25b90519b62b669b7923a52043df23a0de7f3e1f657d3080e574048556dbacd5521995addd35267ec9d1357360f473222cef7
diff --git a/app-i18n/tagainijisho/tagainijisho-1.2.0_pre20180610092832.ebuild b/app-i18n/tagainijisho/tagainijisho-1.2.0_pre20180610092832.ebuild
deleted file mode 100644
index 41902e9a643c..000000000000
--- a/app-i18n/tagainijisho/tagainijisho-1.2.0_pre20180610092832.ebuild
+++ /dev/null
@@ -1,189 +0,0 @@
-# Copyright 1999-2018 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-EAPI="6"
-
-inherit cmake-utils gnome2-utils
-
-if [[ "${PV}" == 9999 ]]; then
- inherit git-r3
-
- EGIT_REPO_URI="https://github.com/Gnurou/tagainijisho"
-elif [[ "${PV}" == *_pre* ]]; then
- inherit vcs-snapshot
-
- TAGAINIJISHO_GIT_REVISION="dde2ad85dc6dc47ac7862e728878a7c8d9d4faf6"
-fi
-if [[ "${PV}" == 9999 || "${PV}" == *_pre* ]]; then
- JMDICT_DATE="2018-06-12"
- KANJIDIC2_DATE="2018-06-12"
- KANJIVG_VERSION="20160426"
-fi
-
-DESCRIPTION="Open-source Japanese dictionary and kanji lookup tool"
-HOMEPAGE="https://www.tagaini.net/ https://github.com/Gnurou/tagainijisho"
-if [[ "${PV}" == 9999 ]]; then
- SRC_URI=""
-elif [[ "${PV}" == *_pre* ]]; then
- SRC_URI="https://github.com/Gnurou/${PN}/archive/${TAGAINIJISHO_GIT_REVISION}.tar.gz -> ${P}.tar.gz"
-else
- SRC_URI="https://github.com/Gnurou/${PN}/releases/download/${PV}/${P}.tar.gz"
-fi
-if [[ "${PV}" == *_pre* ]]; then
- # Upstream: http://ftp.monash.edu.au/pub/nihongo/JMdict.gz
- SRC_URI+=" https://home.apache.org/~arfrever/distfiles/JMdict-${JMDICT_DATE}.gz"
-fi
-if [[ "${PV}" == *_pre* ]]; then
- # Upstream: http://www.edrdg.org/kanjidic/kanjidic2.xml.gz
- SRC_URI+=" https://home.apache.org/~arfrever/distfiles/kanjidic2-${KANJIDIC2_DATE}.xml.gz"
-fi
-if [[ "${PV}" == 9999 || "${PV}" == *_pre* ]]; then
- SRC_URI+=" https://github.com/KanjiVG/kanjivg/releases/download/r${KANJIVG_VERSION}/kanjivg-${KANJIVG_VERSION}.xml.gz"
-fi
-
-LICENSE="GPL-3+ public-domain"
-SLOT="0"
-KEYWORDS="~amd64 ~x86"
-IUSE=""
-
-RDEPEND=">=dev-db/sqlite-3.12:3
- dev-qt/qtcore:5
- dev-qt/qtnetwork:5
- dev-qt/qtprintsupport:5
- dev-qt/qtwidgets:5"
-DEPEND="${RDEPEND}
- dev-qt/linguist-tools:5"
-
-pkg_langs=(ar cs de es fa fi fr hu id it nb nl pl pt ru sv th tr uk vi zh)
-IUSE+=" ${pkg_langs[@]/#/l10n_}"
-unset pkg_langs
-
-src_unpack() {
- if [[ "${PV}" == 9999 ]]; then
- git-r3_src_unpack
- elif [[ "${PV}" == *_pre* ]]; then
- unpack ${P}.tar.gz
- mv tagainijisho-${TAGAINIJISHO_GIT_REVISION} ${P} || die
- else
- unpack ${P}.tar.gz
- fi
-
- if [[ "${PV}" == 9999 ]]; then
- # JMdict.gz and kanjidic2.xml.gz are updated once per day.
-
- local distdir="${PORTAGE_ACTUAL_DISTDIR:-${DISTDIR}}"
- local today="$(TZ="UTC" date --date=today "+%Y-%m-%d")"
- local yesterday="$(TZ="UTC" date --date=yesterday "+%Y-%m-%d")"
-
- if [[ -f ${distdir}/JMdict-${today}.gz && -s ${distdir}/JMdict-${today}.gz ]]; then
- # Use previously downloaded file from today.
- JMDICT_DATE="${today}"
- elif [[ -f ${distdir}/JMdict-${yesterday}.gz && -s ${distdir}/JMdict-${yesterday}.gz ]]; then
- # Use previously downloaded file from yesterday. File from today may still be nonexistent.
- JMDICT_DATE="${yesterday}"
- else
- # Download file from today or yesterday.
- wget http://ftp.monash.edu.au/pub/nihongo/JMdict.gz -O JMdict.gz || die
- JMDICT_DATE="$(gzip -cd JMdict.gz | grep -E "^<!-- JMdict created: [[:digit:]]{4}-[[:digit:]]{2}-[[:digit:]]{2} -->$" | sed -e "s/.*\([[:digit:]]\{4\}-[[:digit:]]\{2\}-[[:digit:]]\{2\}\).*/\1/")"
- if [[ ${JMDICT_DATE} != ${today} && ${JMDICT_DATE} != ${yesterday} ]]; then
- die "Unexpected date in JMdict.gz: '${JMDICT_DATE}'"
- fi
- (
- addwrite "${distdir}"
- mv JMdict.gz "${distdir}/JMdict-${JMDICT_DATE}.gz" || die
- )
- fi
- einfo "Date in JMdict.gz: '${JMDICT_DATE}'"
-
- if [[ -f ${distdir}/kanjidic2-${today}.xml.gz && -s ${distdir}/kanjidic2-${today}.xml.gz ]]; then
- # Use previously downloaded file from today.
- KANJIDIC2_DATE="${today}"
- elif [[ -f ${distdir}/kanjidic2-${yesterday}.xml.gz && -s ${distdir}/kanjidic2-${yesterday}.xml.gz ]]; then
- # Use previously downloaded file from yesterday. File from today may still be nonexistent.
- KANJIDIC2_DATE="${yesterday}"
- else
- # Download file from today or yesterday.
- wget http://www.edrdg.org/kanjidic/kanjidic2.xml.gz -O kanjidic2.xml.gz || die
- KANJIDIC2_DATE="$(gzip -cd kanjidic2.xml.gz | grep -E "^<date_of_creation>[[:digit:]]{4}-[[:digit:]]{2}-[[:digit:]]{2}</date_of_creation>$" | sed -e "s/.*\([[:digit:]]\{4\}-[[:digit:]]\{2\}-[[:digit:]]\{2\}\).*/\1/")"
- if [[ ${KANJIDIC2_DATE} != ${today} && ${KANJIDIC2_DATE} != ${yesterday} ]]; then
- die "Unexpected date in kanjidic2.xml.gz: '${KANJIDIC2_DATE}'"
- fi
- (
- addwrite "${distdir}"
- mv kanjidic2.xml.gz "${distdir}/kanjidic2-${KANJIDIC2_DATE}.xml.gz" || die
- )
- fi
- einfo "Date in kanjidic2.xml.gz: '${KANJIDIC2_DATE}'"
-
- mkdir "${S}/3rdparty" || die
- gzip -cd "${distdir}/JMdict-${JMDICT_DATE}.gz" > "${S}/3rdparty/JMdict" || die
- gzip -cd "${distdir}/kanjidic2-${KANJIDIC2_DATE}.xml.gz" > "${S}/3rdparty/kanjidic2.xml" || die
- elif [[ "${PV}" == *_pre* ]]; then
- mkdir "${S}/3rdparty" || die
- pushd "${S}/3rdparty" > /dev/null || die
-
- unpack JMdict-${JMDICT_DATE}.gz
- mv JMdict-${JMDICT_DATE} JMdict || die
-
- unpack kanjidic2-${KANJIDIC2_DATE}.xml.gz
- mv kanjidic2-${KANJIDIC2_DATE}.xml kanjidic2.xml || die
-
- popd > /dev/null || die
- fi
-
- if [[ "${PV}" == 9999 || "${PV}" == *_pre* ]]; then
- pushd "${S}/3rdparty" > /dev/null || die
-
- unpack kanjivg-${KANJIVG_VERSION}.xml.gz
- mv kanjivg-${KANJIVG_VERSION}.xml kanjivg.xml || die
-
- popd > /dev/null || die
- fi
-}
-
-src_configure() {
- # GUI linguae
- # en is not optional, and build fails if none other than en is set, so adding ja as non-optional too.
- local lang use_lang
- for lang in i18n/*.ts; do
- lang=${lang#i18n/tagainijisho_}
- lang=${lang%.ts}
- case ${lang} in
- fa_IR|fi_FI|pt_BR)
- # Use generic tags.
- use_lang=${lang%%_*}
- ;;
- *)
- use_lang=${lang}
- ;;
- esac
-
- if [[ ${lang} != en && ${lang} != ja ]] && ! use l10n_${use_lang}; then
- rm i18n/tagainijisho_${lang}.ts || die
- fi
- done
-
- # Dictionary linguae
- # en is not optional here either, but nothing special needs to be done.
- local dict_langs
- for lang in $(sed -e 's/;/ /g' -ne '/set(DICT_LANG ".*")/s/.*"\(.*\)".*/\1/p' CMakeLists.txt); do
- if use l10n_${lang}; then
- dict_langs+="${dict_langs:+;}${lang}"
- fi
- done
-
- local mycmakeargs=(
- -DDICT_LANG="${dict_langs:-;}"
- -DEMBED_SQLITE=OFF
- )
-
- cmake-utils_src_configure
-}
-
-pkg_postinst() {
- gnome2_icon_cache_update
-}
-
-pkg_postrm() {
- gnome2_icon_cache_update
-}
diff --git a/app-i18n/tagainijisho/tagainijisho-1.2.0_pre20190507124027_p20191001_p20191001.ebuild b/app-i18n/tagainijisho/tagainijisho-1.2.0_pre20190507124027_p20191001_p20191001.ebuild
deleted file mode 100644
index 9f4d2eb48253..000000000000
--- a/app-i18n/tagainijisho/tagainijisho-1.2.0_pre20190507124027_p20191001_p20191001.ebuild
+++ /dev/null
@@ -1,200 +0,0 @@
-# Copyright 2014-2019 Gentoo Authors
-# Distributed under the terms of the GNU General Public License v2
-
-EAPI="7"
-
-inherit cmake-utils xdg-utils
-
-if [[ "${PV}" == 9999 ]]; then
- inherit git-r3
-
- EGIT_REPO_URI="https://github.com/Gnurou/tagainijisho"
-elif [[ "${PV}" == *_pre* ]]; then
- inherit vcs-snapshot
-
- TAGAINIJISHO_GIT_REVISION="d189cfcb389b8b7f481ea452fcdf1987514d60d7"
-fi
-if [[ "${PV}" != 9999 ]]; then
- TAGAINIJISHO_VERSION="${PV%_p*_p*}"
- JMDICT_DATE="${PV#${TAGAINIJISHO_VERSION}_p}"
- JMDICT_DATE="${JMDICT_DATE%_p*}"
- JMDICT_DATE="${JMDICT_DATE:0:4}-${JMDICT_DATE:4:2}-${JMDICT_DATE:6}"
- KANJIDIC2_DATE="${PV#${TAGAINIJISHO_VERSION}_p*_p}"
- KANJIDIC2_DATE="${KANJIDIC2_DATE:0:4}-${KANJIDIC2_DATE:4:2}-${KANJIDIC2_DATE:6}"
-fi
-if [[ "${PV}" == 9999 || "${PV}" == *_pre* ]]; then
- KANJIVG_VERSION="20160426"
-fi
-
-DESCRIPTION="Open-source Japanese dictionary and kanji lookup tool"
-HOMEPAGE="https://www.tagaini.net/ https://github.com/Gnurou/tagainijisho"
-if [[ "${PV}" == 9999 ]]; then
- SRC_URI=""
-elif [[ "${PV}" == *_pre* ]]; then
- SRC_URI="https://github.com/Gnurou/${PN}/archive/${TAGAINIJISHO_GIT_REVISION}.tar.gz -> ${PN}-${TAGAINIJISHO_VERSION}.tar.gz"
-else
- SRC_URI="https://github.com/Gnurou/${PN}/releases/download/${PV}/${PN}-${TAGAINIJISHO_VERSION}.tar.gz"
-fi
-if [[ "${PV}" != 9999 ]]; then
- # Upstream: http://ftp.monash.edu.au/pub/nihongo/JMdict.gz
- SRC_URI+=" https://home.apache.org/~arfrever/distfiles/JMdict-${JMDICT_DATE}.gz"
- # Upstream: http://www.edrdg.org/kanjidic/kanjidic2.xml.gz
- SRC_URI+=" https://home.apache.org/~arfrever/distfiles/kanjidic2-${KANJIDIC2_DATE}.xml.gz"
-fi
-if [[ "${PV}" == 9999 || "${PV}" == *_pre* ]]; then
- SRC_URI+=" https://github.com/KanjiVG/kanjivg/releases/download/r${KANJIVG_VERSION}/kanjivg-${KANJIVG_VERSION}.xml.gz"
-fi
-
-LICENSE="GPL-3+ public-domain"
-SLOT="0"
-KEYWORDS="~amd64 ~x86"
-IUSE=""
-if [[ "${PV}" == 9999 ]]; then
- PROPERTIES="live"
-fi
-
-BDEPEND="dev-qt/linguist-tools:5"
-DEPEND=">=dev-db/sqlite-3.12:3
- dev-qt/qtcore:5
- dev-qt/qtnetwork:5
- dev-qt/qtprintsupport:5
- dev-qt/qtwidgets:5"
-RDEPEND="${DEPEND}"
-
-pkg_langs=(ar cs de es fa fi fr hu id it nb nl pl pt ru sv th tr uk vi zh)
-IUSE+=" ${pkg_langs[@]/#/l10n_}"
-unset pkg_langs
-
-if [[ "${PV}" != 9999 ]]; then
- S="${WORKDIR}/${PN}-${TAGAINIJISHO_VERSION}"
-fi
-
-src_unpack() {
- if [[ "${PV}" == 9999 ]]; then
- git-r3_src_unpack
- elif [[ "${PV}" == *_pre* ]]; then
- unpack ${PN}-${TAGAINIJISHO_VERSION}.tar.gz
- mv ${PN}-${TAGAINIJISHO_GIT_REVISION} ${PN}-${TAGAINIJISHO_VERSION} || die
- else
- unpack ${PN}-${TAGAINIJISHO_VERSION}.tar.gz
- fi
-
- if [[ "${PV}" == 9999 ]]; then
- # JMdict.gz and kanjidic2.xml.gz are updated once per day.
-
- local distdir="${PORTAGE_ACTUAL_DISTDIR:-${DISTDIR}}"
- local today="$(TZ="UTC" date --date=today "+%Y-%m-%d")"
- local yesterday="$(TZ="UTC" date --date=yesterday "+%Y-%m-%d")"
-
- if [[ -f ${distdir}/JMdict-${today}.gz && -s ${distdir}/JMdict-${today}.gz ]]; then
- # Use previously downloaded file from today.
- JMDICT_DATE="${today}"
- elif [[ -f ${distdir}/JMdict-${yesterday}.gz && -s ${distdir}/JMdict-${yesterday}.gz ]]; then
- # Use previously downloaded file from yesterday. File from today may still be nonexistent.
- JMDICT_DATE="${yesterday}"
- else
- # Download file from today or yesterday.
- wget http://ftp.monash.edu.au/pub/nihongo/JMdict.gz -O JMdict.gz || die
- JMDICT_DATE="$(gzip -cd JMdict.gz | grep -E "^<!-- JMdict created: [[:digit:]]{4}-[[:digit:]]{2}-[[:digit:]]{2} -->$" | sed -e "s/.*\([[:digit:]]\{4\}-[[:digit:]]\{2\}-[[:digit:]]\{2\}\).*/\1/")"
- if [[ ${JMDICT_DATE} != ${today} && ${JMDICT_DATE} != ${yesterday} ]]; then
- die "Unexpected date in JMdict.gz: '${JMDICT_DATE}'"
- fi
- (
- addwrite "${distdir}"
- mv JMdict.gz "${distdir}/JMdict-${JMDICT_DATE}.gz" || die
- )
- fi
- einfo "Date in JMdict.gz: '${JMDICT_DATE}'"
-
- if [[ -f ${distdir}/kanjidic2-${today}.xml.gz && -s ${distdir}/kanjidic2-${today}.xml.gz ]]; then
- # Use previously downloaded file from today.
- KANJIDIC2_DATE="${today}"
- elif [[ -f ${distdir}/kanjidic2-${yesterday}.xml.gz && -s ${distdir}/kanjidic2-${yesterday}.xml.gz ]]; then
- # Use previously downloaded file from yesterday. File from today may still be nonexistent.
- KANJIDIC2_DATE="${yesterday}"
- else
- # Download file from today or yesterday.
- wget http://www.edrdg.org/kanjidic/kanjidic2.xml.gz -O kanjidic2.xml.gz || die
- KANJIDIC2_DATE="$(gzip -cd kanjidic2.xml.gz | grep -E "^<date_of_creation>[[:digit:]]{4}-[[:digit:]]{2}-[[:digit:]]{2}</date_of_creation>$" | sed -e "s/.*\([[:digit:]]\{4\}-[[:digit:]]\{2\}-[[:digit:]]\{2\}\).*/\1/")"
- if [[ ${KANJIDIC2_DATE} != ${today} && ${KANJIDIC2_DATE} != ${yesterday} ]]; then
- die "Unexpected date in kanjidic2.xml.gz: '${KANJIDIC2_DATE}'"
- fi
- (
- addwrite "${distdir}"
- mv kanjidic2.xml.gz "${distdir}/kanjidic2-${KANJIDIC2_DATE}.xml.gz" || die
- )
- fi
- einfo "Date in kanjidic2.xml.gz: '${KANJIDIC2_DATE}'"
-
- mkdir "${S}/3rdparty" || die
- gzip -cd "${distdir}/JMdict-${JMDICT_DATE}.gz" > "${S}/3rdparty/JMdict" || die
- gzip -cd "${distdir}/kanjidic2-${KANJIDIC2_DATE}.xml.gz" > "${S}/3rdparty/kanjidic2.xml" || die
- else
- mkdir "${S}/3rdparty" || die
- pushd "${S}/3rdparty" > /dev/null || die
-
- unpack JMdict-${JMDICT_DATE}.gz
- mv JMdict-${JMDICT_DATE} JMdict || die
-
- unpack kanjidic2-${KANJIDIC2_DATE}.xml.gz
- mv kanjidic2-${KANJIDIC2_DATE}.xml kanjidic2.xml || die
-
- popd > /dev/null || die
- fi
-
- if [[ "${PV}" == 9999 || "${PV}" == *_pre* ]]; then
- pushd "${S}/3rdparty" > /dev/null || die
-
- unpack kanjivg-${KANJIVG_VERSION}.xml.gz
- mv kanjivg-${KANJIVG_VERSION}.xml kanjivg.xml || die
-
- popd > /dev/null || die
- fi
-}
-
-src_configure() {
- # GUI linguae
- # en is not optional, and build fails if none other than en is set, so adding ja as non-optional too.
- local lang use_lang
- for lang in i18n/*.ts; do
- lang=${lang#i18n/tagainijisho_}
- lang=${lang%.ts}
- case ${lang} in
- fa_IR|fi_FI|pt_BR)
- # Use generic tags.
- use_lang=${lang%%_*}
- ;;
- *)
- use_lang=${lang}
- ;;
- esac
-
- if [[ ${lang} != en && ${lang} != ja ]] && ! use l10n_${use_lang}; then
- rm i18n/tagainijisho_${lang}.ts || die
- fi
- done
-
- # Dictionary linguae
- # en is not optional here either, but nothing special needs to be done.
- local dict_langs
- for lang in $(sed -e 's/;/ /g' -ne '/set(DICT_LANG ".*")/s/.*"\(.*\)".*/\1/p' CMakeLists.txt); do
- if use l10n_${lang}; then
- dict_langs+="${dict_langs:+;}${lang}"
- fi
- done
-
- local mycmakeargs=(
- -DDICT_LANG="${dict_langs:-;}"
- -DEMBED_SQLITE=OFF
- )
-
- cmake-utils_src_configure
-}
-
-pkg_postinst() {
- xdg_icon_cache_update
-}
-
-pkg_postrm() {
- xdg_icon_cache_update
-}