{
    "paper_id": "R11-1024",
    "header": {
        "generated_with": "S2ORC 1.0.0",
        "date_generated": "2023-01-19T15:03:46.668927Z"
    },
    "title": "A Named Entity Recognition Method using Rules Acquired from Unlabeled Data",
    "authors": [
        {
            "first": "Tomoya",
            "middle": [],
            "last": "Iwakura",
            "suffix": "",
            "affiliation": {
                "laboratory": "",
                "institution": "Fujitsu Laboratories Ltd",
                "location": {
                    "addrLine": "1-1, Kamikodanaka 4-chome, Nakahara-ku",
                    "postCode": "211-8588",
                    "settlement": "Kawasaki",
                    "country": "Japan"
                }
            },
            "email": "iwakura.tomoya@jp.fujitsu.com"
        }
    ],
    "year": "",
    "venue": null,
    "identifiers": {},
    "abstract": "We propose a Named Entity (NE) recognition method using rules acquired from unlabeled data. Rules are acquired from automatically labeled data with an NE recognizer. These rules are used to identify NEs, the beginning of NEs, or the end of NEs. The application results of rules are used as features for machine learning based NE recognizers. In addition, we use word information acquired from unlabeled data as in a previous work. The word information includes the candidate NE classes of each word, the candidate NE classes of co-occurring words of each word, and so on. We evaluate our method with IREX data set for Japanese NE recognition and unlabeled data consisting of more than one billion words. The experimental results show that our method using rules and word information achieves the best accuracy on the GENERAL and ARREST tasks of IREX.",
    "pdf_parse": {
        "paper_id": "R11-1024",
        "_pdf_hash": "",
        "abstract": [
            {
                "text": "We propose a Named Entity (NE) recognition method using rules acquired from unlabeled data. Rules are acquired from automatically labeled data with an NE recognizer. These rules are used to identify NEs, the beginning of NEs, or the end of NEs. The application results of rules are used as features for machine learning based NE recognizers. In addition, we use word information acquired from unlabeled data as in a previous work. The word information includes the candidate NE classes of each word, the candidate NE classes of co-occurring words of each word, and so on. We evaluate our method with IREX data set for Japanese NE recognition and unlabeled data consisting of more than one billion words. The experimental results show that our method using rules and word information achieves the best accuracy on the GENERAL and ARREST tasks of IREX.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Abstract",
                "sec_num": null
            }
        ],
        "body_text": [
            {
                "text": "Named Entity (NE) recognition aims to recognize proper nouns and numerical expressions in text, such as names of people, locations, organizations, dates, times, and so on. NE recognition is one of the basic technologies used in text processing such as Information Extraction and Question Answering.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "To implement NE recognizers, semisupervised-based methods have recently been widely applied. These methods use several different types of information obtained from unlabeled data, such as word clusters (Freitag, 2004; Miller et al., 2004) , the clusters of multi-word nouns (Kazama and Torisawa, 2008) , phrase clusters (Lin and Wu, 2009) , hyponymy relations extracted from WikiPedia (Kazama and Torisawa, 2008) , NE-related word information (Iwakura, 2010) , and the outputs of classifiers or parsers created from unlabeled data (Ando and Zhang, 2005) . These previous works have shown that features acquired from large sets of unlabeled data can contribute to improved accuracy. From the results of these previous works, we see that several types of features augmented with unlabeled data contribute to improved accuracy.",
                "cite_spans": [
                    {
                        "start": 202,
                        "end": 217,
                        "text": "(Freitag, 2004;",
                        "ref_id": "BIBREF2"
                    },
                    {
                        "start": 218,
                        "end": 238,
                        "text": "Miller et al., 2004)",
                        "ref_id": "BIBREF12"
                    },
                    {
                        "start": 274,
                        "end": 301,
                        "text": "(Kazama and Torisawa, 2008)",
                        "ref_id": "BIBREF10"
                    },
                    {
                        "start": 320,
                        "end": 338,
                        "text": "(Lin and Wu, 2009)",
                        "ref_id": "BIBREF11"
                    },
                    {
                        "start": 375,
                        "end": 412,
                        "text": "WikiPedia (Kazama and Torisawa, 2008)",
                        "ref_id": null
                    },
                    {
                        "start": 443,
                        "end": 458,
                        "text": "(Iwakura, 2010)",
                        "ref_id": "BIBREF9"
                    },
                    {
                        "start": 531,
                        "end": 553,
                        "text": "(Ando and Zhang, 2005)",
                        "ref_id": "BIBREF0"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "Therefore, if we can incorporate new features augmented with unlabeled data, we expect more improved accuracy.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "We propose a Named Entity recognition method using rules acquired from unlabeled data. Our method uses rules identifying not only whole NEs, but also the beginning of NEs or the end of NEs. Rules are acquired from automatically labeled data with an NE recognizer. The application results of rules are used as features for machine-learning based NE recognitions. Compared with previous works using rules identifying NEs acquired from manually labeled data (Isozaki, 2001) , or lists of NEs acquired from unlabeled data (Talukdar et al., 2006) , our method uses new features such as identification results of the beginning of NEs and the end of NEs. In addition, we use word information (Iwakura, 2010) . The word information includes the candidate NE classes of each word, the candidate NE classes of co-occurring words of each word, and so on. The word information is also acquired from automatically labeled data with an NE recognizer.",
                "cite_spans": [
                    {
                        "start": 455,
                        "end": 470,
                        "text": "(Isozaki, 2001)",
                        "ref_id": "BIBREF6"
                    },
                    {
                        "start": 518,
                        "end": 541,
                        "text": "(Talukdar et al., 2006)",
                        "ref_id": "BIBREF17"
                    },
                    {
                        "start": 685,
                        "end": 700,
                        "text": "(Iwakura, 2010)",
                        "ref_id": "BIBREF9"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "We report experimental results with the IREX Japanese NE recognition data set (IREX, 1999) . The experimental results show that our method using rules and word information achieves the best accuracy on the GENERAL and ARREST tasks. The experimental results also show that our method contributes to fast improvement of accuracy compared with only using manually labeled ",
                "cite_spans": [
                    {
                        "start": 78,
                        "end": 90,
                        "text": "(IREX, 1999)",
                        "ref_id": "BIBREF4"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "This section describes our NE recognition method that combines both word-based and characterbased NE recognitions.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Japanese Named Entity Recognition",
                "sec_num": "2"
            },
            {
                "text": "Each NE consists of one or more words. To recognize NEs, we have to identify word chunks with their NE classes. We use Start/End (SE) representation (Uchimoto et al., 2000) because an SE representation-based NE recognizer shows the best performance among previous works (Sasano and Kurohashi, 2008) . SE representation uses five tags which are S, B, I, E and O, for representing chunks. S means that the current word is a chunk consisting of only one word. B means the start of a chunk consisting of more than one word. E means the end of a chunk consisting of more than one word. I means the inside of a chunk consisting of more than two words. O means the outside of any chunk. We use the IREX Japanese NE recognition task for our evaluation. The task is to recognize the eight NE classes. The SE based NE label set for IREX task has (8 \u00d7 4) + 1 = 33 labels such as B-PERSON, S-PERSON, and so on.",
                "cite_spans": [
                    {
                        "start": 149,
                        "end": 172,
                        "text": "(Uchimoto et al., 2000)",
                        "ref_id": "BIBREF18"
                    },
                    {
                        "start": 270,
                        "end": 298,
                        "text": "(Sasano and Kurohashi, 2008)",
                        "ref_id": "BIBREF14"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Chunk Representation",
                "sec_num": "2.1"
            },
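The label inventory above is mechanical enough to enumerate directly. A minimal sketch (the eight IREX class names written out here are an assumption about their exact spelling):

```python
# Sketch: enumerating the SE label set for the eight IREX NE classes.
# (8 classes x 4 in-chunk tags) + 1 outside tag = 33 labels.
NE_CLASSES = ["ORGANIZATION", "PERSON", "LOCATION", "ARTIFACT",
              "DATE", "TIME", "MONEY", "PERCENT"]
SE_TAGS = ["S", "B", "I", "E"]

LABELS = [f"{t}-{c}" for c in NE_CLASSES for t in SE_TAGS] + ["O"]
assert len(LABELS) == (8 * 4) + 1 == 33
```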
            {
                "text": "We classify each word into one of the NE labels defined by the SE representation for recognizing NEs. Japanese has no word boundary marker. To segment words from Japanese texts, we use MeCab 0.98 with ipadic-2.7.0. 1 Our NE recognizer uses features extracted from the current word, the preceding two words and the two succeeding words (5-word window). The basic features are the word surfaces, the last characters, the base-forms, the readings, the POS tags, and the character types of words within 5-word window size. The base-forms, the readings, and the POS tags are given by MeCab. Base-forms are representative expressions for conjugational words. If the base-form of each word is not equivalent to the word surface, we use the base-form as a feature. If a word consists of only one character, the character type is expressed by using the corresponding character types listed in Table 1 . If a word consists of more than one character, the character type is expressed by a combination of the basic character types listed in Table 1 , such as Kanji-Hiragana. MeCab uses the set of POS tags having at most four levels of subcategories. We use all the levels of POS tags as POS tag features.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 884,
                        "end": 891,
                        "text": "Table 1",
                        "ref_id": "TABREF0"
                    },
                    {
                        "start": 1029,
                        "end": 1036,
                        "text": "Table 1",
                        "ref_id": "TABREF0"
                    }
                ],
                "eq_spans": [],
                "section": "Word-based NE Recognition",
                "sec_num": "2.2"
            },
            {
                "text": "We use outputs of rules to a current word and word information within 5-word window size as features. The rules and the word information are acquired from automatically labeled data with an NE recognizer. We describe rules in section 3. We use the following NE-related labels of words from unlabeled data as word information as in (Iwakura, 2010) .",
                "cite_spans": [
                    {
                        "start": 331,
                        "end": 346,
                        "text": "(Iwakura, 2010)",
                        "ref_id": "BIBREF9"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Word-based NE Recognition",
                "sec_num": "2.2"
            },
            {
                "text": "Candidate NE labels: We use NE labels assigned to each word more than or equal to 50 times as candidate NE labels of words.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Word-based NE Recognition",
                "sec_num": "2.2"
            },
            {
                "text": "Candidate co-occurring NE labels: We use NE labels assigned to co-occurring words of each word more than or equal to 50 times as candidate co-occurring NE labels of the word.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Word-based NE Recognition",
                "sec_num": "2.2"
            },
            {
                "text": "Frequency information of candidate NE labels and candidate co-occurring NE labels: These are the frequencies of the NE candidate labels of each word on the automatically labeled data. We categorize the frequencies of these NErelated labels by the frequency of each word n; 50 \u2264 n \u2264 100, 100 < n \u2264 500, 500 < n \u2264 1000, 1000 < n \u2264 5000, 5000 < n \u2264 10000, 10000 < n \u2264 50000, 50000 < n \u2264 100000, and 100000 < n.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Word-based NE Recognition",
                "sec_num": "2.2"
            },
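A minimal sketch of the frequency categorization just described; the bucket boundaries follow the ranges listed above, while the exact string format of the emitted feature is an assumption:

```python
def freq_bucket(n):
    """Map a count n to its frequency-range feature, or None if n < 50."""
    if n < 50:
        return None  # below the 50-occurrence threshold, nothing is recorded
    if n <= 100:
        return "50<=n<=100"
    for lo, hi in [(100, 500), (500, 1000), (1000, 5000), (5000, 10000),
                   (10000, 50000), (50000, 100000)]:
        if lo < n <= hi:
            return f"{lo}<n<={hi}"
    return "100000<n"

# A count of 10,000 falls into the 5000 < n <= 10000 range:
assert freq_bucket(10000) == "5000<n<=10000"
```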
            {
                "text": "Ranking of candidate NE labels: This information is the ranking of candidate NE class labels for each word. Each ranking is decided according to the label frequencies.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Word-based NE Recognition",
                "sec_num": "2.2"
            },
            {
                "text": "For example, we obtain the following statistics from automatically labeled data with an NE recognizer for Tanaka: S-PERSON was assigned to Tanaka 10,000 times, B-PERSON was assigned to Tanaka 1,000 times, and I-PERSON was assigned to words appearing next to Tanaka 1,000 times. The following NE-related labels are acquired for Tanaka: Candidate NE labels are S-PERSON and B-ORGANIZATION. Frequency information of candidate NE labels are 5000 < n \u2264 10000 for S-PERSON, and 500 < n \u2264 1000 for B-ORGANIZATION. The ranking of candidate NE labels are the first for S-PERSON, and second for B-ORGANIZATION. Candidate co-occurring NE labels at the next word position is I-PERSON. Frequency information of candidate co-occurring NE labels at the next word position is 500 < n \u2264 1000 for I-PERSON.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Word-based NE Recognition",
                "sec_num": "2.2"
            },
            {
                "text": "Japanese NEs sometimes include partial words that form the beginning, the end of NE chunks or whole NEs. 2 To recognize Japanese NEs including partial words, we use a character-unitchunking-based NE recognition algorithm (Asahara and Matsumoto, 2003; Nakano and Hirai, 2004) following word-based NE recognition as in (Iwakura, 2010) .",
                "cite_spans": [
                    {
                        "start": 221,
                        "end": 250,
                        "text": "(Asahara and Matsumoto, 2003;",
                        "ref_id": "BIBREF1"
                    },
                    {
                        "start": 251,
                        "end": 274,
                        "text": "Nakano and Hirai, 2004)",
                        "ref_id": "BIBREF13"
                    },
                    {
                        "start": 317,
                        "end": 332,
                        "text": "(Iwakura, 2010)",
                        "ref_id": "BIBREF9"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Character-based NE Recognition",
                "sec_num": "2.3"
            },
            {
                "text": "Our character-based NE recognizer uses features extracted from the current character, the preceding two characters and the two succeeding characters (5-character window). The features extracted from each character within the window size are the followings; the character itself, the character type of the character listed in Table 1 , and the NE labels of two preceding recognition results in the direction from the end to the beginning.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 325,
                        "end": 332,
                        "text": "Table 1",
                        "ref_id": "TABREF0"
                    }
                ],
                "eq_spans": [],
                "section": "Character-based NE Recognition",
                "sec_num": "2.3"
            },
            {
                "text": "In addition, we use words including characters within the window size. The features of the words are the character types, the POS tags, and the NE labels assigned by a word-based NE recognizer.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Character-based NE Recognition",
                "sec_num": "2.3"
            },
            {
                "text": "As for words including characters, we extract features as follows. Let W (c i ) be the word including the i-th character c i and P (c i ) be the identifier that indicates the position where c i appears in W (c i ). We combine W (c i ) and P (c i ) to create a feature. P (c i ) is one of the followings: B for a character that is the beginning of a word, I for a character that is in the inside of a word, E for a character that is the end of a word, and S for a character that is a word. 3 We use the POS tags of words including characters within 5-character window. Let P OS(W (c i )) be the POS tag of the word W (c i ) including the ith character c i . We express these features with the position identifier P (c i ) like P (c i )-P OS(W (c i )). In addition, we use the character types of words including characters. To utilize outputs of a wordbased NE recognizer, we use NE labels of words assigned by a word-unit NE recognizer. Each character is classified into one of the 33 NE labels provided by the SE representation.",
                "cite_spans": [
                    {
                        "start": 489,
                        "end": 490,
                        "text": "3",
                        "ref_id": null
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Character-based NE Recognition",
                "sec_num": "2.3"
            },
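A minimal sketch of the per-character position features just described, assuming pre-segmented words with their POS tags as input (the $-prefixed tag strings follow the notation of the example in section 3.2):

```python
def char_position_features(words, pos_tags):
    """Yield (character, P(c_i), P(c_i)-POS(W(c_i))) for each character,
    where P is S/B/I/E depending on the character's position in its word."""
    for word, pos in zip(words, pos_tags):
        for j, ch in enumerate(word):
            if len(word) == 1:
                p = "S"      # the character is a word by itself
            elif j == 0:
                p = "B"      # beginning of the word
            elif j == len(word) - 1:
                p = "E"      # end of the word
            else:
                p = "I"      # inside the word
            yield ch, p, f"{p}-{pos}"

# e.g. the word "went" tagged $V yields B-$V, I-$V, I-$V, E-$V for w, e, n, t.
```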
            {
                "text": "We use a boosting-based learner that learns rules consisting of a feature, or rules represented by combinations of features consisting of more than one feature (Iwakura and Okamoto, 2008) . The boosting algorithm achieves fast training speed by training a weak-learner that learns several rules from a small portion of candidate rules. Candidate rules are generated from a subset of features called bucket. The parameters for the boosting algorithm are as follows. We used the number of rules to be learned as R=100,000, the bucketing size for splitting features into subsets as |B|=1,000, the number of rules learned at each boosting iteration as \u03bd =10, the number of candidate rules used to generate new combinations of features at each rule size as \u03c9=10, and the maximum number of features in rules as \u03b6=2.",
                "cite_spans": [
                    {
                        "start": 160,
                        "end": 187,
                        "text": "(Iwakura and Okamoto, 2008)",
                        "ref_id": "BIBREF7"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Machine Learning Algorithm",
                "sec_num": "2.4"
            },
            {
                "text": "The boosting algorithm operates on binary classification problems. To extend the boosting to multi-class, we used the one-vs-the-rest method. To identify proper tag sequences, we use the Viterbi search. To apply the Viterbi search, we convert the confidence value of each classifier into the range of 0 to 1 with sigmoid function defined as s(X) = 1/(1 + exp(\u2212\u03b2X)), where X is the output of a classifier to an input. We used \u03b2=1 in this experiment. Then we select a tag sequence which maximizes the sum of those log values.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Machine Learning Algorithm",
                "sec_num": "2.4"
            },
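A minimal sketch of this decoding step: raw one-vs-rest confidences are squashed with the sigmoid, and a Viterbi search picks the sequence with the highest sum of log values. The `allowed` transition predicate is an assumption, since the legal SE tag transitions are not spelled out here:

```python
import math

def sigmoid(x, beta=1.0):
    # s(X) = 1 / (1 + exp(-beta * X)), mapping a margin into (0, 1)
    return 1.0 / (1.0 + math.exp(-beta * x))

def viterbi(scores, allowed):
    """scores: per position, a dict {label: raw classifier confidence}.
    allowed(prev, cur): whether the label bigram is a legal SE transition.
    Returns the label sequence maximizing the sum of log sigmoid scores."""
    best = [{lab: (math.log(sigmoid(s)), None) for lab, s in scores[0].items()}]
    for t in range(1, len(scores)):
        layer = {}
        for lab, s in scores[t].items():
            cands = [(best[t - 1][p][0] + math.log(sigmoid(s)), p)
                     for p in best[t - 1] if allowed(p, lab)]
            if cands:
                layer[lab] = max(cands)
        best.append(layer)
    lab = max(best[-1], key=lambda l: best[-1][l][0])  # best final label
    path = [lab]
    for t in range(len(scores) - 1, 0, -1):            # backtrack
        lab = best[t][lab][1]
        path.append(lab)
    return path[::-1]
```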
            {
                "text": "To obtain a fast processing and training speed, we apply a technique to control the generation of combinations of features (Iwakura, 2009) . This is because fast processing speed is required to obtain word information and rules from large unlabeled data. Using this technique, instead of manually specifying combinations of features to be used, features that are not used in combinations of features are specified as atomic features. The boosting algorithm learns rules consisting of more than one feature from the combinations of features generated from non-atomic features, and rules consisting of only a feature from the atomic and the non-atomic features. We can obtain faster training speed and processing speed because we can reduce the number of combinations of features to be examined by specifying part of features as atomic. We specify features based on word information and rules acquired from unlabeled data as the atomic features.",
                "cite_spans": [
                    {
                        "start": 123,
                        "end": 138,
                        "text": "(Iwakura, 2009)",
                        "ref_id": "BIBREF8"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Machine Learning Algorithm",
                "sec_num": "2.4"
            },
            {
                "text": "This section describes rules and a method to acquire rules.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Rules Acquired from Unlabeled Data",
                "sec_num": "3"
            },
            {
                "text": "Previous works such as Isozaki (Isozaki, 2001) , Talukdar et al., (Talukdar et al., 2006) , use rules or lists of NEs for only identifying NEs. In addition to rules identifying NEs, we propose to use rules for identifying the beginning of NEs or the end of NEs to capture context information. To acquire rules, an automatically labeled data with an NE recognizer is used. The following types of rules are acquired.",
                "cite_spans": [
                    {
                        "start": 31,
                        "end": 46,
                        "text": "(Isozaki, 2001)",
                        "ref_id": "BIBREF6"
                    },
                    {
                        "start": 49,
                        "end": 89,
                        "text": "Talukdar et al., (Talukdar et al., 2006)",
                        "ref_id": "BIBREF17"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Rule Types",
                "sec_num": "3.1"
            },
            {
                "text": "Word N-gram rules for identifying NEs (NE-W-rules, for short): These are word N-grams corresponding to candidate NEs.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Rule Types",
                "sec_num": "3.1"
            },
            {
                "text": "Word trigram rules for identifying the beginning of NEs (NEB-W-rules): Each rule for identifying the beginning of NEs is represented as a word trigram consisting of the two words preceding the beginning of an NE and the beginning of the NE.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Rule Types",
                "sec_num": "3.1"
            },
            {
                "text": "Word trigram rules for identifying the end of NEs (NEE-W-rules): Each rule for identifying the end of NEs is represented as a word trigram consisting of the two words succeeding the end of an NE and the end of the NE.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Rule Types",
                "sec_num": "3.1"
            },
            {
                "text": "In addition to word N-gram rules, we acquire Word/POS N-gram rules for achieving higher rule coverage. Word/POS N-gram rules are acquired from N-gram rules by replacing some words in N-gram rules with POS tags. We call NE-W-rules, NEB-W-rules and NEE-W-rules converted to Word/POS N-gram rules NE-WP-rules, NEB-WP-rules and NEE-WP-rules, respectively. Word/POS N-gram rules also identify NEs the beginning of NEs and the end of NEs To acquire Word/POS rules, we replace words having one of the following POS tags with their POS tags as rule constituents: proper noun words, unknown words, and number words. This is because words having these POS tags are usually low frequency words.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Rule Types",
                "sec_num": "3.1"
            },
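A minimal sketch of the Word/POS conversion just described; the `$PN`-style tag strings follow the notation used in the example of section 3.2, and the tags for unknown and number words are assumed names:

```python
# POS tags whose words are replaced in rule conditions: proper nouns,
# unknown words, and numbers ($UNK and $NUM are assumed tag names).
REPLACED_POS = {"$PN", "$UNK", "$NUM"}

def to_word_pos_condition(words, pos_tags):
    """Replace low-frequency word classes with their POS tags."""
    return [pos if pos in REPLACED_POS else w
            for w, pos in zip(words, pos_tags)]

# The NEB-W-rule condition {went to U.K} becomes the NEB-WP-rule
# condition {went to $PN}:
assert to_word_pos_condition(["went", "to", "U.K"],
                             ["$V", "$P", "$PN"]) == ["went", "to", "$PN"]
```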
            {
                "text": "This section describes the method to acquire the rules used in this paper. The rule acquisition consists of three main steps: First, we create automatically labeled data. Second, seed rules are acquired. Finally the outputs of rules are decided.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Acquiring Rules",
                "sec_num": "3.2"
            },
            {
                "text": "The first step prepares an automatically labeled data with an NE recognizer. The NE recognizer recognizes NEs from unlabeled data and generates the automatically labeled data by annotating characters recognized as NEs with the NE labels.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Acquiring Rules",
                "sec_num": "3.2"
            },
            {
                "text": "The second step acquires seed rules from the automatically labeled data. The following is an automatically labeled sentence.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Acquiring Rules",
                "sec_num": "3.2"
            },
            {
                "text": "[ Tanaka/$PN mission/$N party/$N ] ORG went/ $V to/$P [U.K / $PN] LOC ...\" ,",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Acquiring Rules",
                "sec_num": "3.2"
            },
            {
                "text": "where $PN (Proper Noun), $N, $V, and $P following / are POS tags, and words between \"[ and ]\" were identified as NEs. ORG and LOC after \"]\" indicate NE types.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Acquiring Rules",
                "sec_num": "3.2"
            },
            {
                "text": "The following seed rules are acquired from the above sentence by following the procedures described in previous sections: NE-W-rules: {Tanaka mission party \u2192 ORG}, NEB-W-rules: {went to U.K \u2192 LW=B-LOC}, NEE-W-rules: {party went to \u2192 FW=E-ORG}, NE-WP-rules: {$PN mission party \u2192 ORG}, NEB-WP-rules: {went to $PN \u2192 LW=B-LOC}, NEE-WP-rules: {$PN mission party \u2192 LW=B-ORG}, where FW, LW, B-LOC, and E-ORG indicate the first words of word sequences that a rule is applied to, the last words of word sequences that a rule is applied to, the beginning word of a LOCATION NE, and the end word of an ORGANIZATION NE, respectively. The left of each \u2192 is the rule condition to apply a rule, and the right of each \u2192 is the seed output of a rule. If the output of a rule is only an NE type, this means the rule identifies an NE. Rules with outputs including = indicate rules for identifying the beginning of NEs or the end of NEs. The left of = indicates the positions of words where the beginning of NEs or the end of NEs exist in the identified word sequences by rules. For example, LW=B-LOC means that LW is B-LOC.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Acquiring Rules",
                "sec_num": "3.2"
            },
            {
                "text": "The final step decides the outputs of each rule. We count the outputs of the rule condition of each seed rule, and the final outputs of each rule are decided by using the frequency of each output. We use outputs assigned to each seed rule more than or equal to 50 times. 4 For example, if LW=B-LOC are obtained 10,000 times, and LW=B-ORG are obtained 1,000 times, as the outputs for {went to $PN}, the followings are acquired as final outputs: LW=B-LOC RANK1, LW=B-ORG RANK2, LW=B-LOC FREQ-5000 < n \u2264 10000, and LW=B-ORG FREQ-500 < n \u2264 1000.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Acquiring Rules",
                "sec_num": "3.2"
            },
            {
                "text": "The LW=B-LOC RANK1 and the LW=B-ORG RANK2 are the ranking of the outputs of rules. LW=B-LOC is 1st ranked output, and LW=B-ORG is 2nd ranked output. Each ranking is decided by the frequency of each output of each rule condition. The most frequent output of each rule is ranked as first.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Acquiring Rules",
                "sec_num": "3.2"
            },
            {
                "text": "LW=B-LOC FREQ-5000 < n \u2264 10000 and LW=B-ORG FREQ-500 < n \u2264 1000 are frequency information. To express the frequency of each rule output as binary features, we categorize the frequency of each rule output by the frequency of each rule output n; 50 \u2264 n \u2264 100, 100 < n \u2264 500, 500 < n \u2264 1000, 1000 < n \u2264 5000, 5000 < n \u2264 10000, 10000 < n \u2264 50000, 50000 < n \u2264 100000, and 100000 < n.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Acquiring Rules",
                "sec_num": "3.2"
            },
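A minimal sketch of this final step, assuming the bucketing helper sketched in section 2.2 is passed in as `bucket` (the exact feature-string format is an assumption):

```python
from collections import Counter

def finalize_rule_outputs(output_counts, bucket):
    """output_counts: Counter of seed outputs for one rule condition.
    Keeps outputs seen at least 50 times and emits RANK and FREQ features."""
    kept = [(out, n) for out, n in output_counts.most_common() if n >= 50]
    feats = []
    for rank, (out, n) in enumerate(kept, start=1):
        feats.append(f"{out} RANK{rank}")
        feats.append(f"{out} FREQ-{bucket(n)}")
    return feats

# For the {went to $PN} example above (with freq_bucket from section 2.2):
# Counter({"LW=B-LOC": 10000, "LW=B-ORG": 1000}) ->
# ["LW=B-LOC RANK1", "LW=B-LOC FREQ-5000<n<=10000",
#  "LW=B-ORG RANK2", "LW=B-ORG FREQ-500<n<=1000"]
```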
            {
                "text": "We define the rule application by following the method for using phrase clusters in NER (Lin and Wu, 2009) . The application of rules is allowed to overlap with or be nested in one another. If a rule is applied at positions b to e, we add the features combined with the outputs of the rule and matching positions to each word; outputs with B-(beginning) to b-th word, outputs with E-(end) to b-th word, outputs with I-(inside) within b + 1-th to e \u2212 1-th words, outputs with P-(previous) to b \u2212 1-th word, and outputs with F-(following) to e + 1-th word.",
                "cite_spans": [
                    {
                        "start": 88,
                        "end": 106,
                        "text": "(Lin and Wu, 2009)",
                        "ref_id": "BIBREF11"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Rule Application",
                "sec_num": "3.3"
            },
            {
                "text": "If a rule having the condition {went to $PN} is applied to {.... Ken/$PN went/$V to/$P Japan/ $PN for/$P ...}, the followings are captured as rule application results: b-th word is went, the word between b-th and e-th is to, e-th word is Japan, b \u2212 1-th is Ken, and e + 1-th is for.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Rule Application",
                "sec_num": "3.3"
            },
            {
                "text": "If the output of the rule is LW=B-LOC, the following features are added: B-LW=B-LOC for went, I-LW=B-LOC for to, E-LW=B-LOC for Japan, P-LW=B-LOC for Ken, and F-LW=B-LOC for for.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Rule Application",
                "sec_num": "3.3"
            },
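A minimal sketch of this feature assignment, using 0-based word indices for the matched span [b, e] (the example mirrors the Ken/went/to/Japan/for sentence above):

```python
def rule_application_features(n_words, b, e, output):
    """Attach B-/I-/E-/P-/F- prefixed copies of a rule's output to the
    words around a match at positions b..e (0-based, inclusive)."""
    feats = {i: [] for i in range(n_words)}
    feats[b].append("B-" + output)            # beginning of the match
    feats[e].append("E-" + output)            # end of the match
    for i in range(b + 1, e):
        feats[i].append("I-" + output)        # inside the match
    if b - 1 >= 0:
        feats[b - 1].append("P-" + output)    # word before the match
    if e + 1 < n_words:
        feats[e + 1].append("F-" + output)    # word after the match
    return feats

words = ["Ken", "went", "to", "Japan", "for"]
print(rule_application_features(len(words), 1, 3, "LW=B-LOC"))
# went: B-LW=B-LOC, to: I-LW=B-LOC, Japan: E-LW=B-LOC,
# Ken: P-LW=B-LOC, for: F-LW=B-LOC
```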
            {
                "text": "We also apply a method to acquire word information (Iwakura, 2010) to the rule acquisition repeatedly. This is because the previous work reported that better accuracy was obtained by repeating the acquisition of NE-related labels of words. The collection method is as follows.",
                "cite_spans": [
                    {
                        "start": 51,
                        "end": 66,
                        "text": "(Iwakura, 2010)",
                        "ref_id": "BIBREF9"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Repeatedly Acquisition",
                "sec_num": "3.4"
            },
            {
                "text": "(1) Create an NE recognizer from training data.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Repeatedly Acquisition",
                "sec_num": "3.4"
            },
            {
                "text": "(2) Acquire word information and rules from unlabeled data with the current NE recognizer.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Repeatedly Acquisition",
                "sec_num": "3.4"
            },
            {
                "text": "(3) Create a new NE recognizer with the training data, word information and rules acquired at step (2). This NE recognizer is used for acquiring new word information and rules at the next iteration.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Repeatedly Acquisition",
                "sec_num": "3.4"
            },
            {
                "text": "(4) Go back to step (2) if the termination criterion is not satisfied. The process (2) to (4) is repeated 4 times in this experiment.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Repeatedly Acquisition",
                "sec_num": "3.4"
            },
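A minimal sketch of the control flow of steps (1)-(4); the three callables stand in for components described in this paper and are supplied by the caller, not an actual API:

```python
def repeated_acquisition(train, label, acquire, train_data, unlabeled, iters=4):
    """train(data, extras) -> recognizer; label(recognizer, data) -> auto-labeled
    data; acquire(auto_labeled) -> (word_information, rules)."""
    recognizer = train(train_data, None)              # step (1): initial recognizer
    for _ in range(iters):
        auto_labeled = label(recognizer, unlabeled)   # step (2)
        extras = acquire(auto_labeled)                # word information and rules
        recognizer = train(train_data, extras)        # step (3): retrain
    return recognizer   # step (4): here, a fixed iteration count as termination
```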
            {
                "text": "The following data prepared for IREX (IREX, 1999) were used in our experiment. We used the CRL data for the training. CRL data has 18,677 NEs on 1,174 stories from Mainichi Newspaper. In addition, to investigate the effectiveness of unlabeled data and labeled data, we prepared another labeled 7,000 news stories including 143,598 NEs from Mainichi Shinbun between 2007 and 2008 according to IREX definition. We have, in total, 8,174 news stories including 162,859 NEs that are about 8 times of CRL data. To create the additional labeled 7,000 news stories, about 509 hours were required. The average time for creating a labeled news story is 260 seconds, which means only 14 labeled news stories are created in an hour.",
                "cite_spans": [
                    {
                        "start": 37,
                        "end": 49,
                        "text": "(IREX, 1999)",
                        "ref_id": "BIBREF4"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Experimental settings",
                "sec_num": "4.1"
            },
            {
                "text": "For evaluation, we used formal-run data of IREX: GENERAL task including 1,581 NEs, and ARREST task including 389 NEs.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Experimental settings",
                "sec_num": "4.1"
            },
            {
                "text": "We compared performance of NE recognizers by using the F-measure (FM) defined as follows with Recall (RE) and Precision (PR); FM = 2 \u00d7 RE \u00d7 PR / ( RE + PR ), where, RE = NUM / (the number of correct NEs), PR = NUM / (the number of NEs extracted by an NE recognizer), and NUM is the number of NEs correctly identified by an NE recognizer. The news stories from the Mainichi Shinbun between 1991 and 2008 and Japanese WikiPedia entries of July 13, 2009, were used as unlabeled data for acquiring word information and rules. The total number of words segmented by MeCab from these unlabeled data was 1,161,758,003, more than one billion words. 5",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Experimental settings",
                "sec_num": "4.1"
            },
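The metric above in code form, as a direct transcription of the FM definition (the zero-division guards are an added convenience):

```python
def f_measure(num_correct, num_gold, num_extracted):
    """FM = 2 x RE x PR / (RE + PR), with RE = NUM / correct NEs in the gold
    data and PR = NUM / NEs extracted by the recognizer."""
    if num_gold == 0 or num_extracted == 0:
        return 0.0
    re = num_correct / num_gold
    pr = num_correct / num_extracted
    return 2 * re * pr / (re + pr) if (re + pr) > 0 else 0.0
```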
            {
                "text": "We evaluated the effectiveness of the combination of word information and rules. Table 2 shows experimental results obtained with an NE recognizer without any word information and rules (NER-BASE, for short), an NE recognizer using word information (NER-W for short), an NE recognizer using rules (NER-R, for short), and an NE recognizer using word information and rules (NER-WR, for short), which is based on our proposed method",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 81,
                        "end": 88,
                        "text": "Table 2",
                        "ref_id": "TABREF1"
                    }
                ],
                "eq_spans": [],
                "section": "Evaluation of Our Proposed Method",
                "sec_num": "4.2"
            },
            {
                "text": "We used word information and rules obtained with the NER-BASE, which was created from CRL data without word information and rules. We see that we obtain better accuracy by using word information and rules acquired from unlabeled data.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Evaluation of Our Proposed Method",
                "sec_num": "4.2"
            },
            {
                "text": "The NER-WR shows the best average Fmeasure (FM). The average FM of the NER-WR is 3.6 points higher than that of the NER-BASE. The average FM of the NER-WR is 0.44 points higher than that of NER-W, and 2.78 points higher than that of the NER-R. These results show that combination of word information and rules contributes to improved accuracy. We also evaluated the effec- Table 3 : Experimental Results obtained with NE recognizers using word information and rules: G., A., and AV. indicate GENERAL, ARREST, and a micro average obtained with each NE recognizer at each iteration, respectively. 1 2 3 4 5 G. 85.35 88.43 88.22 88.20 88.31 A. 85.64 91.33 91.52 91.49 92.19 AV. 85.40 89.00 88.88 88.85 89.08 tiveness of the combination of rules for identifying NEs, and rules for identifying beginning of NEs or end of NEs. The micro average FM values for an NE recognizer using rules for identifying NEs, an NE recognizer using rules for identifying beginning of NEs or end of NEs, and the NE recognizer using the both types of rules are 85. 77, 84.19 and 86.22 . This result shows using the two types of rules are effective.",
                "cite_spans": [
                    {
                        "start": 608,
                        "end": 704,
                        "text": "85.35 88.43 88.22 88.20 88.31 A. 85.64 91.33 91.52 91.49 92.19 AV. 85.40 89.00 88.88 88.85 89.08",
                        "ref_id": null
                    },
                    {
                        "start": 1040,
                        "end": 1059,
                        "text": "77, 84.19 and 86.22",
                        "ref_id": null
                    }
                ],
                "ref_spans": [
                    {
                        "start": 373,
                        "end": 380,
                        "text": "Table 3",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "Evaluation of Our Proposed Method",
                "sec_num": "4.2"
            },
            {
                "text": "Then we evaluate the effectiveness of the acquisition method described in section 3.4. Table 3 shows the accuracy obtained with each NE recognizer at each iteration. The results at iteration 1 is the results obtained with the base line NE recognizer not using word information and rules. We obtained the best average accuracy at iteration 5. The results obtained with the NE recognizer at iteration 5 shows 4.76 points higher average Fmeasure than that of the NE recognizer at iteration 1, and 0.37 points higher average F-measure than that of the NE recognizer at iteration 2. Table 4 shows the results of the previous works using IREX Japanese NE recognition tasks. All the results were obtained with CRL data as manually labeled training data. Our results are Fmeasure values obtained with the NE recognizer at iteration 5 on Table 3 .",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 87,
                        "end": 94,
                        "text": "Table 3",
                        "ref_id": null
                    },
                    {
                        "start": 578,
                        "end": 585,
                        "text": "Table 4",
                        "ref_id": null
                    },
                    {
                        "start": 829,
                        "end": 836,
                        "text": "Table 3",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "Evaluation of Our Proposed Method",
                "sec_num": "4.2"
            },
            {
                "text": "We see that our NE recognizer shows the best F-measure values for GENERAL and ARREST. Compared with our method only using unlabeled data, most previous works use handcrafted resources, such as a set of NEs are used in (Uchimoto et al., 2000) , and NTT GOI Taikei (Ikehara et al., 1999) , which is a handcrafted thesaurus, is used in (Isozaki and Kazawa, 2002; Sasano and Kurohashi, 2008) . These results indicate that word information and rules acquired from large unlabeled data are also useful as well as handcrafted resources. In addition, we see that our method with large labeled data show much better perfor- Table 4 : Comparison with previous works. GE and AR indicate GENERAL and ARREST. GE AR (Uchimoto et al., 2000) 80.17 85.75 (Takemoto et al., 2001) 83.86 - (Utsuro et al., 2002) 84.07 - (Isozaki and Kazawa, 2002) 85.77 - (Sasano and Kurohashi, 2008) 87.72 - (Iwakura, 2010) 87 ",
                "cite_spans": [
                    {
                        "start": 218,
                        "end": 241,
                        "text": "(Uchimoto et al., 2000)",
                        "ref_id": "BIBREF18"
                    },
                    {
                        "start": 263,
                        "end": 285,
                        "text": "(Ikehara et al., 1999)",
                        "ref_id": "BIBREF3"
                    },
                    {
                        "start": 333,
                        "end": 359,
                        "text": "(Isozaki and Kazawa, 2002;",
                        "ref_id": "BIBREF5"
                    },
                    {
                        "start": 360,
                        "end": 387,
                        "text": "Sasano and Kurohashi, 2008)",
                        "ref_id": "BIBREF14"
                    },
                    {
                        "start": 702,
                        "end": 725,
                        "text": "(Uchimoto et al., 2000)",
                        "ref_id": "BIBREF18"
                    },
                    {
                        "start": 738,
                        "end": 761,
                        "text": "(Takemoto et al., 2001)",
                        "ref_id": "BIBREF16"
                    },
                    {
                        "start": 770,
                        "end": 791,
                        "text": "(Utsuro et al., 2002)",
                        "ref_id": "BIBREF19"
                    },
                    {
                        "start": 800,
                        "end": 826,
                        "text": "(Isozaki and Kazawa, 2002)",
                        "ref_id": "BIBREF5"
                    },
                    {
                        "start": 835,
                        "end": 863,
                        "text": "(Sasano and Kurohashi, 2008)",
                        "ref_id": "BIBREF14"
                    },
                    {
                        "start": 872,
                        "end": 887,
                        "text": "(Iwakura, 2010)",
                        "ref_id": "BIBREF9"
                    }
                ],
                "ref_spans": [
                    {
                        "start": 615,
                        "end": 622,
                        "text": "Table 4",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "Evaluation of Our Proposed Method",
                "sec_num": "4.2"
            },
            {
                "text": "This section describes the performances of NE recognizers trained with larger training data than CRL-data. Figure 1 shows the performance of each NE recognizer trained with different size of labeled training data. The leftmost points are the performance of the NE recognizers trained with CRL data (1,174 news stories). The other points are the performances of NE recognizers trained with training data larger than CRL data. The size of the additional training data is increased by 500 news stories.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 107,
                        "end": 115,
                        "text": "Figure 1",
                        "ref_id": "FIGREF0"
                    }
                ],
                "eq_spans": [],
                "section": "Evaluating Effectiveness of Our Method",
                "sec_num": "4.3"
            },
            {
                "text": "We examined NE recognizers using our proposed method (semi), and NE recognizers not using our method (non-semi). In the following, semi-NER indicates NE recognizers using unlabeled data based on our method, and non-semi-NER indicates NE recognizers not using unlabeled data. Figure 1 shows that the semi-NER trained with CRL data shows competitive perfor-mance of the non-semi-NER trained with about 1.5 time larger training data consisting of CRL data and additional labeled 500 news stories. To create manually labeled 500 news stories, about 36 hours are required. 6 To achieve the competitive performance of the non-semi-NER trained with CRL data and the labeled 7,000 news stories, semi-NER requires only 2,000 news stories in addition to CRL data. This result shows that our proposed method significantly reduces the number of labeled data to achieve a competitive performance obtained with only using labeled data. Figure 1 also shows that our method contributes to improved accuracy when using the large labeled training data consisting of CRL data and 7,000 news stories. The accuracy is 90.47 for GEN-ERAL, and 94.30 for ARREST. In contrast, when without word information and rules acquired from unlabeled data, the accuracy is 89.43 for GEN-ERAL, and 93.44 for ARREST.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 275,
                        "end": 283,
                        "text": "Figure 1",
                        "ref_id": "FIGREF0"
                    },
                    {
                        "start": 922,
                        "end": 930,
                        "text": "Figure 1",
                        "ref_id": "FIGREF0"
                    }
                ],
                "eq_spans": [],
                "section": "Evaluating Effectiveness of Our Method",
                "sec_num": "4.3"
            },
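            {
                "text": "The 36-hour figure follows directly from the 260-seconds-per-story average labeling time reported in footnote 6; a one-line check in Python:\n\nstories = 500\nseconds_per_story = 260  # average labeling time from footnote 6\nhours = stories * seconds_per_story / 3600.0\nprint(round(hours, 1))  # 36.1, i.e. about 36 hours",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Evaluating Effectiveness of Our Method",
                "sec_num": "4.3"
            },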
            {
                "text": "To augment features, methods for using information obtained with clustering algorithms were proposed. These methods used word clusters (Freitag, 2004; Miller et al., 2004) , the clusters of multi-word nouns (Kazama and Torisawa, 2008) , or phrase clusters (Lin and Wu, 2009) . In contrast, to collect rules, we use an automatically tagged data with an NE recognizer. Therefore, we expect to obtain more target-task-oriented information with our method than that of previous works. Although there are differences between our method and the previous works, our method and previous works are complementary .",
                "cite_spans": [
                    {
                        "start": 135,
                        "end": 150,
                        "text": "(Freitag, 2004;",
                        "ref_id": "BIBREF2"
                    },
                    {
                        "start": 151,
                        "end": 171,
                        "text": "Miller et al., 2004)",
                        "ref_id": "BIBREF12"
                    },
                    {
                        "start": 207,
                        "end": 234,
                        "text": "(Kazama and Torisawa, 2008)",
                        "ref_id": "BIBREF10"
                    },
                    {
                        "start": 256,
                        "end": 274,
                        "text": "(Lin and Wu, 2009)",
                        "ref_id": "BIBREF11"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Related Work",
                "sec_num": "5"
            },
            {
                "text": "To use rules in machine-learning-based NE recognitions, Isozaki proposed a Japanese NE recognition method based on a simple rule generator and decision tree learning. The method generates rules from supervised training data (Isozaki, 2001) . Talukdar et al., proposed a method to use lists of NEs acquired from unlabeled data for NE recognition (Talukdar et al., 2006) . Starting with a few NE seed examples, the method extends lists of NEs. These methods use rules or lists of NEs for identifying only NEs. Compared with these methods, our method uses rules for identifying the beginning of NEs and the end of NEs in addition 6 We estimate the hours by using the average labeling time of a news story. The average time is 260 seconds per news story.",
                "cite_spans": [
                    {
                        "start": 224,
                        "end": 239,
                        "text": "(Isozaki, 2001)",
                        "ref_id": "BIBREF6"
                    },
                    {
                        "start": 345,
                        "end": 368,
                        "text": "(Talukdar et al., 2006)",
                        "ref_id": "BIBREF17"
                    },
                    {
                        "start": 627,
                        "end": 628,
                        "text": "6",
                        "ref_id": null
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Related Work",
                "sec_num": "5"
            },
            {
                "text": "to rules identifying whole NEs. Therefore, our methods can use new features not used in previous works.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Related Work",
                "sec_num": "5"
            },
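            {
                "text": "As a rough illustration of collecting begin-of-NE, end-of-NE, and whole-NE rules from automatically tagged data, the following Python sketch assumes the tagged corpus is encoded as sentences of (word, tag) pairs with B-/I-/O tags and keeps candidates whose corpus frequency reaches the threshold of 50 used in this paper; the paper's actual rule representation (section 3) is richer than this.\n\nfrom collections import Counter\n\ndef collect_rules(tagged_corpus, threshold=50):\n    # tagged_corpus: sentences as lists of (word, tag) pairs; tags like 'B-PERSON' (assumed encoding)\n    begin, end, whole = Counter(), Counter(), Counter()\n    for sent in tagged_corpus:\n        i = 0\n        while i < len(sent):\n            word, tag = sent[i]\n            if tag.startswith('B-'):\n                ne = tag[2:]\n                j = i\n                while j + 1 < len(sent) and sent[j + 1][1] == 'I-' + ne:\n                    j += 1\n                begin[(word, ne)] += 1        # rule: word begins an NE of class ne\n                end[(sent[j][0], ne)] += 1    # rule: word ends an NE of class ne\n                whole[(tuple(w for w, _ in sent[i:j + 1]), ne)] += 1  # whole-NE rule\n                i = j + 1\n            else:\n                i += 1\n    keep = lambda c: {r for r, f in c.items() if f >= threshold}\n    return keep(begin), keep(end), keep(whole)",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Related Work",
                "sec_num": "5"
            },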
            {
                "text": "This paper proposed an NE recognition method using rules acquired from unlabeled data. Our method acquires rules for identifying NEs, the beginning of NEs, and the end of NEs from an automatically labeled data with an NE recognizer. In addition, we use word information including the candidate NE classes, and so on. We evaluated our method with IREX data set for Japanese NE recognition and unlabeled data consisting of more than one billion words. The experimental results showed that our method using rules and word information achieved the best accuracy on the GEN-ERAL and ARREST tasks.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Conclusion",
                "sec_num": "6"
            },
            {
                "text": "http://mecab.sourceforge.net/",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "",
                "sec_num": null
            },
            {
                "text": "For example, Japanese word \"houbei\" (visit U.S.) does not match with LOCATION \"bei (U.S)\".3 If \"Gaimusyouha\", is segmented as \"Gaimusyou (the Ministry of Foreign Affairs) / ha (particle)\", then words including characters are follows; W (Gai) = Gaimusyou, W (mu) = Gaimusyou, W (syou) = Gaimusyou, and W (ha)=ha. The identifiers that indicate positions where characters appear are follows; P (Gai) =B, P (mu) = I, P (syou) = E, and P (ha)=S.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "",
                "sec_num": null
            },
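            {
                "text": "A minimal Python sketch of the W(c) and P(c) mapping described in this footnote (the segmentation itself is given as input, with each word listed as its characters):\n\ndef char_word_info(segmented):\n    # segmented: a sentence as a list of words, each word a list of its characters,\n    # e.g. [['Gai', 'mu', 'syou'], ['ha']] for 'Gaimusyou / ha'\n    result = []\n    for word in segmented:\n        w = ''.join(word)  # W(c): the word that contains character c\n        for i, c in enumerate(word):\n            if len(word) == 1:\n                p = 'S'  # single-character word\n            elif i == 0:\n                p = 'B'  # beginning of the word\n            elif i == len(word) - 1:\n                p = 'E'  # end of the word\n            else:\n                p = 'I'  # inside the word\n            result.append((c, w, p))  # (c, W(c), P(c))\n    return result\n\n# char_word_info([['Gai', 'mu', 'syou'], ['ha']]) yields B, I, E for the\n# characters of 'Gaimusyou' and S for 'ha', matching the example above.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "",
                "sec_num": null
            },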
            {
                "text": "We conducted experiments using word information and rules obtained from training data with different frequency threshold parameters. The parameters are 1, 3, 5, 10, 20, 30, 40, and 50. We select 50 as the threshold because the parameter shows the best result among the results obtained with these parameters on a pilot study.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "",
                "sec_num": null
            },
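            {
                "text": "A minimal sketch of this threshold selection, assuming a hypothetical run_pilot function that trains and scores an NE recognizer for a given frequency threshold:\n\ndef select_threshold(thresholds, run_pilot):\n    # run_pilot: threshold -> pilot-study F-measure (stand-in for a full training run)\n    return max(thresholds, key=run_pilot)\n\n# best = select_threshold([1, 3, 5, 10, 20, 30, 40, 50], run_pilot)  # 50 in this paper",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "",
                "sec_num": null
            },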
            {
                "text": "We used WikiPedia in addition to news stories becauseSuzuki and Isozaki (Suzuki and Isozaki, 2008) reported that the use of more unlabeled data in their learning algorithm can really lead to further improvements. We treated a successive numbers and alphabets as a word in this experiment.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "",
                "sec_num": null
            }
        ],
        "back_matter": [],
        "bib_entries": {
            "BIBREF0": {
                "ref_id": "b0",
                "title": "A high-performance semi-supervised learning method for text chunking",
                "authors": [
                    {
                        "first": "Rie",
                        "middle": [],
                        "last": "Ando",
                        "suffix": ""
                    },
                    {
                        "first": "Tong",
                        "middle": [],
                        "last": "Zhang",
                        "suffix": ""
                    }
                ],
                "year": 2005,
                "venue": "Proc. of ACL 2005",
                "volume": "",
                "issue": "",
                "pages": "1--9",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Rie Ando and Tong Zhang. 2005. A high-performance semi-supervised learning method for text chunking. In Proc. of ACL 2005, pages 1-9.",
                "links": null
            },
            "BIBREF1": {
                "ref_id": "b1",
                "title": "Japanese named entity extraction with redundant morphological analysis",
                "authors": [
                    {
                        "first": "Masayuki",
                        "middle": [],
                        "last": "Asahara",
                        "suffix": ""
                    },
                    {
                        "first": "Yuji",
                        "middle": [],
                        "last": "Matsumoto",
                        "suffix": ""
                    }
                ],
                "year": 2003,
                "venue": "Proc. of HLT-NAACL 2003",
                "volume": "",
                "issue": "",
                "pages": "8--15",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Masayuki Asahara and Yuji Matsumoto. 2003. Japanese named entity extraction with redundant morphological analysis. In Proc. of HLT-NAACL 2003, pages 8-15.",
                "links": null
            },
            "BIBREF2": {
                "ref_id": "b2",
                "title": "Trained named entity recognition using distributional clusters",
                "authors": [
                    {
                        "first": "Dayne",
                        "middle": [],
                        "last": "Freitag",
                        "suffix": ""
                    }
                ],
                "year": 2004,
                "venue": "Proc. of EMNLP",
                "volume": "",
                "issue": "",
                "pages": "262--269",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Dayne Freitag. 2004. Trained named entity recog- nition using distributional clusters. In Proc. of EMNLP 2004, pages 262-269.",
                "links": null
            },
            "BIBREF3": {
                "ref_id": "b3",
                "title": "Goi-Taikei -A Japanese Lexicon CDROM. Iwanami Shoten",
                "authors": [
                    {
                        "first": "Satoru",
                        "middle": [],
                        "last": "Ikehara",
                        "suffix": ""
                    },
                    {
                        "first": "Masahiro",
                        "middle": [],
                        "last": "Miyazaki",
                        "suffix": ""
                    },
                    {
                        "first": "Satoshi",
                        "middle": [],
                        "last": "Shirai",
                        "suffix": ""
                    },
                    {
                        "first": "Akio",
                        "middle": [],
                        "last": "Yokoo",
                        "suffix": ""
                    },
                    {
                        "first": "Hiromi",
                        "middle": [],
                        "last": "Nakaiwa",
                        "suffix": ""
                    },
                    {
                        "first": "Kentaro",
                        "middle": [],
                        "last": "Ogura",
                        "suffix": ""
                    },
                    {
                        "first": "Yoshifumi",
                        "middle": [],
                        "last": "Ooyama",
                        "suffix": ""
                    },
                    {
                        "first": "Yoshihiki",
                        "middle": [],
                        "last": "Hayashi",
                        "suffix": ""
                    }
                ],
                "year": 1999,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Satoru Ikehara, Masahiro Miyazaki, Satoshi Shirai, Akio Yokoo, Hiromi Nakaiwa, Kentaro Ogura, Yoshifumi Ooyama, and Yoshihiki Hayashi. 1999. Goi-Taikei -A Japanese Lexicon CDROM. Iwanami Shoten.",
                "links": null
            },
            "BIBREF4": {
                "ref_id": "b4",
                "title": "Proc. of the IREX workshop",
                "authors": [
                    {
                        "first": "Irex",
                        "middle": [],
                        "last": "Committee",
                        "suffix": ""
                    }
                ],
                "year": 1999,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Committee IREX. 1999. Proc. of the IREX workshop.",
                "links": null
            },
            "BIBREF5": {
                "ref_id": "b5",
                "title": "Speeding up named entity recognition based on Support Vector Machines",
                "authors": [
                    {
                        "first": "Hideki",
                        "middle": [],
                        "last": "Isozaki",
                        "suffix": ""
                    },
                    {
                        "first": "Hideto",
                        "middle": [],
                        "last": "Kazawa",
                        "suffix": ""
                    }
                ],
                "year": 2002,
                "venue": "IPSJ SIG notes NL-149-1",
                "volume": "",
                "issue": "",
                "pages": "1--8",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Hideki Isozaki and Hideto Kazawa. 2002. Speeding up named entity recognition based on Support Vector Machines (in Japanese). In IPSJ SIG notes NL-149- 1, pages 1-8.",
                "links": null
            },
            "BIBREF6": {
                "ref_id": "b6",
                "title": "Japanese named entity recognition based on a simple rule generator and decision tree learning",
                "authors": [
                    {
                        "first": "Hideki",
                        "middle": [],
                        "last": "Isozaki",
                        "suffix": ""
                    }
                ],
                "year": 2001,
                "venue": "Proc. of ACL",
                "volume": "",
                "issue": "",
                "pages": "314--321",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Hideki Isozaki. 2001. Japanese named entity recogni- tion based on a simple rule generator and decision tree learning. In Proc. of ACL 2001, pages 314-321.",
                "links": null
            },
            "BIBREF7": {
                "ref_id": "b7",
                "title": "A fast boosting-based learner for feature-rich tagging and chunking",
                "authors": [
                    {
                        "first": "Tomoya",
                        "middle": [],
                        "last": "Iwakura",
                        "suffix": ""
                    },
                    {
                        "first": "Seishi",
                        "middle": [],
                        "last": "Okamoto",
                        "suffix": ""
                    }
                ],
                "year": 2008,
                "venue": "Proc. of CoNLL",
                "volume": "",
                "issue": "",
                "pages": "17--24",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Tomoya Iwakura and Seishi Okamoto. 2008. A fast boosting-based learner for feature-rich tagging and chunking. In Proc. of CoNLL 2008, pages 17-24.",
                "links": null
            },
            "BIBREF8": {
                "ref_id": "b8",
                "title": "Fast boosting-based part-ofspeech tagging and text chunking with efficient rule representation for sequential labeling",
                "authors": [
                    {
                        "first": "Tomoya",
                        "middle": [],
                        "last": "Iwakura",
                        "suffix": ""
                    }
                ],
                "year": 2009,
                "venue": "Proc. of RANLP",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Tomoya Iwakura. 2009. Fast boosting-based part-of- speech tagging and text chunking with efficient rule representation for sequential labeling. In Proc. of RANLP 2009.",
                "links": null
            },
            "BIBREF9": {
                "ref_id": "b9",
                "title": "A named entity extraction using word information repeatedly collected from unlabeled data",
                "authors": [
                    {
                        "first": "Tomoya",
                        "middle": [],
                        "last": "Iwakura",
                        "suffix": ""
                    }
                ],
                "year": 2010,
                "venue": "Proc. of CICLing",
                "volume": "",
                "issue": "",
                "pages": "212--223",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Tomoya Iwakura. 2010. A named entity extraction us- ing word information repeatedly collected from un- labeled data. In Proc. of CICLing 2010, pages 212- 223.",
                "links": null
            },
            "BIBREF10": {
                "ref_id": "b10",
                "title": "Inducing gazetteers for named entity recognition by largescale clustering of dependency relations",
                "authors": [
                    {
                        "first": "Kentaro",
                        "middle": [],
                        "last": "Jun'ichi Kazama",
                        "suffix": ""
                    },
                    {
                        "first": "",
                        "middle": [],
                        "last": "Torisawa",
                        "suffix": ""
                    }
                ],
                "year": 2008,
                "venue": "Proc. of ACL-08: HLT",
                "volume": "",
                "issue": "",
                "pages": "407--415",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Jun'ichi Kazama and Kentaro Torisawa. 2008. Induc- ing gazetteers for named entity recognition by large- scale clustering of dependency relations. In Proc. of ACL-08: HLT, pages 407-415.",
                "links": null
            },
            "BIBREF11": {
                "ref_id": "b11",
                "title": "Phrase clustering for discriminative learning",
                "authors": [
                    {
                        "first": "Dekang",
                        "middle": [],
                        "last": "Lin",
                        "suffix": ""
                    },
                    {
                        "first": "Xiaoyun",
                        "middle": [],
                        "last": "Wu",
                        "suffix": ""
                    }
                ],
                "year": 2009,
                "venue": "Proc. of ACL-IJCNLP 2009",
                "volume": "",
                "issue": "",
                "pages": "1030--1038",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Dekang Lin and Xiaoyun Wu. 2009. Phrase cluster- ing for discriminative learning. In Proc. of ACL- IJCNLP 2009, pages 1030-1038.",
                "links": null
            },
            "BIBREF12": {
                "ref_id": "b12",
                "title": "Name tagging with word clusters and discriminative training",
                "authors": [
                    {
                        "first": "Scott",
                        "middle": [],
                        "last": "Miller",
                        "suffix": ""
                    },
                    {
                        "first": "Jethran",
                        "middle": [],
                        "last": "Guinness",
                        "suffix": ""
                    },
                    {
                        "first": "Alex",
                        "middle": [],
                        "last": "Zamanian",
                        "suffix": ""
                    }
                ],
                "year": 2004,
                "venue": "Proc. of HLT-NAACL 2004",
                "volume": "",
                "issue": "",
                "pages": "337--342",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Scott Miller, Jethran Guinness, and Alex Zamanian. 2004. Name tagging with word clusters and dis- criminative training. In Proc. of HLT-NAACL 2004, pages 337-342.",
                "links": null
            },
            "BIBREF13": {
                "ref_id": "b13",
                "title": "Japanese named entity extraction with bunsetsu features",
                "authors": [
                    {
                        "first": "Keigo",
                        "middle": [],
                        "last": "Nakano",
                        "suffix": ""
                    },
                    {
                        "first": "Yuzo",
                        "middle": [],
                        "last": "Hirai",
                        "suffix": ""
                    }
                ],
                "year": 2004,
                "venue": "IPSJ Journal",
                "volume": "45",
                "issue": "3",
                "pages": "934--941",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Keigo Nakano and Yuzo Hirai. 2004. Japanese named entity extraction with bunsetsu features (in Japanese). In IPSJ Journal, 45(3), pages 934-941.",
                "links": null
            },
            "BIBREF14": {
                "ref_id": "b14",
                "title": "Japanese named entity recognition using structural natural language processing",
                "authors": [
                    {
                        "first": "Ryohei",
                        "middle": [],
                        "last": "Sasano",
                        "suffix": ""
                    },
                    {
                        "first": "Sadao",
                        "middle": [],
                        "last": "Kurohashi",
                        "suffix": ""
                    }
                ],
                "year": 2008,
                "venue": "Proc. of IJCNLP",
                "volume": "",
                "issue": "",
                "pages": "607--612",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Ryohei Sasano and Sadao Kurohashi. 2008. Japanese named entity recognition using structural natural language processing. In Proc. of IJCNLP 2008, pages 607-612.",
                "links": null
            },
            "BIBREF15": {
                "ref_id": "b15",
                "title": "Semi-supervised sequential labeling and segmentation using gigaword scale unlabeled data",
                "authors": [
                    {
                        "first": "Jun",
                        "middle": [],
                        "last": "Suzuki",
                        "suffix": ""
                    },
                    {
                        "first": "Hideki",
                        "middle": [],
                        "last": "Isozaki",
                        "suffix": ""
                    }
                ],
                "year": 2008,
                "venue": "Proc. of ACL-08: HLT",
                "volume": "",
                "issue": "",
                "pages": "665--673",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Jun Suzuki and Hideki Isozaki. 2008. Semi-supervised sequential labeling and segmentation using giga- word scale unlabeled data. In Proc. of ACL-08: HLT, pages 665-673.",
                "links": null
            },
            "BIBREF16": {
                "ref_id": "b16",
                "title": "A Japanese named entity extraction system based on building a large-scale and high quality dictionary and pattern-matching rules",
                "authors": [
                    {
                        "first": "Yoshikazu",
                        "middle": [],
                        "last": "Takemoto",
                        "suffix": ""
                    },
                    {
                        "first": "Toshikazu",
                        "middle": [],
                        "last": "Fukushima",
                        "suffix": ""
                    },
                    {
                        "first": "Hiroshi",
                        "middle": [],
                        "last": "Yamada",
                        "suffix": ""
                    }
                ],
                "year": 2001,
                "venue": "",
                "volume": "42",
                "issue": "",
                "pages": "1580--1591",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Yoshikazu Takemoto, Toshikazu Fukushima, and Hi- roshi Yamada. 2001. A Japanese named entity ex- traction system based on building a large-scale and high quality dictionary and pattern-matching rules (in Japanese). 42(6):1580-1591.",
                "links": null
            },
            "BIBREF17": {
                "ref_id": "b17",
                "title": "A context pattern induction method for named entity extraction",
                "authors": [
                    {
                        "first": "Partha",
                        "middle": [],
                        "last": "Pratim Talukdar",
                        "suffix": ""
                    },
                    {
                        "first": "Thorsten",
                        "middle": [],
                        "last": "Brants",
                        "suffix": ""
                    },
                    {
                        "first": "Mark",
                        "middle": [],
                        "last": "Liberman",
                        "suffix": ""
                    },
                    {
                        "first": "Fernando",
                        "middle": [],
                        "last": "Pereira",
                        "suffix": ""
                    }
                ],
                "year": 2006,
                "venue": "Proc. of CoNLL",
                "volume": "",
                "issue": "",
                "pages": "141--148",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Partha Pratim Talukdar, Thorsten Brants, Mark Liber- man, and Fernando Pereira. 2006. A context pat- tern induction method for named entity extraction. In Proc. of CoNLL 2006, pages 141-148.",
                "links": null
            },
            "BIBREF18": {
                "ref_id": "b18",
                "title": "Named entity extraction based on a maximum entropy model and transformati on rules",
                "authors": [
                    {
                        "first": "Kiyotaka",
                        "middle": [],
                        "last": "Uchimoto",
                        "suffix": ""
                    },
                    {
                        "first": "Qing",
                        "middle": [],
                        "last": "Ma",
                        "suffix": ""
                    },
                    {
                        "first": "Masaki",
                        "middle": [],
                        "last": "Murata",
                        "suffix": ""
                    },
                    {
                        "first": "Hiromi",
                        "middle": [],
                        "last": "Ozaku",
                        "suffix": ""
                    },
                    {
                        "first": "Masao",
                        "middle": [],
                        "last": "Utiyama",
                        "suffix": ""
                    },
                    {
                        "first": "Hitoshi",
                        "middle": [],
                        "last": "Isahara",
                        "suffix": ""
                    }
                ],
                "year": 2000,
                "venue": "Proc. of the ACL 2000",
                "volume": "",
                "issue": "",
                "pages": "326--335",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Kiyotaka Uchimoto, Qing Ma, Masaki Murata, Hiromi Ozaku, Masao Utiyama, and Hitoshi Isahara. 2000. Named entity extraction based on a maximum en- tropy model and transformati on rules. In Proc. of the ACL 2000, pages 326-335.",
                "links": null
            },
            "BIBREF19": {
                "ref_id": "b19",
                "title": "Combining outputs of multiple Japanese named entity chunkers by stacking",
                "authors": [
                    {
                        "first": "Takehito",
                        "middle": [],
                        "last": "Utsuro",
                        "suffix": ""
                    },
                    {
                        "first": "Manabu",
                        "middle": [],
                        "last": "Sassano",
                        "suffix": ""
                    },
                    {
                        "first": "Kiyotaka",
                        "middle": [],
                        "last": "Uchimoto",
                        "suffix": ""
                    }
                ],
                "year": 2002,
                "venue": "Proc. of EMNLP 2002",
                "volume": "",
                "issue": "",
                "pages": "281--288",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Takehito Utsuro, Manabu Sassano, and Kiyotaka Uchi- moto. 2002. Combining outputs of multiple Japanese named entity chunkers by stacking. In Proc. of EMNLP 2002, pages 281-288.",
                "links": null
            }
        },
        "ref_entries": {
            "FIGREF0": {
                "uris": null,
                "text": "Experimental results obtained with different size of training data. Each point indicates the micro average F-measure of an NE recognizer. mance than the other methods.",
                "type_str": "figure",
                "num": null
            },
            "TABREF0": {
                "content": "<table><tr><td>Hiragana (Japanese syllabary characters), Katakana,</td></tr><tr><td>Kanji (Chinese letter), Capital alphabet,</td></tr><tr><td>Lower alphabet, number and Others</td></tr><tr><td>training data.</td></tr></table>",
                "html": null,
                "type_str": "table",
                "text": "Basic character types",
                "num": null
            },
            "TABREF1": {
                "content": "<table><tr><td/><td>B.</td><td>+ W</td><td>+ R</td><td>+WR</td></tr><tr><td colspan=\"5\">GENERAL 85.35 88.04 85.93 88.43</td></tr><tr><td>ARREST</td><td colspan=\"4\">85.64 89.35 87.39 91.33</td></tr><tr><td>AV.</td><td colspan=\"4\">85.40 88.56 86.22 89.00</td></tr></table>",
                "html": null,
                "type_str": "table",
                "text": "Experimental Results: Each AV. indicates a micro average F-measure obtained with each NE recognizer. B., +W, +R, and +WR indicate the base line recognizer, using word information, using rules, and using word information and rules. Base indicates the base line NE recognizer not using word information and rules.",
                "num": null
            }
        }
    }
}