File size: 65,375 Bytes
6fa4bc9
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108
{
    "paper_id": "R11-1007",
    "header": {
        "generated_with": "S2ORC 1.0.0",
        "date_generated": "2023-01-19T15:05:00.724574Z"
    },
    "title": "Using a Morphological Database to Increase the Accuracy in POS Tagging",
    "authors": [
        {
            "first": "Hrafn",
            "middle": [],
            "last": "Loftsson",
            "suffix": "",
            "affiliation": {
                "laboratory": "",
                "institution": "Reykjavik University",
                "location": {}
            },
            "email": ""
        },
        {
            "first": "Sigr\u00fan",
            "middle": [],
            "last": "Helgad\u00f3ttir",
            "suffix": "",
            "affiliation": {},
            "email": ""
        },
        {
            "first": "Eir\u00edkur",
            "middle": [],
            "last": "R\u00f6gnvaldsson",
            "suffix": "",
            "affiliation": {
                "laboratory": "",
                "institution": "University of Iceland",
                "location": {}
            },
            "email": ""
        }
    ],
    "year": "",
    "venue": null,
    "identifiers": {},
    "abstract": "We experiment with extending the dictionaries used by three open-source part-of-speech taggers, by using data from a large Icelandic morphological database. We show that the accuracy of the taggers can be improved significantly by using the database. The reason is that the unknown word ratio reduces dramatically when adding data from the database to the taggers' dictionaries. For the best performing tagger, the overall tagging accuracy increases from the base tagging result of 92.73% to 93.32%, when the unknown word ratio decreases from 6.8% to 1.1%. When we add reliable frequency information to the tag profiles for some of the words originating from the database, we are able to increase the accuracy further to 93.48%-this is equivalent to 10.3% error reduction compared to the base tagger.",
    "pdf_parse": {
        "paper_id": "R11-1007",
        "_pdf_hash": "",
        "abstract": [
            {
                "text": "We experiment with extending the dictionaries used by three open-source part-of-speech taggers, by using data from a large Icelandic morphological database. We show that the accuracy of the taggers can be improved significantly by using the database. The reason is that the unknown word ratio reduces dramatically when adding data from the database to the taggers' dictionaries. For the best performing tagger, the overall tagging accuracy increases from the base tagging result of 92.73% to 93.32%, when the unknown word ratio decreases from 6.8% to 1.1%. When we add reliable frequency information to the tag profiles for some of the words originating from the database, we are able to increase the accuracy further to 93.48%-this is equivalent to 10.3% error reduction compared to the base tagger.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Abstract",
                "sec_num": null
            }
        ],
        "body_text": [
            {
                "text": "In general, part-of-speech (PoS) taggers can be categorised into two types. First, data-driven taggers, i.e. taggers that are trained on pre-tagged corpora and are both language and tagset independent, e.g. (Brants, 2000; Toutanova et al., 2003; Shen et al., 2007) . Second, linguistic rule-based taggers, which are developed \"by hand\" using linguistic knowledge, with the purpose of tagging a specific language using a particular tagset, e.g. (Karlsson et al., 1995; Loftsson, 2008) .",
                "cite_spans": [
                    {
                        "start": 207,
                        "end": 221,
                        "text": "(Brants, 2000;",
                        "ref_id": "BIBREF1"
                    },
                    {
                        "start": 222,
                        "end": 245,
                        "text": "Toutanova et al., 2003;",
                        "ref_id": "BIBREF10"
                    },
                    {
                        "start": 246,
                        "end": 264,
                        "text": "Shen et al., 2007)",
                        "ref_id": "BIBREF9"
                    },
                    {
                        "start": 444,
                        "end": 467,
                        "text": "(Karlsson et al., 1995;",
                        "ref_id": "BIBREF4"
                    },
                    {
                        "start": 468,
                        "end": 483,
                        "text": "Loftsson, 2008)",
                        "ref_id": "BIBREF6"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "All taggers use a particular tagset T and rely on a dictionary D containing the tag profile (ambiguity class) T w for each word w. A tag profile T w indicates which tags are assignable to w, thus T w \u2282 T . Essentially, for each word w, a tagger disambiguates T w by selecting (or removing all but) one tag from it with regard to context. The dictionary D is derived by a data-driven tagger during training, and derived or built during development of a linguistic rule-based tagger.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "When tagging new text, PoS taggers frequently encounter words that are not in D, i.e. so-called unknown words. An unknown word u can be quite problematic for a tagger, because the tag profile for u needs to be guessed. In most cases, PoS taggers therefore contain a special module, called an unknown word guesser, to generate the tag profile for unknown words. Frequently, the guessing of the tag profile for unknown words is incorrect and therefore the tagging accuracy for these words is considerably lower than the tagging accuracy for known words. To increase the overall tagging accuracy of PoS taggers, one might therefore try to refine the underlying unknown word guessers. Another approach is simply to try to minimise the ratio of unknown words by extending the dictionaries used by the taggers.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "In this paper, we use the latter approach. We experiment with extending the dictionaries used by three PoS taggers for Icelandic with data from a large morphological database (Bjarnad\u00f3ttir, 2005) . Our logical assumption is that the overall tagging accuracies of the taggers can be increased by this method, but we are also interested in how extended dictionaries affect the accuracy for unknown words and known words separately.",
                "cite_spans": [
                    {
                        "start": 175,
                        "end": 195,
                        "text": "(Bjarnad\u00f3ttir, 2005)",
                        "ref_id": "BIBREF0"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "The three taggers used in our experiments are: i) the linguistic rule-based tagger IceTagger (Loftsson, 2008); ii) TriTagger, a re-implementation of the statistical tagger TnT by Brants (2000) ; and iii) a serial combination of the two (Loftsson et al., 2009) .",
                "cite_spans": [
                    {
                        "start": 179,
                        "end": 192,
                        "text": "Brants (2000)",
                        "ref_id": "BIBREF1"
                    },
                    {
                        "start": 236,
                        "end": 259,
                        "text": "(Loftsson et al., 2009)",
                        "ref_id": "BIBREF5"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "The morphological database does not contain any frequency information for the tags in the tag profile for each word, but, nevertheless, we show that the tagging accuracy of the taggers can be improved significantly by using the database. The reason is that when we add most of the data from the database to the taggers' dictionaries the unknown word ratio decreases dramatically, from 6.8% to 1.1%. In that case, the overall tagging accuracy of the best performing tagger, the serial combination of IceTagger and TriTagger, increases from the base tagging result of 92.73% to 93.32%. When we add reliable frequency information, derived from a corpus, to the tag profiles for a part of the words originating from the database, we are able to increase the accuracy further to 93.48% -this is equivalent to 10.3% error reduction compared to the base tagger.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "Interestingly, it seems that very few papers exist in the literature regarding extensions of the dictionaries used by PoS taggers. In (Rupnik et al., 2008) , a dictionary derived from training is essentially extended by using a backup lexicon extracted from a large corpus (which is different from the training corpus). In contrast, we use a morphological database to extend a tagger's dictionary, but use a corpus for deriving frequency information for part of the dictionary entries. In (Tufis et al., 2008) , an unknown word u, and its tag profile and lemma obtained by a tagger when tagging new texts, is used by a morphological generator to generate tag profiles for new word forms that are morphologically related to u. The dictionary is thus extended incrementally, each time new text is tagged. In contrast, since we have access to a large morphological database, we extend a tagger's dictionary once and for all.",
                "cite_spans": [
                    {
                        "start": 134,
                        "end": 155,
                        "text": "(Rupnik et al., 2008)",
                        "ref_id": "BIBREF8"
                    },
                    {
                        "start": 489,
                        "end": 509,
                        "text": "(Tufis et al., 2008)",
                        "ref_id": "BIBREF11"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "At the \u00c1rni Magn\u00fasson Institute for Icelandic Studies, a comprehensive full form database of modern Icelandic inflections has been developed (Bjarnad\u00f3ttir, 2005) . Its Icelandic abbreviation is B\u00cdN (\"Beygingarl\u00fdsing \u00edslensks n\u00fat\u00edmam\u00e1ls\"), and henceforth we use that term. B\u00cdN contains about 280,000 paradigms, with over 5.8 million inflectional forms. The output from the database used in this project contains lemma, word form, word class, and morphological features for common nouns, proper nouns, adjectives, verbs, and adverbs. It is important to note that the database does, however, not contain any frequency information for the word forms.",
                "cite_spans": [
                    {
                        "start": 141,
                        "end": 161,
                        "text": "(Bjarnad\u00f3ttir, 2005)",
                        "ref_id": "BIBREF0"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "The morphological database",
                "sec_num": "2"
            },
            {
                "text": "A web interface for B\u00cdN is available at http://bin.arnastofnun.is, from where a text file in the format used in this project can be downloaded. Below are 16 lines from the file, demonstrating entries for the lemma \"hestur\" 'horse': The exact meaning of the data in each column is not important for our discussion, but we point out that the lemma is in the first column, gender is in third column (\"kk\"=masculine), the word form is in the fifth column, and the morphological features case, number and definiteness are in the last column (for example, \"NF\"=nominative, \"ET\"=singular, \"gr\"=definite article).",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "The morphological database",
                "sec_num": "2"
            },
            {
                "text": "The Icelandic Frequency Dictionary (IFD) corpus (Pind et al., 1991) has been used to train and test taggers for Icelandic (Helgad\u00f3ttir, 2005; Loftsson, 2008; Dredze and Wallenberg, 2008; Loftsson et al., 2009) . The corpus contains about 590,000 tokens, and its underlying tagset about 700 tags, of which 639 tags actually appear in the corpus. The tags are character strings where each character has a particular function. The first character denotes the word class. For each word class there is a predefined number of additional characters (at most six), which describe morphological features, like gender, number and case for nouns; degree and declension for adjectives; voice, mood and tense for verbs, etc. To illustrate, consider the word form \"hestur\" 'horse'. The corresponding tag is \"nken\", denoting noun (n), masculine (k), singular (e), and nominative (n) case.",
                "cite_spans": [
                    {
                        "start": 48,
                        "end": 67,
                        "text": "(Pind et al., 1991)",
                        "ref_id": "BIBREF7"
                    },
                    {
                        "start": 122,
                        "end": 141,
                        "text": "(Helgad\u00f3ttir, 2005;",
                        "ref_id": "BIBREF3"
                    },
                    {
                        "start": 142,
                        "end": 157,
                        "text": "Loftsson, 2008;",
                        "ref_id": "BIBREF6"
                    },
                    {
                        "start": 158,
                        "end": 186,
                        "text": "Dredze and Wallenberg, 2008;",
                        "ref_id": "BIBREF2"
                    },
                    {
                        "start": 187,
                        "end": 209,
                        "text": "Loftsson et al., 2009)",
                        "ref_id": "BIBREF5"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "The corpus and the taggers used",
                "sec_num": "3"
            },
            {
                "text": "As mentioned in Section 1, we use one linguistic rule-based tagger (IceTagger), one data-driven tagger (TriTagger), and a serial combination of the two in our experiments. Both IceTagger and TriTagger are implemented in Java and are part of the open-source IceNLP toolkit 1 .",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "The corpus and the taggers used",
                "sec_num": "3"
            },
            {
                "text": "IceTagger is reductionistic in nature, i.e. it removes inappropriate tags from the tag profile T w for a specific word w in a given context. Ice-Tagger first applies local rules for initial disambiguation and then uses a set of heuristics (global rules) for further disambiguation. The tag profile for each word used by IceTagger is ordered by the frequency of the tags -the first tag listed is the most frequent one and the last tag is the least frequent one. If a word is still ambiguous after the application of the heuristics, the default heuristic is simply to choose the most frequent tag (the first tag) for the word. An important part of Ice-Tagger is its unknown word guesser, IceMorphy. It guesses the tag profile for unknown words by applying morphological analysis and ending analysis. In addition, IceMorphy can fill in the tag profile gaps 2 in the dictionary for words belonging to certain morphological classes (Loftsson, 2008) .",
                "cite_spans": [
                    {
                        "start": 927,
                        "end": 943,
                        "text": "(Loftsson, 2008)",
                        "ref_id": "BIBREF6"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "The corpus and the taggers used",
                "sec_num": "3"
            },
            {
                "text": "TriTagger is a re-implementation of the well known Hidden Markov Model (HMM) tagger TnT by Brants (2000) 3 . TriTagger uses a trigram model to find the sequence of tags for words in a sentence which maximises the product of contextual probabilities (P (t i |t i\u22122 , t i\u22121 )) and lexical probabilities (P (w i |t i )):",
                "cite_spans": [
                    {
                        "start": 91,
                        "end": 104,
                        "text": "Brants (2000)",
                        "ref_id": "BIBREF1"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "The corpus and the taggers used",
                "sec_num": "3"
            },
            {
                "text": "P (t 1 )P (t 2 |t 1 ) n i=3 P (t i |t i\u22122 , t i\u22121 ) n i=1 P (w i |t i ) (1)",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "The corpus and the taggers used",
                "sec_num": "3"
            },
            {
                "text": "In the above equation, w i denotes word i in a sentence of length n (1 \u2264 i \u2264 n) and t i denotes the tag for w i . The probabilities are derived using maximum likelihood estimation based on the frequencies of tags found during training.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "The corpus and the taggers used",
                "sec_num": "3"
            },
            {
                "text": "HMM taggers handle unknown words by setting tag probabilities according to words' suffixes. The term suffix is here defined as a final sequence of characters of a word. TnT, and thus TriTagger, generate probability distributions for suffixes of various lengths. The distribution for particular suffixes is based on words in the training data that share the same suffix. The reader is referred to (Brants, 2000) for the details of suffix handling.",
                "cite_spans": [
                    {
                        "start": 396,
                        "end": 410,
                        "text": "(Brants, 2000)",
                        "ref_id": "BIBREF1"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "The corpus and the taggers used",
                "sec_num": "3"
            },
            {
                "text": "2 A tag profile gap for a word occurs when a tag is missing from the tag profile. This occurs, for example, if not all possible tags for a given word are encountered during training.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "The corpus and the taggers used",
                "sec_num": "3"
            },
            {
                "text": "3 The TnT tagger is extremely efficient -both training and testing are very fast. Unfortunately, TnT is closed source which limits its use when changes need to be carried out to its default behaviour. TriTagger is open-source and therefore its functionality can be changed or extended relatively easily. Moreover, our experiments have shown that its tagging accuracy is almost identical to the accuracy obtained by TnT. On the other hand, TriTagger has not been optimised for run-time efficiency.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "The corpus and the taggers used",
                "sec_num": "3"
            },
            {
                "text": "Below, we exemplify the tag profiles stored in the dictionaries for IceTagger and TriTagger for a specific word \"konu\" 'woman': konu nve\u00fe nveo nvee konu 122 nve\u00fe 44 nveo 42 nvee 36",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "The corpus and the taggers used",
                "sec_num": "3"
            },
            {
                "text": "The first tag profile is stored in the dictionary for IceTagger. The possible tags are \"nve\u00fe\", \"nveo\", and \"nvee\" (denoting noun, feminine, singular, dative/accusative/genitive), sorted by decreasing frequency. The second tag profile is stored in the dictionary for TriTagger. It contains similar information, but, additionally, frequency information is attached to both the word itself and each possible tag.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "The corpus and the taggers used",
                "sec_num": "3"
            },
            {
                "text": "We have previously shown (Loftsson et al., 2009) that a significant improvement in tagging accuracy is obtainable by running a serial combination of IceTagger and a HMM tagger (TriTagger). Specifically, the best result was obtained by making the HMM perform initial disambiguation only with regard to the word class (the first letter of a tag), then running IceTagger, and finally by making the HMM disambiguate words that IceTagger was not able to fully disambiguate. This tagger is called HMM+Ice+HMM.",
                "cite_spans": [
                    {
                        "start": 25,
                        "end": 48,
                        "text": "(Loftsson et al., 2009)",
                        "ref_id": "BIBREF5"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Base tagging results",
                "sec_num": "3.1"
            },
            {
                "text": "In our current experiments, we use 10-fold cross-validation on the exact same training and test splits of the so-called corrected version of the IFD corpus used by Loftsson et al. (2009) . Each test corpus contains about 10% of the tokens from the IFD, while the corresponding training corpus contains about 90% of the tokens. The average unknown word ratio using this data split is about 6.8%.",
                "cite_spans": [
                    {
                        "start": 164,
                        "end": 186,
                        "text": "Loftsson et al. (2009)",
                        "ref_id": "BIBREF5"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Base tagging results",
                "sec_num": "3.1"
            },
            {
                "text": "We use a version of the corrected IFD corpus in which type information for proper nouns (namedentity classification) has been removed, and additionally we only use one tag for numerical constants. The reason for these changes is to make the tagset of the corpus comparable to tagsets for other languages. These changes reduce the size of the tagset from about 700 tags to about 600 tags, and the number of tags actually appearing in the IFD reduces from 639 tags to 567. Table 1 shows the average accuracy of the three taggers. In this table (and in all the ones that follow), the average accuracy is based on testing using the first nine test corpora, because the tenth one was used for developing IceTagger. We consider the accuracy figures in tagging results -in the experiments described in the next section we try to improve on these figures.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 471,
                        "end": 478,
                        "text": "Table 1",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "Base tagging results",
                "sec_num": "3.1"
            },
            {
                "text": "In this section, we describe the setup and results of two experiments. First, we extend the dictionaries used by the three taggers by using data from the morphological database B\u00cdN. Second, we add reliable frequency information to some of the dictionary entries (tag profiles).",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "The experiments",
                "sec_num": "4"
            },
            {
                "text": "This part of our experiment is in two parts. First, we generate a file F 1 by extracting only lemmata from the database output described in Section 2. F 1 contains about 280,000 lemmata. To clarify, only the first line in the example output shown in Section 2 is then included in F 1 . Second, we drop the lemmata condition and generate a file F 2 by selecting most of the word forms from the database output 4 . F 2 contains about 5.3 million rows.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Extending the dictionaries",
                "sec_num": "4.1"
            },
            {
                "text": "To generate an extended dictionary for a tagger (classifier) C using data from F 1 , we perform the following (the same procedure applies when using F 2 ):",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Extending the dictionaries",
                "sec_num": "4.1"
            },
            {
                "text": "1. Derive a dictionary from F 1 , containing words and their corresponding tag profiles. Symbols denoting morphological features in F 1 are mapped to the symbols used in the IFD tagset. We call the resulting dictionary D BIN . Table 2 : Average tagging accuracy (%) using dictionaries extended with lemmata only from B\u00cdN. Average ratio of unknown words in testing is about 5.3%.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 227,
                        "end": 234,
                        "text": "Table 2",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "Extending the dictionaries",
                "sec_num": "4.1"
            },
            {
                "text": "The above description holds when generating an extended dictionary for IceTagger, a tagger which does not need frequency information in the tag profile for words. In the case of TriTagger, we simply assume a uniform distribution, i.e. we mark each tag in the tag profile T w for word w with the frequency 1. Note that for TriTagger, extending the dictionary only affects the lexical probabilities from Equation 1 -the contextual probabilities remain unchanged.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Extending the dictionaries",
                "sec_num": "4.1"
            },
            {
                "text": "Recall (from Section 3) that HMM taggers handle unknown words by generating probability distributions for suffixes of various lengths using the words in the training data. We want the generation of these probability distributions to be only dependent on the data from D (from the IFD corpus), but not as well from D BIN . The reason is twofold. First, the IFD corpus is large enough for deriving reliable suffix probability distributions. Second, using all the words from a very large dictionary (like D EXT ) to generate the distributions significantly slows down the tagging process. This issue demonstrates the importance of having access to open-source software. We simply changed the loading module of TriTagger such that it does not use all dictionary entries for suffix handling. If the loading module finds a special entry in the dictionary (essentially a specially marked comment) it does not use the succeeding entries for suffix handling. We put the special entry into D EXT after the last entry from D and thus before the first entry from D BIN .",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Extending the dictionaries",
                "sec_num": "4.1"
            },
            {
                "text": "Let us first consider the case of using file F 1 for extending the dictionaries, i.e. when only extracting lemmata from the database output. In that case, the resulting D BIN contains about 260,000 entries. Table 2 shows the accuracy of the taggers when using this version of the extended dictionary.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 207,
                        "end": 214,
                        "text": "Table 2",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "Extending the dictionaries",
                "sec_num": "4.1"
            },
            {
                "text": "Comparing the results from Tables 2 and 1, we note the following:",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Extending the dictionaries",
                "sec_num": "4.1"
            },
            {
                "text": "\u2022 The average unknown word ratio decreases by about 1.5% (from about 6.8% to about 5.3%).",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Extending the dictionaries",
                "sec_num": "4.1"
            },
            {
                "text": "\u2022 The accuracy for known words decreases in the three taggers. The most probable reason is that the tag profile for some of the lemmata entries coming from D BIN contains gaps (see Section 3). This can be attributed to the fact that only a single line from the database output is selected when extracting the lemmata, but in many cases a lemma can have multiple analysis (tags). Note that this decrease in accuracy for known words is considerably higher in TriTagger (0.65 percentage points) than in IceTagger (0.24 percentage points). This is because the unknown word guesser IceMorphy, used by IceTagger, can fill into the tag profile gaps for certain morphological classes, as mentioned in Section 3.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Extending the dictionaries",
                "sec_num": "4.1"
            },
            {
                "text": "\u2022 The accuracy for unknown words increases in all the three taggers -the highest gain (3.42 percentage points) is obtained by IceTagger. For the case of IceTagger the reason is that IceMorphy first applies morphological analysis to unknown words (before trying ending analysis). For an unknown word u, Ice-Morphy searches for a morphologically related word (a known word) to u in its dictionary, i.e. a word containing the same stem but a different morphological suffix. The added lemmata entries can thus serve as related words for unknown words and since the morphological analysis module of IceTagger is quite accurate (Loftsson, 2008) , the added lemmata entries help to increase the tagging accuracy of unknown words.",
                "cite_spans": [
                    {
                        "start": 622,
                        "end": 638,
                        "text": "(Loftsson, 2008)",
                        "ref_id": "BIBREF6"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Extending the dictionaries",
                "sec_num": "4.1"
            },
            {
                "text": "\u2022 The accuracy for all words increases in both IceTagger and HMM+Ice+HMM, but only by 0.20 and 0.16 percentage points, respectively. Obviously, the decreased accuracy for known words \"cut backs\" the gain obtained in the accuracy for unknown words. TriTagger's relatively large reduction in accuracy for known words is to blame for the reduction in its accuracy for all words.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Extending the dictionaries",
                "sec_num": "4.1"
            },
            {
                "text": "Let us now consider the second case, when using file F 2 for extending the dictionaries. F 2 contains most of the entries from the database and the resulting D BIN contains about 2.6 million entries. Table 3 : Average tagging accuracy (%) using dictionaries extended with most of the data from B\u00cdN. Average ratio of unknown words in testing is 1.1%. Table 3 shows the accuracy of the taggers when using this large version of the extended dictionary. Comparing the results from Tables 3 and 1, we note the following:",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 200,
                        "end": 207,
                        "text": "Table 3",
                        "ref_id": null
                    },
                    {
                        "start": 350,
                        "end": 357,
                        "text": "Table 3",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "Extending the dictionaries",
                "sec_num": "4.1"
            },
            {
                "text": "\u2022 The average unknown word ratio drops down to 1.1%. Concurrently, the accuracy for unknown words decreases substantially in all the three taggers. This is because the unknown word ratio drops dramatically and only \"hard\" unknown words remain -mostly proper nouns and foreign words.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Extending the dictionaries",
                "sec_num": "4.1"
            },
            {
                "text": "\u2022 The accuracy for known words decreases in the three taggers by 0.15-0.22 percentage points. This is a lower decrease than when using only lemmata entries from B\u00cdN (see Table 2 ) and can be explained by the fact that in this case the added entries from B\u00cdN should not contain tag profile gaps. Why do we then see a slight decrease in accuracy for known words? Recall that B\u00cdN does not contain any frequency information and therefore, for the added dictionary entries, we had to: i) assume a uniform distribution of tags in the the tag profile for TriTagger, and ii) assume no specific order for the tags in the tag profile for IceTagger (see the discussion on the order of the tags in Section 3). This is the most probable reason for the slight reduction in the tagging accuracy of known words.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 170,
                        "end": 177,
                        "text": "Table 2",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "Extending the dictionaries",
                "sec_num": "4.1"
            },
            {
                "text": "\u2022 The accuracy for all words increases significantly in all the three taggers, about 0.4-0.8 percentage points. This result confirms our logical assumption that the tagging accuracy can be increased by extending the dictionaries of taggers -even in the absence of reliable frequency information.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Extending the dictionaries",
                "sec_num": "4.1"
            },
            {
                "text": "Recall from Section 3 that the tag profile in the dictionary used by IceTagger is assumed to be sorted. When a word cannot be fully disambiguated, this enables IceTagger to select the most frequent tag (the first tag) in the tag profile for the word. On the other hand, when frequency information is missing, as is the case for the B\u00cdN data, the first tag of the remaining tags in the tag profile may or may not be the most frequent tag. Thus, when IceTagger applies the defult heuristic to choose the first tag that may be an arbitrary choice. For a HMM tagger, the lack of reliable frequency information in a tag profile for a word can also cause problems. This follows directly from Equation 1, i.e. the term P (w i |t i ) stands for lexical probabilities which are computed using maximum likelihood estimation from a dictionary containing frequency information for each tag in the tag profiles for words.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Adding frequency information",
                "sec_num": "4.2"
            },
            {
                "text": "In order to get reliable frequency information for the B\u00cdN data, we use a tagged corpus named M\u00cdM (\"M\u00f6rku\u00f0 \u00edslensk m\u00e1lheild\"; http: //mim.hi.is) which is being developed at the \u00c1rni Magn\u00fasson Institute for Icelandic Studies. The final size of the M\u00cdM corpus will be 25 million tokens, but the version that we use contains about 17 million tokens.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Adding frequency information",
                "sec_num": "4.2"
            },
            {
                "text": "Recall from Section 4.1 that D BIN denotes a dictionary derived from B\u00cdN. From the M\u00cdM corpus, we derive a frequency dictionary D M IM . We then create a new dictionary D N EW (based on D BIN ) in which frequency information for some of its tag profiles comes from D M IM . Specifically, we use the following procedure: Table 4 : Average tagging accuracy (%) using dictionaries extended with most of the data from B\u00cdN and with arranged tag profiles for some of the words. Average ratio of unknown words in testing is 1.1%.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 320,
                        "end": 327,
                        "text": "Table 4",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "Adding frequency information",
                "sec_num": "4.2"
            },
            {
                "text": "1. Each word w in D BIN is looked up in D M IM .",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Adding frequency information",
                "sec_num": "4.2"
            },
            {
                "text": "is considered more reliable than the one in",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Adding frequency information",
                "sec_num": "4.2"
            },
            {
                "text": "D M IM .",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Adding frequency information",
                "sec_num": "4.2"
            },
            {
                "text": "3. Combine the new dictionary D N EW with the dictionary D used by a tagger C as explained in step 2 in Section 4.1.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Adding frequency information",
                "sec_num": "4.2"
            },
            {
                "text": "To illustrate, consider the following three tag profiles for the word \"sk\u00f6gultennur\" 'buckteeth': sk\u00f6gultennur nvfn nvfo sk\u00f6gultennur nvfo nken nvfn sk\u00f6gultennur nvfo nvfn",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Adding frequency information",
                "sec_num": "4.2"
            },
            {
                "text": "The first tag profile appears in D BIN . The tags \"nvfn\" and \"nvfo\" appear in alphabetic order. The second tag profile appears in D M IM (shown here without the frequency numbers for each tag). The tag profile is sorted in ascending order of frequency of the tags. Note that the second tag profile contains the tag \"nken\" (resulting from a tagging error in M\u00cdM) which does not appear in the first tag profile. When generating the resulting tag profile for D N EW -the third line in the illustration above -the tag \"nken\" does thus not appear. We used the procedure described above to generate extended dictionaries with frequency information for TriTagger and sorted tag profiles for IceTagger. Of the 2.6 million tag profiles in D BIN , 250,000 were found in D M IM (i.e. about 10%). This procedure thus \"arranged\" 250,000 of the tag profiles in D BIN . Table 4 shows the result of using the three taggers with extended dictionaries and with arranged tag profiles for some of the words. The accuracy of TriTagger improves from 91.66%, when using B\u00cdN data without frequency information (see Table 3) to 91.93% (3.25% error reduction). The accuracy of IceTagger improves from 92.53% to 92.78% (3.5% error reduction), and the accuracy of HMM+Ice+HMM improves from 93.32% to 93.48% (2.4% error reduction). The error reduction between our HMM+Ice+HMM tagger, with an extended dictionary and arranged tag profiles, and the base version of HMM+Ice+HMM (see Table 1), is 10.3%.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 855,
                        "end": 862,
                        "text": "Table 4",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "Adding frequency information",
                "sec_num": "4.2"
            },
            {
                "text": "In Section 4.2, we showed that the accuracies of the three taggers can be improved significantly by arranging the tag profiles of the taggers using frequency information from the M\u00cdM corpus. We used about 17 million tokens from the corpus, but once it has been extended to its final size of 25 million tokens, we would like to repeat this part of the experiment, thus using more data, to see if the accuracy increases further.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Future work",
                "sec_num": "5"
            },
            {
                "text": "Note that we have only been able to arrange part of the tag profiles (about 10%) in the extended dictionaries by using frequency information from M\u00cdM. In future work, we would also like to experiment with arranging the remainder of the tag profiles according to unigram tag frequencies (for example, derived from the IFD corpus), i.e. tag frequenies that are not associated with individual words. We would then be seeking an answer to the question whether assigning unigram tag frequencies to the tag profiles of words, for which we do not have reliable frequency information, results in higher tagging accuracy compared to assigning a uniform distribution to the tag profiles (i.e. giving each tag the frequency 1 as we have done).",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Future work",
                "sec_num": "5"
            },
            {
                "text": "We have experimented with adding data from a large morphological database to the dictionaries used by three open-source PoS taggers for Icelandic. Our results show that the tagging accuracy improves significantly when extending the dictionaries, and even further improvement in accuracy can be obtained by adding frequency information to some of the dictionary entries (tag profiles).",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Conclusion",
                "sec_num": "6"
            },
            {
                "text": "Our best performing tagger, a serial combination of a linguistic rule-based tagger and a statistical tagger, obtains a state-of-the-art tagging accuracy of 93.48% when using extended dictionaries and added frequency information. This is equivalent to 10.3% error reduction compared to the best base tagger.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Conclusion",
                "sec_num": "6"
            },
            {
                "text": "IceNLP is available at http://icenlp. sourceforge.net",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "",
                "sec_num": null
            },
            {
                "text": "Because of memory issues with the taggers, we exclude proper nouns that are names of places.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "",
                "sec_num": null
            }
        ],
        "back_matter": [
            {
                "text": "The work presented in this paper was partly supported by the Icelandic Research Fund, grant 070025023.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Acknowledgments",
                "sec_num": null
            }
        ],
        "bib_entries": {
            "BIBREF0": {
                "ref_id": "b0",
                "title": "Modern Icelandic Inflections",
                "authors": [
                    {
                        "first": "K",
                        "middle": [],
                        "last": "Bjarnad\u00f3ttir",
                        "suffix": ""
                    }
                ],
                "year": 2005,
                "venue": "Nordisk Sprogteknologi 2005. Museum Tusculanums Forlag",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "K. Bjarnad\u00f3ttir. 2005. Modern Icelandic Inflec- tions. In H. Holmboe, editor, Nordisk Sprogte- knologi 2005. Museum Tusculanums Forlag, Copen- hagen.",
                "links": null
            },
            "BIBREF1": {
                "ref_id": "b1",
                "title": "TnT: A statistical part-of-speech tagger",
                "authors": [
                    {
                        "first": "T",
                        "middle": [],
                        "last": "Brants",
                        "suffix": ""
                    }
                ],
                "year": 2000,
                "venue": "Proceedings of the 6 th Conference on Applied Natural Language Processing",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "T. Brants. 2000. TnT: A statistical part-of-speech tagger. In Proceedings of the 6 th Conference on Applied Natural Language Processing, Seattle, WA, USA.",
                "links": null
            },
            "BIBREF2": {
                "ref_id": "b2",
                "title": "Icelandic Data Driven Part of Speech Tagging",
                "authors": [
                    {
                        "first": "M",
                        "middle": [],
                        "last": "Dredze",
                        "suffix": ""
                    },
                    {
                        "first": "J",
                        "middle": [],
                        "last": "Wallenberg",
                        "suffix": ""
                    }
                ],
                "year": 2008,
                "venue": "Proceedings of the 46 th Annual Meeting of the Association for Computational Linguistics: Human Language Technologies",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "M. Dredze and J. Wallenberg. 2008. Icelandic Data Driven Part of Speech Tagging. In Proceedings of the 46 th Annual Meeting of the Association for Com- putational Linguistics: Human Language Technolo- gies, Columbus, OH, USA.",
                "links": null
            },
            "BIBREF3": {
                "ref_id": "b3",
                "title": "Testing Data-Driven Learning Algorithms for PoS Tagging of Icelandic",
                "authors": [
                    {
                        "first": "S",
                        "middle": [],
                        "last": "Helgad\u00f3ttir",
                        "suffix": ""
                    }
                ],
                "year": 2004,
                "venue": "Museum Tusculanums Forlag",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "S. Helgad\u00f3ttir. 2005. Testing Data-Driven Learn- ing Algorithms for PoS Tagging of Icelandic. In H. Holmboe, editor, Nordisk Sprogteknologi 2004. Museum Tusculanums Forlag, Copenhagen.",
                "links": null
            },
            "BIBREF4": {
                "ref_id": "b4",
                "title": "Constraint Grammar: A Language-Independent System for Parsing Unrestricted Text",
                "authors": [
                    {
                        "first": "F",
                        "middle": [],
                        "last": "Karlsson",
                        "suffix": ""
                    },
                    {
                        "first": "A",
                        "middle": [],
                        "last": "Voutilainen",
                        "suffix": ""
                    },
                    {
                        "first": "J",
                        "middle": [],
                        "last": "Heikkil\u00e4",
                        "suffix": ""
                    },
                    {
                        "first": "A",
                        "middle": [],
                        "last": "Anttila",
                        "suffix": ""
                    }
                ],
                "year": 1995,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "F. Karlsson, A. Voutilainen, J. Heikkil\u00e4, and A. Anttila. 1995. Constraint Grammar: A Language- Independent System for Parsing Unrestricted Text. Mouton de Gruyter, Berlin, Germany.",
                "links": null
            },
            "BIBREF5": {
                "ref_id": "b5",
                "title": "Improving the PoS tagging accuracy of Icelandic text",
                "authors": [
                    {
                        "first": "H",
                        "middle": [],
                        "last": "Loftsson",
                        "suffix": ""
                    },
                    {
                        "first": "I",
                        "middle": [],
                        "last": "Kramarczyk",
                        "suffix": ""
                    },
                    {
                        "first": "S",
                        "middle": [],
                        "last": "Helgad\u00f3ttir",
                        "suffix": ""
                    },
                    {
                        "first": "E",
                        "middle": [],
                        "last": "R\u00f6gnvaldsson",
                        "suffix": ""
                    }
                ],
                "year": 2009,
                "venue": "Proceedings of the 17 th Nordic Conference of Computational Linguistics",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "H. Loftsson, I. Kramarczyk, S. Helgad\u00f3ttir, and E. R\u00f6gnvaldsson. 2009. Improving the PoS tagg- ing accuracy of Icelandic text. In Proceedings of the 17 th Nordic Conference of Computational Linguis- tics (NODALIDA-2009), Odense, Denmark.",
                "links": null
            },
            "BIBREF6": {
                "ref_id": "b6",
                "title": "Tagging Icelandic text: A linguistic rule-based approach",
                "authors": [
                    {
                        "first": "H",
                        "middle": [],
                        "last": "Loftsson",
                        "suffix": ""
                    }
                ],
                "year": 2008,
                "venue": "Nordic Journal of Linguistics",
                "volume": "31",
                "issue": "1",
                "pages": "47--72",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "H. Loftsson. 2008. Tagging Icelandic text: A linguistic rule-based approach. Nordic Journal of Linguistics, 31(1):47-72.",
                "links": null
            },
            "BIBREF7": {
                "ref_id": "b7",
                "title": "\u00cdslensk or\u00f0t\u00ed\u00f0nib\u00f3k",
                "authors": [
                    {
                        "first": "J",
                        "middle": [],
                        "last": "Pind",
                        "suffix": ""
                    },
                    {
                        "first": "F",
                        "middle": [],
                        "last": "Magn\u00fasson",
                        "suffix": ""
                    },
                    {
                        "first": "S",
                        "middle": [],
                        "last": "Briem",
                        "suffix": ""
                    }
                ],
                "year": 1991,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "J. Pind, F. Magn\u00fasson, and S. Briem. 1991. \u00cdslensk or\u00f0t\u00ed\u00f0nib\u00f3k [The Icelandic Frequency Dictionary].",
                "links": null
            },
            "BIBREF8": {
                "ref_id": "b8",
                "title": "Improving Morphosyntactic Tagging of Slovene Language through Meta-tagging",
                "authors": [
                    {
                        "first": "J",
                        "middle": [],
                        "last": "Rupnik",
                        "suffix": ""
                    },
                    {
                        "first": "M",
                        "middle": [],
                        "last": "Gr\u010dar",
                        "suffix": ""
                    },
                    {
                        "first": "T",
                        "middle": [],
                        "last": "Erjavec",
                        "suffix": ""
                    }
                ],
                "year": 2008,
                "venue": "Informatica",
                "volume": "32",
                "issue": "4",
                "pages": "437--444",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "J. Rupnik, M. Gr\u010dar, and T. Erjavec. 2008. Improv- ing Morphosyntactic Tagging of Slovene Language through Meta-tagging. Informatica, 32(4):437-444.",
                "links": null
            },
            "BIBREF9": {
                "ref_id": "b9",
                "title": "Guided learning for bidirectional sequence classification",
                "authors": [
                    {
                        "first": "L",
                        "middle": [],
                        "last": "Shen",
                        "suffix": ""
                    },
                    {
                        "first": "G",
                        "middle": [],
                        "last": "Satta",
                        "suffix": ""
                    },
                    {
                        "first": "A",
                        "middle": [],
                        "last": "Joshi",
                        "suffix": ""
                    }
                ],
                "year": 2007,
                "venue": "Proceedings of the 45 th Annual Meeting of the Association for Computational Linguistics",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "L. Shen, G. Satta, and A. Joshi. 2007. Guided learn- ing for bidirectional sequence classification. In Pro- ceedings of the 45 th Annual Meeting of the Associ- ation for Computational Linguistics, Prague, Czech Republic.",
                "links": null
            },
            "BIBREF10": {
                "ref_id": "b10",
                "title": "Feature-rich part-of-speech tagging with a cyclic dependency network",
                "authors": [
                    {
                        "first": "K",
                        "middle": [],
                        "last": "Toutanova",
                        "suffix": ""
                    },
                    {
                        "first": "D",
                        "middle": [],
                        "last": "Klein",
                        "suffix": ""
                    },
                    {
                        "first": "C",
                        "middle": [
                            "D"
                        ],
                        "last": "Manning",
                        "suffix": ""
                    },
                    {
                        "first": "Y",
                        "middle": [],
                        "last": "Singer",
                        "suffix": ""
                    }
                ],
                "year": 2003,
                "venue": "Proceedings of HLT/NAACL 2003",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "K. Toutanova, D. Klein, C. D. Manning, and Y. Singer. 2003. Feature-rich part-of-speech tagging with a cyclic dependency network. In Proceedings of HLT/NAACL 2003, Edmonton, Canada.",
                "links": null
            },
            "BIBREF11": {
                "ref_id": "b11",
                "title": "Unsupervised Lexical Acquisition for Part of Speech Tagging",
                "authors": [
                    {
                        "first": "D",
                        "middle": [],
                        "last": "Tufis",
                        "suffix": ""
                    },
                    {
                        "first": "E",
                        "middle": [],
                        "last": "Irimia",
                        "suffix": ""
                    },
                    {
                        "first": "R",
                        "middle": [],
                        "last": "Ion",
                        "suffix": ""
                    },
                    {
                        "first": "A",
                        "middle": [],
                        "last": "Ceausu",
                        "suffix": ""
                    }
                ],
                "year": 2008,
                "venue": "Proceedings of the 6 th International Conference on Language Resources and Evaluation",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "D. Tufis, E. Irimia, R. Ion, and A. Ceausu. 2008. Un- supervised Lexical Acquisition for Part of Speech Tagging. In Proceedings of the 6 th International Conference on Language Resources and Evaluation (LREC 2008), Marrakech, Morocco.",
                "links": null
            }
        },
        "ref_entries": {
            "TABREF1": {
                "html": null,
                "num": null,
                "content": "<table><tr><td>as our base</td></tr></table>",
                "text": "Average base tagging accuracy (%). Average ratio of unknown words in testing is 6.8%.",
                "type_str": "table"
            },
            "TABREF2": {
                "html": null,
                "num": null,
                "content": "<table><tr><td>Tagger</td><td colspan=\"2\">Unknown Known</td><td>All</td></tr><tr><td>TriTagger</td><td>74.44</td><td colspan=\"2\">91.53 90.63</td></tr><tr><td>IceTagger</td><td>80.44</td><td colspan=\"2\">92.83 92.18</td></tr><tr><td>HMM+Ice+HMM</td><td>80.53</td><td colspan=\"2\">93.57 92.89</td></tr></table>",
                "text": "2. Combine D BIN with the dictionary D generated by a tagger C during training (the number of entries in D are about 55,000, on the average). The result is a new dictionary D EXT . If a word exists in both D and D BIN then only the entry from D appears in D EXT .",
                "type_str": "table"
            },
            "TABREF4": {
                "html": null,
                "num": null,
                "content": "<table><tr><td>Tagger</td><td colspan=\"2\">Unknown Known</td><td>All</td></tr><tr><td>TriTagger</td><td>65.84</td><td colspan=\"2\">92.22 91.93</td></tr><tr><td>IceTagger</td><td>63.47</td><td colspan=\"2\">93.11 92.78</td></tr><tr><td>HMM+Ice+HMM</td><td>60.50</td><td colspan=\"2\">93.85 93.48</td></tr></table>",
                "text": "If w is not found in D M IM , then w and its tag profile is copied to D N EW . Each tag in the tag profile for w is given the frequency 1 (i.e. a uniform distribution is assumed). If w is found in D M IM , proceed to step 2.2. Order the tags in the tag profile for w in D BIN , according to the frequencies of the tags in the tag profile for w in D M IM . If a tag t for a word w is found in D M IM but not in D BIN , then t does not become a part of the tag profile for w in D N EW . The reason is that the dictionary D M IM is derived from a tagged corpus which has not been manually inspected and thus contains tagging errors. In other words, the tag profile from D BIN",
                "type_str": "table"
            }
        }
    }
}