HTTP 200 OK
Allow: GET, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept
{
"count": 50663,
"next": "https://ports.macports.org/api/v1/ports/?format=api&ordering=-created_at&page=506",
"previous": "https://ports.macports.org/api/v1/ports/?format=api&ordering=-created_at&page=504",
"results": [
{
"name": "tcLex",
"portdir": "textproc/tcLex",
"version": "1.2a1",
"license": "Tcl/Tk",
"platforms": "darwin",
"epoch": 0,
"replaced_by": null,
"homepage": "http://membres.multimania.fr/fbonnet/Tcl/tcLex/index.en.htm",
"description": "TCL Lexer",
"long_description": "tcLex is a lexical analyzer generator extension to TCL written in C.",
"active": true,
"categories": [
"textproc"
],
"maintainers": [],
"variants": [
"universal"
],
"dependencies": [
{
"type": "build",
"ports": [
"clang-9.0"
]
},
{
"type": "lib",
"ports": [
"tcl"
]
}
],
"depends_on": []
},
{
"name": "tEXT-iNSERTEr",
"portdir": "textproc/tEXT-iNSERTEr",
"version": "0.5.4",
"license": "BSD",
"platforms": "darwin",
"epoch": 0,
"replaced_by": null,
"homepage": "https://github.com/lilca/tEXT-iNSERTEr",
"description": "tEXT-iNSERTEr is the tools for the inserting text hierarchically.",
"long_description": "The tools have a executable file 'tir' and a script 'tirc'. 'tir' is a simple text-inserter with customizable tags. Customizable tags can reference local-file, variable and pipe stream. 'tirc' generates a Makefile from the dependencies of inserting text under the specified directory.",
"active": true,
"categories": [
"textproc"
],
"maintainers": [
{
"name": "",
"github": "lilca",
"ports_count": 1
}
],
"variants": [],
"dependencies": [
{
"type": "build",
"ports": [
"clang-17"
]
}
],
"depends_on": []
},
{
"name": "tDOM",
"portdir": "textproc/tDOM",
"version": "0.9.1",
"license": "MPL-1.1 and MIT",
"platforms": "darwin",
"epoch": 0,
"replaced_by": null,
"homepage": "http://tdom.org",
"description": "high performance XML data processing with easy and powerful Tcl scripting functionality",
"long_description": "tDOM combines high performance XML data processing with easy and powerful Tcl scripting functionality. tDOM should be one of the fastest ways to manipulate XML with a scripting language and uses very little memory in the process (for example the DOM tree of the XML recommendation in XML (160K) needs only about 450K in memory)! The tDOM project was started by Jochen Löwer. It is currently maintained by Rolf Ade.",
"active": true,
"categories": [
"devel",
"textproc"
],
"maintainers": [
{
"name": "neumann",
"github": "gustafn",
"ports_count": 3
}
],
"variants": [
"universal"
],
"dependencies": [
{
"type": "build",
"ports": [
"clang-9.0"
]
},
{
"type": "lib",
"ports": [
"tcl",
"expat"
]
}
],
"depends_on": [
{
"type": "lib",
"ports": [
"scid"
]
}
]
},
{
"name": "syncha",
"portdir": "textproc/syncha",
"version": "0.3.1",
"license": "LGPL",
"platforms": "any",
"epoch": 0,
"replaced_by": null,
"homepage": "http://www.cl.cs.titech.ac.jp/~ryu-i/syncha/",
"description": "a Japanese predicate-argument structure analyzer",
"long_description": "syncha is a Japanese predicate-argument structure analyzer.",
"active": true,
"categories": [
"textproc"
],
"maintainers": [],
"variants": [],
"dependencies": [
{
"type": "build",
"ports": [
"clang-14"
]
},
{
"type": "lib",
"ports": [
"lp_solve",
"cabocha"
]
}
],
"depends_on": []
},
{
"name": "syck",
"portdir": "textproc/syck",
"version": "0.70",
"license": "unknown",
"platforms": "darwin",
"epoch": 0,
"replaced_by": null,
"homepage": "https://github.com/indeyets/syck",
"description": "An extension for reading and writing YAML",
"long_description": "Syck is an extension for reading and writing YAML swiftly in popular scripting languages. As Syck loads the YAML, it stores the data directly in your language's symbol table. This means speed. This means power. This means Do not disturb Syck because it is so focused on the task at hand that it will slay you mortally if you get in its way.",
"active": true,
"categories": [
"devel",
"textproc"
],
"maintainers": [],
"variants": [
"universal"
],
"dependencies": [
{
"type": "build",
"ports": [
"clang-16"
]
}
],
"depends_on": [
{
"type": "lib",
"ports": [
"php53-syck",
"php54-syck",
"php55-syck",
"php56-syck"
]
}
]
},
{
"name": "sword-glossary-eren_he",
"portdir": "textproc/sword-glossary-eren_he",
"version": "1.1",
"license": "unknown",
"platforms": "any",
"epoch": 0,
"replaced_by": null,
"homepage": "http://www.crosswire.org/sword/modules/ModInfo.jsp?modName=ERen_he",
"description": "Ergane English to Hebrew Glossary",
"long_description": "Ergane English to Hebrew Glossary",
"active": false,
"categories": [
"textproc"
],
"maintainers": [],
"variants": [],
"dependencies": [
{
"type": "build",
"ports": [
"clang-16"
]
},
{
"type": "extract",
"ports": [
"unzip"
]
},
{
"type": "lib",
"ports": [
"sword"
]
}
],
"depends_on": []
},
{
"name": "sword-glossary-eren_grc",
"portdir": "textproc/sword-glossary-eren_grc",
"version": "1.1",
"license": "unknown",
"platforms": "any",
"epoch": 0,
"replaced_by": null,
"homepage": "http://www.crosswire.org/sword/modules/ModInfo.jsp?modName=ERen_grc",
"description": "Ergane English to Ancient Greek Glossary",
"long_description": "Ergane English to Ancient Greek Glossary",
"active": false,
"categories": [
"textproc"
],
"maintainers": [],
"variants": [],
"dependencies": [
{
"type": "build",
"ports": [
"clang-16"
]
},
{
"type": "extract",
"ports": [
"unzip"
]
},
{
"type": "lib",
"ports": [
"sword"
]
}
],
"depends_on": []
},
{
"name": "sword-glossary-eren_el",
"portdir": "textproc/sword-glossary-eren_el",
"version": "1.1",
"license": "unknown",
"platforms": "darwin",
"epoch": 0,
"replaced_by": null,
"homepage": "http://www.crosswire.org/sword/modules/ModInfo.jsp?modName=ERen_el",
"description": "Ergane English to Greek Glossary",
"long_description": "Ergane English to Greek Glossary",
"active": false,
"categories": [
"textproc"
],
"maintainers": [],
"variants": [],
"dependencies": [
{
"type": "build",
"ports": [
"clang-9.0"
]
},
{
"type": "extract",
"ports": [
"unzip"
]
},
{
"type": "lib",
"ports": [
"sword"
]
}
],
"depends_on": []
},
{
"name": "sword-dictionary-strongshebrew",
"portdir": "textproc/sword-dictionary-strongshebrew",
"version": "1.2",
"license": "public-domain",
"platforms": "any",
"epoch": 0,
"replaced_by": null,
"homepage": "http://www.crosswire.org/sword/modules/ModInfo.jsp?modName=StrongsHebrew",
"description": "Strong's Hebrew Bible Dictionary",
"long_description": "Dictionaries of Hebrew and Greek Words taken from Strong's Exhaustive Concordance by James Strong, S.T.D., LL.D. 1890 Public Domain -- Copy Freely",
"active": true,
"categories": [
"textproc"
],
"maintainers": [],
"variants": [],
"dependencies": [
{
"type": "build",
"ports": [
"clang-14"
]
},
{
"type": "extract",
"ports": [
"unzip"
]
},
{
"type": "lib",
"ports": [
"sword"
]
}
],
"depends_on": []
},
{
"name": "sword-dictionary-strongsgreek",
"portdir": "textproc/sword-dictionary-strongsgreek",
"version": "1.2",
"license": "public-domain",
"platforms": "any",
"epoch": 0,
"replaced_by": null,
"homepage": "http://www.crosswire.org/sword/modules/ModInfo.jsp?modName=StrongsGreek",
"description": "Strong's Greek Bible Dictionary",
"long_description": "Dictionaries of Hebrew and Greek Words taken from Strong's Exhaustive Concordance by James Strong, S.T.D., LL.D. 1890",
"active": true,
"categories": [
"textproc"
],
"maintainers": [],
"variants": [],
"dependencies": [
{
"type": "build",
"ports": [
"clang-14"
]
},
{
"type": "extract",
"ports": [
"unzip"
]
},
{
"type": "lib",
"ports": [
"sword"
]
}
],
"depends_on": []
},
{
"name": "sword-dictionary-nave",
"portdir": "textproc/sword-dictionary-nave",
"version": "1.1",
"license": "public-domain",
"platforms": "any",
"epoch": 0,
"replaced_by": null,
"homepage": "http://www.crosswire.org/sword/modules/ModInfo.jsp?modName=Nave",
"description": "Nave's Topical Bible",
"long_description": "Nave's Topics were originaly produced by Orville J. Nave, A.M., D.D., LL.D. while serving as a Chaplin in the United States Army. He referred to this work as the result of fourteen years of delightful and untiring study of the Word of God. Nave's topics were originally published in the early 1900's, and a photo-offset reproduction is currently published by Hendrickson, ISBN 0-917006-02-X. The title page states that this work consists of 20,000+ topics and subtopics, and 100,000 references to the Scriptures.",
"active": true,
"categories": [
"textproc"
],
"maintainers": [],
"variants": [],
"dependencies": [
{
"type": "build",
"ports": [
"clang-14"
]
},
{
"type": "extract",
"ports": [
"unzip"
]
},
{
"type": "lib",
"ports": [
"sword"
]
}
],
"depends_on": []
},
{
"name": "sword-devotional-sme",
"portdir": "textproc/sword-devotional-sme",
"version": "1.7",
"license": "public-domain",
"platforms": "any",
"epoch": 0,
"replaced_by": null,
"homepage": "http://www.crosswire.org/sword/modules/ModInfo.jsp?modName=SME",
"description": "C. H. Spurgeon's Morning and Evening: Daily Readins",
"long_description": "MORNING & EVENING: DAILY READINGS by Charles Spurgeon. This material was scanned from the printed edition published by Hendrickson. The scanned material was cleaned and formated by Ernie Stefanik, Derry PA.",
"active": true,
"categories": [
"textproc"
],
"maintainers": [],
"variants": [],
"dependencies": [
{
"type": "build",
"ports": [
"clang-16"
]
},
{
"type": "extract",
"ports": [
"unzip"
]
},
{
"type": "lib",
"ports": [
"sword"
]
}
],
"depends_on": []
},
{
"name": "sword-devotional-dbd",
"portdir": "textproc/sword-devotional-dbd",
"version": "1.4",
"license": "Restrictive",
"platforms": "any",
"epoch": 0,
"replaced_by": null,
"homepage": "http://www.crosswire.org/sword/modules/ModInfo.jsp?modName=DBD",
"description": "Day By Day By Grace - Bob Hoekstra",
"long_description": "Day By Day By Grace, by Pastor Bob Hoekstra, director of Living In Christ Ministries, seeks to illuminate the grace of God through daily devotions set to inspire the child of Christ toward the heart of his Creator and Benefactor. By examining God's glorious plan and provision for us to live and serve daily by the grace of God",
"active": true,
"categories": [
"textproc"
],
"maintainers": [],
"variants": [],
"dependencies": [
{
"type": "build",
"ports": [
"clang-16"
]
},
{
"type": "extract",
"ports": [
"unzip"
]
},
{
"type": "lib",
"ports": [
"sword"
]
}
],
"depends_on": []
},
{
"name": "sword-devotional-daily",
"portdir": "textproc/sword-devotional-daily",
"version": "1.0",
"license": "public-domain",
"platforms": "any",
"epoch": 0,
"replaced_by": null,
"homepage": "http://www.crosswire.org/sword/modules/ModInfo.jsp?modName=Daily",
"description": "Jonathan Bagster's Daily Light on the Daily Path",
"long_description": "Daily Light on the Daily Path: A Devotional Textbook for Every Day of the Year, In the Very Words of Scripture. Prepared by Jonathan Bagster (1813-1872) and Other Members of His Family.",
"active": true,
"categories": [
"textproc"
],
"maintainers": [],
"variants": [],
"dependencies": [
{
"type": "build",
"ports": [
"clang-14"
]
},
{
"type": "extract",
"ports": [
"unzip"
]
},
{
"type": "lib",
"ports": [
"sword"
]
}
],
"depends_on": []
},
{
"name": "sword-commentary-tsk",
"portdir": "textproc/sword-commentary-tsk",
"version": "1.4",
"license": "public-domain",
"platforms": "any",
"epoch": 0,
"replaced_by": null,
"homepage": "http://www.crosswire.org/sword/modules/ModInfo.jsp?modName=TSK",
"description": "Treasury of Scripture Knowledge Commentary",
"long_description": "Treasury of Scripture Knowledge: five-hundred thousand scripture references and parallel passages by Canne, Browne, Blayney, Scott, and others about 1880.",
"active": true,
"categories": [
"textproc"
],
"maintainers": [],
"variants": [],
"dependencies": [
{
"type": "build",
"ports": [
"clang-14"
]
},
{
"type": "extract",
"ports": [
"unzip"
]
},
{
"type": "lib",
"ports": [
"sword"
]
}
],
"depends_on": []
},
{
"name": "sword-commentary-jfb",
"portdir": "textproc/sword-commentary-jfb",
"version": "2.2",
"license": "public-domain",
"platforms": "any",
"epoch": 0,
"replaced_by": null,
"homepage": "http://www.crosswire.org/sword/modules/ModInfo.jsp?modName=JFB",
"description": "Jamieson Fausset Brown Bible Commentary",
"long_description": "Robert Jamieson, A. R. Fausset and David Brown Commentary Critical and Explanatory on the Whole Bible (1871)",
"active": true,
"categories": [
"textproc"
],
"maintainers": [],
"variants": [],
"dependencies": [
{
"type": "build",
"ports": [
"clang-14"
]
},
{
"type": "extract",
"ports": [
"unzip"
]
},
{
"type": "lib",
"ports": [
"sword"
]
}
],
"depends_on": []
},
{
"name": "sword-commentary-dtn",
"portdir": "textproc/sword-commentary-dtn",
"version": "1.0",
"license": "public-domain",
"platforms": "any",
"epoch": 0,
"replaced_by": null,
"homepage": "http://www.crosswire.org/sword/modules/ModInfo.jsp?modName=DTN",
"description": "Darby Translation Notes",
"long_description": "Notes to J.N. Darby's Translation of the Bible",
"active": true,
"categories": [
"textproc"
],
"maintainers": [],
"variants": [],
"dependencies": [
{
"type": "build",
"ports": [
"clang-14"
]
},
{
"type": "extract",
"ports": [
"unzip"
]
},
{
"type": "lib",
"ports": [
"sword"
]
}
],
"depends_on": []
},
{
"name": "sword-book-pilgrim",
"portdir": "textproc/sword-book-pilgrim",
"version": "1.0",
"license": "public-domain",
"platforms": "any",
"epoch": 0,
"replaced_by": null,
"homepage": "http://www.crosswire.org/sword/modules/ModInfo.jsp?modName=Pilgrim",
"description": "The Pilgrim's Progress by John Bunyan (1628-1688)",
"long_description": "The Pilgrim's Progress by John Bunyan (1628-1688)",
"active": true,
"categories": [
"textproc"
],
"maintainers": [],
"variants": [],
"dependencies": [
{
"type": "build",
"ports": [
"clang-14"
]
},
{
"type": "extract",
"ports": [
"unzip"
]
},
{
"type": "lib",
"ports": [
"sword"
]
}
],
"depends_on": []
},
{
"name": "sword-book-finney",
"portdir": "textproc/sword-book-finney",
"version": "1.1",
"license": "public-domain",
"platforms": "any",
"epoch": 0,
"replaced_by": null,
"homepage": "http://www.crosswire.org/sword/modules/ModInfo.jsp?modName=Finney",
"description": "Sermons on Gospel Themes by Charles G. Finney (1792-1875)",
"long_description": "Sermons on Gospel Themes by Charles G. Finney (1792-1875)",
"active": true,
"categories": [
"textproc"
],
"maintainers": [],
"variants": [],
"dependencies": [
{
"type": "build",
"ports": [
"clang-14"
]
},
{
"type": "extract",
"ports": [
"unzip"
]
},
{
"type": "lib",
"ports": [
"sword"
]
}
],
"depends_on": []
},
{
"name": "sword-bible-web",
"portdir": "textproc/sword-bible-web",
"version": "3.1",
"license": "public-domain",
"platforms": "any",
"epoch": 0,
"replaced_by": null,
"homepage": "http://www.crosswire.org/sword/modules/ModInfo.jsp?modName=WEB",
"description": "World English Bible, includes footnotes",
"long_description": "The World English Bible is a 1997 revision of the American Standard Version of the Holy Bible, first published in 1901. It is in the Public Domain. Please feel free to copy and distribute it freely.",
"active": true,
"categories": [
"textproc"
],
"maintainers": [],
"variants": [],
"dependencies": [
{
"type": "build",
"ports": [
"clang-14"
]
},
{
"type": "extract",
"ports": [
"unzip"
]
},
{
"type": "lib",
"ports": [
"sword"
]
}
],
"depends_on": []
},
{
"name": "sword-bible-kjv",
"portdir": "textproc/sword-bible-kjv",
"version": "2.3",
"license": "Permissive",
"platforms": "any",
"epoch": 0,
"replaced_by": null,
"homepage": "http://www.crosswire.org/sword/modules/ModInfo.jsp?modName=KJV",
"description": "King James Version (1769) with Strongs Numbers and Morphology",
"long_description": "This is the King James Version of the Holy Bible (also known as the Authorized Version) with embedded Strong's Numbers. The rights to the base text are held by the Crown of England. The Strong's numbers in the OT were obtained from The Bible Foundation: http://www.bf.org. The NT Strong's data was obtained from The KJV2003 Project at CrossWire: http://www.crosswire.org. These mechanisms provide a useful means for looking up the exact original language word in a lexicon that is keyed to Strong's numbers.",
"active": true,
"categories": [
"textproc"
],
"maintainers": [],
"variants": [],
"dependencies": [
{
"type": "build",
"ports": [
"clang-14"
]
},
{
"type": "extract",
"ports": [
"unzip"
]
},
{
"type": "lib",
"ports": [
"sword"
]
}
],
"depends_on": []
},
{
"name": "sword-bible-darby",
"portdir": "textproc/sword-bible-darby",
"version": "1.1",
"license": "public-domain",
"platforms": "any",
"epoch": 0,
"replaced_by": null,
"homepage": "http://www.crosswire.org/sword/modules/ModInfo.jsp?modName=Darby",
"description": "Darby Bible (1889)",
"long_description": "A literal translation of the Old Testament (1890) and the New Testament (1884) By John Nelson Darby (1800-82)",
"active": true,
"categories": [
"textproc"
],
"maintainers": [],
"variants": [],
"dependencies": [
{
"type": "build",
"ports": [
"clang-14"
]
},
{
"type": "extract",
"ports": [
"unzip"
]
},
{
"type": "lib",
"ports": [
"sword"
]
}
],
"depends_on": []
},
{
"name": "sword-bible-asv",
"portdir": "textproc/sword-bible-asv",
"version": "1.3",
"license": "public-domain",
"platforms": "any",
"epoch": 0,
"replaced_by": null,
"homepage": "http://www.crosswire.org/sword/modules/ModInfo.jsp?modName=ASV",
"description": "American Standard Version (1901), includes footnotes",
"long_description": "The American Standard Version of 1901 is an Americanization of the English Revised Bible, which is an update of the KJV to less archaic spelling and greater accuracy of translation. It has been called The Rock of Biblical Honesty. It is the product of the work of over 50 Evangelical Christian scholars.",
"active": true,
"categories": [
"textproc"
],
"maintainers": [],
"variants": [],
"dependencies": [
{
"type": "build",
"ports": [
"clang-14"
]
},
{
"type": "extract",
"ports": [
"unzip"
]
},
{
"type": "lib",
"ports": [
"sword"
]
}
],
"depends_on": []
},
{
"name": "sword-bible-akjv",
"portdir": "textproc/sword-bible-akjv",
"version": "1.4",
"license": "public-domain",
"platforms": "any",
"epoch": 0,
"replaced_by": null,
"homepage": "http://www.crosswire.org/sword/modules/ModInfo.jsp?modName=AKJV",
"description": "American King James Version",
"long_description": "This is a new translation of the Bible, based on the original King James Version. It is a simple word for word update from the King James English. It has taken care to change nothing doctrinely, but to simply update the spelling and vocabulary. It has not changed the grammar because that could alter it doctrinely.",
"active": true,
"categories": [
"textproc"
],
"maintainers": [],
"variants": [],
"dependencies": [
{
"type": "build",
"ports": [
"clang-14"
]
},
{
"type": "extract",
"ports": [
"unzip"
]
},
{
"type": "lib",
"ports": [
"sword"
]
}
],
"depends_on": []
},
{
"name": "sword",
"portdir": "textproc/sword",
"version": "1.9.0",
"license": "GPL-2",
"platforms": "darwin",
"epoch": 0,
"replaced_by": null,
"homepage": "http://www.crosswire.org/sword/index.jsp",
"description": "cross-platform API/library for Bible software",
"long_description": "The SWORD Project is an open source, cross-platform (Linux, Windows, Solaris, etc.) API/library for Bible software with a constantly growing list of front-ends (GUI, textmode, web-based, etc.) and a library of over 200 text modules.",
"active": true,
"categories": [
"textproc"
],
"maintainers": [],
"variants": [
"universal"
],
"dependencies": [
{
"type": "build",
"ports": [
"clang-17",
"autoconf",
"automake",
"libtool",
"pkgconfig"
]
},
{
"type": "lib",
"ports": [
"zlib",
"clucene",
"icu",
"openssl",
"curl"
]
}
],
"depends_on": [
{
"type": "lib",
"ports": [
"macsword",
"bibletime",
"sword-glossary-eren_el",
"sword-bible-akjv",
"sword-bible-asv",
"sword-bible-darby",
"sword-bible-kjv",
"sword-bible-web",
"sword-book-finney",
"sword-book-pilgrim",
"sword-commentary-dtn",
"sword-commentary-jfb",
"sword-commentary-tsk",
"sword-devotional-daily",
"sword-dictionary-nave",
"sword-dictionary-strongsgreek",
"sword-dictionary-strongshebrew",
"sword-devotional-sme",
"sword-devotional-dbd",
"sword-glossary-eren_grc",
"sword-glossary-eren_he"
]
}
]
},
{
"name": "string_replace",
"portdir": "textproc/string_replace",
"version": "0.1",
"license": "GPL-2",
"platforms": "any",
"epoch": 0,
"replaced_by": null,
"homepage": "http://freecode.com/projects/string_replace",
"description": "searches and replaces a text string or regular expression",
"long_description": "This tool was designed to go through text files (meaning: text, html, php, etc.) to search for and replace a particular text string (or regular expression).",
"active": true,
"categories": [
"textproc",
"perl"
],
"maintainers": [],
"variants": [],
"dependencies": [
{
"type": "build",
"ports": [
"clang-14"
]
},
{
"type": "run",
"ports": [
"perl5"
]
}
],
"depends_on": []
},
{
"name": "stardict-xmlittre",
"portdir": "textproc/stardict-xmlittre",
"version": "2.4.2",
"license": "unknown",
"platforms": "any",
"epoch": 0,
"replaced_by": null,
"homepage": "http://francois.gannaz.free.fr/Littre/accueil.php",
"description": "XMLittré dictionary for stardict",
"long_description": "XMLittré dictionary for stardict.",
"active": true,
"categories": [
"textproc"
],
"maintainers": [],
"variants": [],
"dependencies": [
{
"type": "build",
"ports": [
"clang-16"
]
}
],
"depends_on": []
},
{
"name": "stardict",
"portdir": "textproc/stardict",
"version": "3.0.7",
"license": "GPL-3+",
"platforms": "darwin",
"epoch": 0,
"replaced_by": null,
"homepage": "http://stardict-4.sourceforge.net/index_en.php",
"description": "A full featured gtk2 dictionary",
"long_description": "Very nice gnome dictionary. Can handle both local and web dictionaries. Though it needs a handful of gnome libraries for installation.",
"active": true,
"categories": [
"textproc"
],
"maintainers": [
{
"name": "alexandernx",
"github": "",
"ports_count": 2
}
],
"variants": [
"gnome",
"universal"
],
"dependencies": [
{
"type": "build",
"ports": [
"gnome-common",
"clang-17",
"itstool",
"intltool",
"yelp-tools",
"gnome-doc-utils",
"autoconf",
"automake",
"libtool",
"pkgconfig",
"gconf"
]
},
{
"type": "extract",
"ports": [
"p7zip"
]
},
{
"type": "lib",
"ports": [
"gtk3",
"libsigcxx2",
"json-glib",
"glib2",
"libcanberra",
"espeak-ng"
]
}
],
"depends_on": []
},
{
"name": "stanford-postagger",
"portdir": "textproc/stanford-postagger",
"version": "4.2.0",
"license": "GPL-2+",
"platforms": "any",
"epoch": 0,
"replaced_by": null,
"homepage": "https://nlp.stanford.edu/software/tagger.html",
"description": "a Java implementation of the log-linear part-of-speech taggers",
"long_description": "a Java implementation of the log-linear part-of-speech taggers",
"active": true,
"categories": [
"textproc",
"java"
],
"maintainers": [],
"variants": [],
"dependencies": [
{
"type": "build",
"ports": [
"clang-14"
]
},
{
"type": "extract",
"ports": [
"unzip"
]
}
],
"depends_on": []
},
{
"name": "stanford-parser",
"portdir": "textproc/stanford-parser",
"version": "4.2.0",
"license": "GPL-2+",
"platforms": "any",
"epoch": 0,
"replaced_by": null,
"homepage": "https://nlp.stanford.edu/software/lex-parser.html",
"description": "a Java implementation of probabilistic natural language parsers",
"long_description": "a Java implementation of probabilistic natural language parsers, both highly optimized PCFG and lexicalized dependency parsers, and a lexicalized PCFG parser.",
"active": true,
"categories": [
"textproc",
"java"
],
"maintainers": [],
"variants": [],
"dependencies": [
{
"type": "build",
"ports": [
"clang-14"
]
},
{
"type": "extract",
"ports": [
"unzip"
]
}
],
"depends_on": []
},
{
"name": "stanford-ner",
"portdir": "textproc/stanford-ner",
"version": "4.2.0",
"license": "GPL-2+",
"platforms": "any",
"epoch": 0,
"replaced_by": null,
"homepage": "https://nlp.stanford.edu/software/CRF-NER.shtml",
"description": "a high-performance machine learning based named entity recognition system",
"long_description": "a high-performance machine learning based named entity recognition system, including facilities to train models from supervised training data and pre-trained models for English.",
"active": true,
"categories": [
"textproc",
"java"
],
"maintainers": [],
"variants": [],
"dependencies": [
{
"type": "build",
"ports": [
"clang-14"
]
},
{
"type": "extract",
"ports": [
"unzip"
]
}
],
"depends_on": []
},
{
"name": "stanford-corenlp-spanish",
"portdir": "textproc/stanford-corenlp",
"version": "3.9.2",
"license": "GPL-3+",
"platforms": "any",
"epoch": 0,
"replaced_by": null,
"homepage": "https://stanfordnlp.github.io/CoreNLP/",
"description": "A Java suite of core NLP tools",
"long_description": "Stanford CoreNLP provides a set of natural language analysis tools written in Java. It can take raw human language text input and give the base forms of words, their parts of speech, whether they are names of companies, people, etc., normalize and interpret dates, times, and numeric quantities, mark up the structure of sentences in terms of phrases or word dependencies, and indicate which noun phrases refer to the same entities. It was originally developed for English, but now also provides varying levels of support for (Modern Standard) Arabic, (mainland) Chinese, French, German, and Spanish. Stanford CoreNLP is an integrated framework, which makes it very easy to apply a bunch of language analysis tools to a piece of text. Starting from plain text, you can run all the tools with just two lines of code. Its analyses provide the foundational building blocks for higher-level and domain-specific text understanding applications. Stanford CoreNLP is a set of stable and well-tested natural language processing tools, widely used by various groups in academia, industry, and government. The tools variously use rule-based, probabilistic machine learning, and deep learning components.",
"active": true,
"categories": [
"textproc",
"java"
],
"maintainers": [],
"variants": [],
"dependencies": [
{
"type": "build",
"ports": [
"clang-16"
]
},
{
"type": "extract",
"ports": [
"unzip"
]
},
{
"type": "lib",
"ports": [
"openjdk11"
]
},
{
"type": "run",
"ports": [
"stanford-corenlp",
"stanford-corenlp-models"
]
}
],
"depends_on": []
},
{
"name": "stanford-corenlp-german",
"portdir": "textproc/stanford-corenlp",
"version": "3.9.2",
"license": "GPL-3+",
"platforms": "any",
"epoch": 0,
"replaced_by": null,
"homepage": "https://stanfordnlp.github.io/CoreNLP/",
"description": "A Java suite of core NLP tools",
"long_description": "Stanford CoreNLP provides a set of natural language analysis tools written in Java. It can take raw human language text input and give the base forms of words, their parts of speech, whether they are names of companies, people, etc., normalize and interpret dates, times, and numeric quantities, mark up the structure of sentences in terms of phrases or word dependencies, and indicate which noun phrases refer to the same entities. It was originally developed for English, but now also provides varying levels of support for (Modern Standard) Arabic, (mainland) Chinese, French, German, and Spanish. Stanford CoreNLP is an integrated framework, which makes it very easy to apply a bunch of language analysis tools to a piece of text. Starting from plain text, you can run all the tools with just two lines of code. Its analyses provide the foundational building blocks for higher-level and domain-specific text understanding applications. Stanford CoreNLP is a set of stable and well-tested natural language processing tools, widely used by various groups in academia, industry, and government. The tools variously use rule-based, probabilistic machine learning, and deep learning components.",
"active": true,
"categories": [
"textproc",
"java"
],
"maintainers": [],
"variants": [],
"dependencies": [
{
"type": "build",
"ports": [
"clang-16"
]
},
{
"type": "extract",
"ports": [
"unzip"
]
},
{
"type": "lib",
"ports": [
"openjdk11"
]
},
{
"type": "run",
"ports": [
"stanford-corenlp",
"stanford-corenlp-models"
]
}
],
"depends_on": []
},
{
"name": "stanford-corenlp-french",
"portdir": "textproc/stanford-corenlp",
"version": "3.9.2",
"license": "GPL-3+",
"platforms": "any",
"epoch": 0,
"replaced_by": null,
"homepage": "https://stanfordnlp.github.io/CoreNLP/",
"description": "A Java suite of core NLP tools",
"long_description": "Stanford CoreNLP provides a set of natural language analysis tools written in Java. It can take raw human language text input and give the base forms of words, their parts of speech, whether they are names of companies, people, etc., normalize and interpret dates, times, and numeric quantities, mark up the structure of sentences in terms of phrases or word dependencies, and indicate which noun phrases refer to the same entities. It was originally developed for English, but now also provides varying levels of support for (Modern Standard) Arabic, (mainland) Chinese, French, German, and Spanish. Stanford CoreNLP is an integrated framework, which makes it very easy to apply a bunch of language analysis tools to a piece of text. Starting from plain text, you can run all the tools with just two lines of code. Its analyses provide the foundational building blocks for higher-level and domain-specific text understanding applications. Stanford CoreNLP is a set of stable and well-tested natural language processing tools, widely used by various groups in academia, industry, and government. The tools variously use rule-based, probabilistic machine learning, and deep learning components.",
"active": true,
"categories": [
"textproc",
"java"
],
"maintainers": [],
"variants": [],
"dependencies": [
{
"type": "build",
"ports": [
"clang-16"
]
},
{
"type": "extract",
"ports": [
"unzip"
]
},
{
"type": "lib",
"ports": [
"openjdk11"
]
},
{
"type": "run",
"ports": [
"stanford-corenlp",
"stanford-corenlp-models"
]
}
],
"depends_on": []
},
{
"name": "stanford-corenlp-english-kbp",
"portdir": "textproc/stanford-corenlp",
"version": "3.9.2",
"license": "GPL-3+",
"platforms": "any",
"epoch": 0,
"replaced_by": null,
"homepage": "https://stanfordnlp.github.io/CoreNLP/",
"description": "A Java suite of core NLP tools",
"long_description": "Stanford CoreNLP provides a set of natural language analysis tools written in Java. It can take raw human language text input and give the base forms of words, their parts of speech, whether they are names of companies, people, etc., normalize and interpret dates, times, and numeric quantities, mark up the structure of sentences in terms of phrases or word dependencies, and indicate which noun phrases refer to the same entities. It was originally developed for English, but now also provides varying levels of support for (Modern Standard) Arabic, (mainland) Chinese, French, German, and Spanish. Stanford CoreNLP is an integrated framework, which makes it very easy to apply a bunch of language analysis tools to a piece of text. Starting from plain text, you can run all the tools with just two lines of code. Its analyses provide the foundational building blocks for higher-level and domain-specific text understanding applications. Stanford CoreNLP is a set of stable and well-tested natural language processing tools, widely used by various groups in academia, industry, and government. The tools variously use rule-based, probabilistic machine learning, and deep learning components.",
"active": true,
"categories": [
"textproc",
"java"
],
"maintainers": [],
"variants": [],
"dependencies": [
{
"type": "build",
"ports": [
"clang-16"
]
},
{
"type": "extract",
"ports": [
"unzip"
]
},
{
"type": "lib",
"ports": [
"openjdk11"
]
},
{
"type": "run",
"ports": [
"stanford-corenlp",
"stanford-corenlp-models"
]
}
],
"depends_on": []
},
{
"name": "stanford-corenlp-english",
"portdir": "textproc/stanford-corenlp",
"version": "3.9.2",
"license": "GPL-3+",
"platforms": "any",
"epoch": 0,
"replaced_by": null,
"homepage": "https://stanfordnlp.github.io/CoreNLP/",
"description": "A Java suite of core NLP tools",
"long_description": "Stanford CoreNLP provides a set of natural language analysis tools written in Java. It can take raw human language text input and give the base forms of words, their parts of speech, whether they are names of companies, people, etc., normalize and interpret dates, times, and numeric quantities, mark up the structure of sentences in terms of phrases or word dependencies, and indicate which noun phrases refer to the same entities. It was originally developed for English, but now also provides varying levels of support for (Modern Standard) Arabic, (mainland) Chinese, French, German, and Spanish. Stanford CoreNLP is an integrated framework, which makes it very easy to apply a bunch of language analysis tools to a piece of text. Starting from plain text, you can run all the tools with just two lines of code. Its analyses provide the foundational building blocks for higher-level and domain-specific text understanding applications. Stanford CoreNLP is a set of stable and well-tested natural language processing tools, widely used by various groups in academia, industry, and government. The tools variously use rule-based, probabilistic machine learning, and deep learning components.",
"active": true,
"categories": [
"textproc",
"java"
],
"maintainers": [],
"variants": [],
"dependencies": [
{
"type": "build",
"ports": [
"clang-16"
]
},
{
"type": "extract",
"ports": [
"unzip"
]
},
{
"type": "lib",
"ports": [
"openjdk11"
]
},
{
"type": "run",
"ports": [
"stanford-corenlp",
"stanford-corenlp-models"
]
}
],
"depends_on": []
},
{
"name": "stanford-corenlp-chinese",
"portdir": "textproc/stanford-corenlp",
"version": "3.9.2",
"license": "GPL-3+",
"platforms": "any",
"epoch": 0,
"replaced_by": null,
"homepage": "https://stanfordnlp.github.io/CoreNLP/",
"description": "A Java suite of core NLP tools",
"long_description": "Stanford CoreNLP provides a set of natural language analysis tools written in Java. It can take raw human language text input and give the base forms of words, their parts of speech, whether they are names of companies, people, etc., normalize and interpret dates, times, and numeric quantities, mark up the structure of sentences in terms of phrases or word dependencies, and indicate which noun phrases refer to the same entities. It was originally developed for English, but now also provides varying levels of support for (Modern Standard) Arabic, (mainland) Chinese, French, German, and Spanish. Stanford CoreNLP is an integrated framework, which makes it very easy to apply a bunch of language analysis tools to a piece of text. Starting from plain text, you can run all the tools with just two lines of code. Its analyses provide the foundational building blocks for higher-level and domain-specific text understanding applications. Stanford CoreNLP is a set of stable and well-tested natural language processing tools, widely used by various groups in academia, industry, and government. The tools variously use rule-based, probabilistic machine learning, and deep learning components.",
"active": true,
"categories": [
"textproc",
"java"
],
"maintainers": [],
"variants": [],
"dependencies": [
{
"type": "build",
"ports": [
"clang-16"
]
},
{
"type": "extract",
"ports": [
"unzip"
]
},
{
"type": "lib",
"ports": [
"openjdk11"
]
},
{
"type": "run",
"ports": [
"stanford-corenlp",
"stanford-corenlp-models"
]
}
],
"depends_on": []
},
{
"name": "stanford-corenlp-arabic",
"portdir": "textproc/stanford-corenlp",
"version": "3.9.2",
"license": "GPL-3+",
"platforms": "any",
"epoch": 0,
"replaced_by": null,
"homepage": "https://stanfordnlp.github.io/CoreNLP/",
"description": "A Java suite of core NLP tools",
"long_description": "Stanford CoreNLP provides a set of natural language analysis tools written in Java. It can take raw human language text input and give the base forms of words, their parts of speech, whether they are names of companies, people, etc., normalize and interpret dates, times, and numeric quantities, mark up the structure of sentences in terms of phrases or word dependencies, and indicate which noun phrases refer to the same entities. It was originally developed for English, but now also provides varying levels of support for (Modern Standard) Arabic, (mainland) Chinese, French, German, and Spanish. Stanford CoreNLP is an integrated framework, which makes it very easy to apply a bunch of language analysis tools to a piece of text. Starting from plain text, you can run all the tools with just two lines of code. Its analyses provide the foundational building blocks for higher-level and domain-specific text understanding applications. Stanford CoreNLP is a set of stable and well-tested natural language processing tools, widely used by various groups in academia, industry, and government. The tools variously use rule-based, probabilistic machine learning, and deep learning components.",
"active": true,
"categories": [
"textproc",
"java"
],
"maintainers": [],
"variants": [],
"dependencies": [
{
"type": "build",
"ports": [
"clang-16"
]
},
{
"type": "extract",
"ports": [
"unzip"
]
},
{
"type": "lib",
"ports": [
"openjdk11"
]
},
{
"type": "run",
"ports": [
"stanford-corenlp",
"stanford-corenlp-models"
]
}
],
"depends_on": []
},
{
"name": "stanford-corenlp-models",
"portdir": "textproc/stanford-corenlp",
"version": "3.9.2",
"license": "GPL-3+",
"platforms": "any",
"epoch": 0,
"replaced_by": null,
"homepage": "https://stanfordnlp.github.io/CoreNLP/",
"description": "A Java suite of core NLP tools",
"long_description": "Stanford CoreNLP provides a set of natural language analysis tools written in Java. It can take raw human language text input and give the base forms of words, their parts of speech, whether they are names of companies, people, etc., normalize and interpret dates, times, and numeric quantities, mark up the structure of sentences in terms of phrases or word dependencies, and indicate which noun phrases refer to the same entities. It was originally developed for English, but now also provides varying levels of support for (Modern Standard) Arabic, (mainland) Chinese, French, German, and Spanish. Stanford CoreNLP is an integrated framework, which makes it very easy to apply a bunch of language analysis tools to a piece of text. Starting from plain text, you can run all the tools with just two lines of code. Its analyses provide the foundational building blocks for higher-level and domain-specific text understanding applications. Stanford CoreNLP is a set of stable and well-tested natural language processing tools, widely used by various groups in academia, industry, and government. The tools variously use rule-based, probabilistic machine learning, and deep learning components.",
"active": true,
"categories": [
"textproc",
"java"
],
"maintainers": [],
"variants": [],
"dependencies": [
{
"type": "build",
"ports": [
"clang-16"
]
},
{
"type": "extract",
"ports": [
"unzip"
]
},
{
"type": "lib",
"ports": [
"openjdk11"
]
}
],
"depends_on": [
{
"type": "run",
"ports": [
"stanford-corenlp",
"stanford-corenlp-arabic",
"stanford-corenlp-chinese",
"stanford-corenlp-english",
"stanford-corenlp-english-kbp",
"stanford-corenlp-french",
"stanford-corenlp-german",
"stanford-corenlp-spanish"
]
}
]
},
{
"name": "stanford-corenlp",
"portdir": "textproc/stanford-corenlp",
"version": "3.9.2",
"license": "GPL-3+",
"platforms": "any",
"epoch": 0,
"replaced_by": null,
"homepage": "https://stanfordnlp.github.io/CoreNLP/",
"description": "A Java suite of core NLP tools",
"long_description": "Stanford CoreNLP provides a set of natural language analysis tools written in Java. It can take raw human language text input and give the base forms of words, their parts of speech, whether they are names of companies, people, etc., normalize and interpret dates, times, and numeric quantities, mark up the structure of sentences in terms of phrases or word dependencies, and indicate which noun phrases refer to the same entities. It was originally developed for English, but now also provides varying levels of support for (Modern Standard) Arabic, (mainland) Chinese, French, German, and Spanish. Stanford CoreNLP is an integrated framework, which makes it very easy to apply a bunch of language analysis tools to a piece of text. Starting from plain text, you can run all the tools with just two lines of code. Its analyses provide the foundational building blocks for higher-level and domain-specific text understanding applications. Stanford CoreNLP is a set of stable and well-tested natural language processing tools, widely used by various groups in academia, industry, and government. The tools variously use rule-based, probabilistic machine learning, and deep learning components.",
"active": true,
"categories": [
"textproc",
"java"
],
"maintainers": [],
"variants": [],
"dependencies": [
{
"type": "build",
"ports": [
"clang-16"
]
},
{
"type": "extract",
"ports": [
"unzip"
]
},
{
"type": "lib",
"ports": [
"apache-ant",
"openjdk11"
]
},
{
"type": "run",
"ports": [
"stanford-corenlp-models"
]
}
],
"depends_on": [
{
"type": "run",
"ports": [
"py27-stanfordnlp",
"py38-stanfordnlp",
"py37-stanfordnlp",
"stanford-corenlp-arabic",
"stanford-corenlp-chinese",
"stanford-corenlp-english",
"stanford-corenlp-english-kbp",
"stanford-corenlp-french",
"stanford-corenlp-german",
"stanford-corenlp-spanish"
]
}
]
},
{
"name": "ssed",
"portdir": "textproc/ssed",
"version": "3.62",
"license": "GPL-2+",
"platforms": "darwin",
"epoch": 0,
"replaced_by": null,
"homepage": "http://sed.sourceforge.net/",
"description": "Super-sed - a heavily enhanced version of sed",
"long_description": "This is a version of sed based on GNU sed. It is not a version of GNU sed, though. There are several new features (including in-place editing of files, extended regular expression syntax and a few new commands) and some bug fixes.",
"active": true,
"categories": [
"textproc"
],
"maintainers": [],
"variants": [
"universal"
],
"dependencies": [
{
"type": "build",
"ports": [
"clang-9.0"
]
},
{
"type": "lib",
"ports": [
"gettext",
"libiconv"
]
}
],
"depends_on": []
},
{
"name": "libsphinxclient",
"portdir": "textproc/sphinx",
"version": "2.2.11",
"license": "LGPL-2",
"platforms": "darwin",
"epoch": 0,
"replaced_by": null,
"homepage": "http://sphinxsearch.com/",
"description": "C library to talk to the Sphinx full-text search engine",
"long_description": "C library to talk to the Sphinx full-text search engine",
"active": true,
"categories": [
"net",
"textproc"
],
"maintainers": [],
"variants": [
"universal"
],
"dependencies": [
{
"type": "build",
"ports": [
"autoconf",
"automake",
"libtool",
"clang-18"
]
}
],
"depends_on": [
{
"type": "lib",
"ports": [
"php53-sphinx",
"php54-sphinx",
"php55-sphinx",
"php56-sphinx"
]
}
]
},
{
"name": "sphinx",
"portdir": "textproc/sphinx",
"version": "2.2.11",
"license": "GPL-2",
"platforms": "darwin",
"epoch": 0,
"replaced_by": null,
"homepage": "http://sphinxsearch.com/",
"description": "Sphinx is a full-text search engine",
"long_description": "Sphinx is a full-text search engine, meant to provide fast, size-efficient and relevant fulltext search functions to other applications. Sphinx was specially designed to integrate well with SQL databases and scripting languages. Currently built-in data sources support fetching data either via direct connection to MySQL or PostgreSQL, or from an XML pipe.",
"active": true,
"categories": [
"net",
"textproc"
],
"maintainers": [],
"variants": [
"postgresql91",
"postgresql92",
"postgresql93",
"postgresql94",
"postgresql95",
"postgresql96",
"mysql51",
"mysql55",
"mysql56",
"mysql57",
"mariadb",
"universal"
],
"dependencies": [
{
"type": "build",
"ports": [
"clang-18"
]
},
{
"type": "lib",
"ports": [
"mysql57",
"expat",
"libiconv"
]
}
],
"depends_on": [
{
"type": "build",
"ports": [
"iverilog-docs"
]
}
]
},
{
"name": "spark",
"portdir": "textproc/spark",
"version": "1.0.1",
"license": "MIT",
"platforms": "any",
"epoch": 0,
"replaced_by": null,
"homepage": "https://github.com/holman/spark",
"description": "sparklines for your shell.",
"long_description": "spark: sparklines for your shell.",
"active": true,
"categories": [
"textproc"
],
"maintainers": [],
"variants": [],
"dependencies": [
{
"type": "build",
"ports": [
"clang-17"
]
}
],
"depends_on": []
},
{
"name": "sowing",
"portdir": "textproc/sowing",
"version": "1.1.25",
"license": "GPL-2+",
"platforms": "darwin",
"epoch": 0,
"replaced_by": null,
"homepage": "http://mcs.anl.gov",
"description": "A parser for fortran interfaces",
"long_description": "A parser for fortran interfaces",
"active": true,
"categories": [
"textproc"
],
"maintainers": [],
"variants": [
"universal"
],
"dependencies": [
{
"type": "build",
"ports": [
"clang-9.0"
]
}
],
"depends_on": [
{
"type": "build",
"ports": [
"petsc",
"slepc"
]
}
]
},
{
"name": "source-highlight",
"portdir": "textproc/source-highlight",
"version": "3.1.9",
"license": "GPL-3+",
"platforms": "darwin",
"epoch": 0,
"replaced_by": null,
"homepage": "https://www.gnu.org/software/src-highlite/",
"description": "source-code syntax highlighter",
"long_description": "This program, given a source file, produces a document with syntax highlighting. At the moment it can handle: C/C++, C#, Bib, Bison, Caml, Changelog, CSS, Diff, Flex, Fortran, Html, Java, Javascript, Latex, Logtalk, Log files, Lua, Makefile, M4, ML, Pascal, Perl, PHP, PostScript, Prolog, Python, Ruby, Shell, Sql, Tcl, and XML.",
"active": true,
"categories": [
"devel",
"textproc"
],
"maintainers": [],
"variants": [
"universal"
],
"dependencies": [
{
"type": "build",
"ports": [
"autoconf",
"automake",
"libtool",
"clang-17"
]
},
{
"type": "lib",
"ports": [
"ctags",
"boost176"
]
}
],
"depends_on": []
},
{
"name": "sloccount",
"portdir": "textproc/sloccount",
"version": "2.26",
"license": "GPL-2+",
"platforms": "darwin",
"epoch": 0,
"replaced_by": null,
"homepage": "https://dwheeler.com/sloccount/",
"description": "program for counting lines of code in a large number of languages",
"long_description": "SLOCCount is a set of tools for counting physical Source Lines of Code (SLOC) in a large number of languages of a potentially large set of programs.",
"active": true,
"categories": [
"devel",
"textproc"
],
"maintainers": [],
"variants": [],
"dependencies": [
{
"type": "build",
"ports": [
"clang-9.0"
]
},
{
"type": "run",
"ports": [
"perl5",
"bash"
]
}
],
"depends_on": []
},
{
"name": "slearp",
"portdir": "textproc/slearp",
"version": "0.95",
"license": "GPL-3",
"platforms": "darwin",
"epoch": 0,
"replaced_by": null,
"homepage": "http://sourceforge.jp/projects/slearp/",
"description": "structured learning and predict toolkit for tasks such as g2p conversion, based on discriminative leaning",
"long_description": "Slearp (structured learning and prediction) is the structured learning and predict toolkit for tasks such as g2p conversion, based on discriminative leaning.",
"active": true,
"categories": [
"textproc",
"math"
],
"maintainers": [],
"variants": [
"universal"
],
"dependencies": [
{
"type": "build",
"ports": [
"clang-9.0"
]
}
],
"depends_on": []
},
{
"name": "py27-simstring",
"portdir": "textproc/simstring",
"version": "20140723",
"license": "BSD",
"platforms": "darwin",
"epoch": 0,
"replaced_by": null,
"homepage": "http://www.chokkan.org/software/simstring/",
"description": "SimString Python module",
"long_description": "SimString Python module",
"active": true,
"categories": [
"textproc",
"math",
"python"
],
"maintainers": [],
"variants": [
"universal"
],
"dependencies": [
{
"type": "build",
"ports": [
"autoconf",
"automake",
"libtool",
"swig-python",
"clang-17"
]
},
{
"type": "lib",
"ports": [
"python27"
]
}
],
"depends_on": []
},
{
"name": "simstring",
"portdir": "textproc/simstring",
"version": "20140723",
"license": "BSD",
"platforms": "darwin",
"epoch": 0,
"replaced_by": null,
"homepage": "http://www.chokkan.org/software/simstring/",
"description": "A fast and simple algorithm for approximate string matching/retrieval",
"long_description": "SimString is a simple library for fast approximate string retrieval. Approximate string retrieval finds strings in a database whose similarity with a query string is no smaller than a threshold. Finding not only identical but similar strings, approximate string retrieval has various applications including spelling correction, flexible dictionary matching, duplicate detection, and record linkage.",
"active": true,
"categories": [
"textproc",
"math"
],
"maintainers": [],
"variants": [
"universal"
],
"dependencies": [
{
"type": "build",
"ports": [
"autoconf",
"automake",
"libtool",
"clang-17"
]
}
],
"depends_on": []
}
]
}
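
The payload above is one page of the paginated /api/v1/ports/ listing: "count" is the total number of ports, "next" and "previous" hold the neighbouring page URLs, and "results" contains the port records. Below is a minimal client sketch for walking those pages; it assumes the endpoint also serves format=json, that the third-party requests package is installed, and the helper name iter_ports is illustrative rather than part of the API.

import requests

API_ROOT = "https://ports.macports.org/api/v1/ports/?format=json&ordering=-created_at"

def iter_ports(url=API_ROOT):
    """Yield port records, following the 'next' link of each page."""
    while url:
        page = requests.get(url, timeout=30).json()
        # Each item carries the fields shown above (name, version, license, ...).
        yield from page["results"]
        url = page.get("next")  # None on the last page ends the loop

if __name__ == "__main__":
    for port in iter_ports():
        print(port["name"], port["version"])
        break  # demo: print only the most recently created port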