diff --git a/app/[locale]/tools/base64/page.tsx b/app/[locale]/tools/base64/page.tsx
index 8cae272..e0facb2 100644
--- a/app/[locale]/tools/base64/page.tsx
+++ b/app/[locale]/tools/base64/page.tsx
@@ -8,11 +8,12 @@ import normalizeHex from "@/lib/normalizeHex";
import { validBase64 } from "@/lib/onesearch/baseCheck";
import { useTranslations } from "next-intl";
import { useEffect, useState } from "react";
+// @ts-expect-error -- "unicode-encode" ships no type declarations
import { utoa, atou } from "unicode-encode";
export default function Base64() {
- const t = useTranslations("tools");
- const [mode, setMode] = useState("Encode");
+ const t = useTranslations("tools.base64");
+ const [mode, setMode] = useState(t("encode"));
const [message, setMessage] = useState("");
const [messageResult, setMessageResult] = useState("");
const [isHex, setHex] = useState(false);
@@ -22,7 +23,7 @@ export default function Base64() {
setType("");
setInfo("");
setHex(false);
- if (mode == "Encode") {
+ if (mode == t("encode")) {
setMessageResult(utoa(message));
} else {
if (validBase64(message)) {
@@ -45,20 +46,20 @@ export default function Base64() {
}, [mode, message]);
return (
-
{t("base64.title")}
-
+
{t("title")}
+
);
}
diff --git a/components/tools/notice.tsx b/components/tools/notice.tsx
index ff69c90..f2e0150 100644
--- a/components/tools/notice.tsx
+++ b/components/tools/notice.tsx
@@ -23,7 +23,7 @@ export default function Notice(props: { type: string; info: string; class?: stri
{props.info}
diff --git a/lib/nlp/base.ts b/lib/nlp/base.ts
deleted file mode 100644
index 5e38b79..0000000
--- a/lib/nlp/base.ts
+++ /dev/null
@@ -1,32 +0,0 @@
-import { NLPResult } from "../onesearch/NLPResult";
-import { stopwords } from "./stopwords";
-
-export class NLP {
- result: NLPResult;
- constructor(
- public query: String,
- public task: String,
- public intentionKeywords?: String[],
- ) {
- this.result = new NLPResult();
- }
- public removeStopwords(extraStopwords: string[] = [], disableDefault: boolean = false){
- const list = disableDefault ? extraStopwords : stopwords.concat(extraStopwords);
- if (list.includes(this.query.trim())) {
- this.query = "";
- }
- for (let word of list){
- this.query = this.query.replace(new RegExp(`\\b${word}\\b`, 'gi'), '');
- }
- }
- public extractSlots(str: string, useNER = false): string[]{
- const slots: string[] = [];
-
- return slots;
- }
- public trim() {
- this.query = this.query.trim();
- const wordList = this.query.split(" ").filter(word => word !== "");
- this.query = wordList.join(" ");
- }
-}
\ No newline at end of file
diff --git a/lib/nlp/extract.ts b/lib/nlp/extract.ts
deleted file mode 100644
index 50024f9..0000000
--- a/lib/nlp/extract.ts
+++ /dev/null
@@ -1,7 +0,0 @@
-export default function slotExtract(str: string, keywords: string[]) {
- let r = str;
- for (let keyword of keywords) {
- r = r.replace(keyword, "");
- }
- return r.trim();
-}
diff --git a/lib/nlp/stopwords.ts b/lib/nlp/stopwords.ts
deleted file mode 100644
index 692301a..0000000
--- a/lib/nlp/stopwords.ts
+++ /dev/null
@@ -1,3 +0,0 @@
-export const stopwords = ["a","about","above","after","again","against","all","am","an","and","any","are","aren't","as","at","be","because","been","before","being","below","between","both","but","by","can't","cannot","could","couldn't","did","didn't","do","does","doesn't","doing","don't","down","during","each","few","for","from","further","had","hadn't","has","hasn't","have","haven't","having","he","he'd","he'll","he's","her","here","here's","hers","herself","him","himself","his","how","how's","i","i'd","i'll","i'm","i've","if","in","into","is","isn't","it","it's","its","itself","let's","me","more","most","mustn't","my","myself","no","nor","not","of","off","on","once","only","or","other","ought","our","ours","ourselves","out","over","own","please","same","shan't","she","she'd","she'll","she's","should","shouldn't","so","some","such","than","that","that's","the","their","theirs","them","themselves","then","there","there's","these","they","they'd","they'll","they're","they've","this","those","through","to","too","under","until","up","very","was","wasn't","we","we'd","we'll","we're","we've","were","weren't","what","what's","when","when's","where","where's","which","while","who","who's","whom","why","why's","with","won't","would","wouldn't","you","you'd","you'll","you're","you've","your","yours","yourself","yourselves"];
-
-export const convertStopwords = ["transform", "change", "translate", "convert"];
\ No newline at end of file
diff --git a/lib/onesearch/baseCheck.tsx b/lib/onesearch/baseCheck.tsx
index 78609cd..5c7bfaf 100644
--- a/lib/onesearch/baseCheck.tsx
+++ b/lib/onesearch/baseCheck.tsx
@@ -1,90 +1,3 @@
-import slotExtract from "../nlp/extract";
-import removeStopwords from "../nlp/stopwords";
-import { NLPResult } from "./NLPResult";
-import { Kbd } from "@nextui-org/react";
-
-interface KeywordsDict {
- [key: string]: number;
-}
-
-interface IntentionsDict {
- [key: string]: number;
-}
-
export function validBase64(str: string) {
return str.length % 4 == 0 && /^[A-Za-z0-9+/]+[=]{0,2}$/.test(str);
-}
-
-export function base64NLP(str: string) {
- const keywords: KeywordsDict = {
- base64: 1,
- b64: 0.95,
- base: 0.5
- };
- let result = new NLPResult(null, null, 0.0, 0.0);
- for (let keyword of Object.keys(keywords)) {
- const pos = str.trim().indexOf(keyword);
- const l = str.length;
- const w = str.split(" ").length;
- if (w > 1 && (pos === 0 || pos == l)) {
- result.probability += keywords[keyword];
- break;
- }
- }
-
- const intentions: IntentionsDict = {
- decode: 0.1,
- encode: 1
- };
- for (let intention of Object.keys(intentions)) {
- const pos = str.trim().indexOf(intention);
- const w = str.split(" ").length;
- if (w > 1 && pos !== -1) {
- result.confidence += intentions[intention];
- result.intention = `base64.${intention}`;
- break;
- }
- }
-
- let processedQuery = str;
- if (result.intention === "base64.encode") {
- const blacklist = Object.keys(keywords).concat(Object.keys(intentions)).concat(["convert", "turn"]);
- processedQuery = slotExtract(str, blacklist);
- } else if (result.intention === "base64.decode") {
- processedQuery = removeStopwords(str, Object.keys(keywords).concat(Object.keys(intentions))).trim();
- }
- if (result.intention === "base64.decode") {
- if (validBase64(processedQuery)) {
- result.confidence = 1;
- } else {
- result.confidence = 0;
- }
- } else if (validBase64(processedQuery) && result.intention !== "base64.encode") {
- result.intention = "base64.decode";
- result.confidence += Math.max(1 / Math.log2(1 / processedQuery.length) + 1, 0);
- result.probability += Math.max(1 / Math.log2(1 / processedQuery.length) + 1, 0);
- }
-
- switch (result.intention) {
- case "base64.encode":
- result.suggestion = btoa(processedQuery);
- result.prompt = (
-
- Base64 Encode (Hit to copy):
-
- );
- break;
- case "base64.decode":
- if (result.confidence > 0.1) result.suggestion = atob(processedQuery);
- result.prompt = (
-
- Base64 Decode (Hit to copy):
-
- );
- break;
- default:
- break;
- }
-
- return result;
-}
+}
\ No newline at end of file
diff --git a/lib/version.ts b/lib/version.ts
index e394c32..9cd1228 100644
--- a/lib/version.ts
+++ b/lib/version.ts
@@ -1,3 +1,3 @@
-export const SPARKHOME_VERSION="4.17.0";
-export const CLIENT_VERSION="4.17.0";
+export const SPARKHOME_VERSION="4.17.1";
+export const CLIENT_VERSION="4.17.1";
export const NEXT_API_VERSION="4.14.3";
\ No newline at end of file
diff --git a/messages/en-US.json b/messages/en-US.json
index 11db361..35caf64 100644
--- a/messages/en-US.json
+++ b/messages/en-US.json
@@ -21,7 +21,12 @@
},
"tools": {
"base64": {
- "title": "Base64 tools - LuminaraUtils"
+ "title": "Base64 tools - LuminaraUtils",
+ "decode": "Decode",
+ "encode": "Encode",
+ "result": "Result: ",
+ "copy": "Copy",
+ "copied": "Copied"
}
}
}
diff --git a/messages/zh-CN.json b/messages/zh-CN.json
index 0992ac2..62b855c 100644
--- a/messages/zh-CN.json
+++ b/messages/zh-CN.json
@@ -18,7 +18,12 @@
},
"tools": {
"base64": {
- "title": "Base64 工具"
+ "title": "Base64 工具",
+ "decode": "解码",
+ "encode": "编码",
+ "result": "结果:",
+ "copy": "复制",
+ "copied": "已复制"
}
}
}
diff --git a/package.json b/package.json
index dbe8f46..539914e 100644
--- a/package.json
+++ b/package.json
@@ -1,6 +1,6 @@
{
"name": "sparkhome",
- "version": "4.17.0",
+ "version": "4.17.1",
"private": false,
"scripts": {
"dev": "next dev",
diff --git a/test/NLP/removeStopwords.test.ts b/test/NLP/removeStopwords.test.ts
deleted file mode 100644
index 63334d4..0000000
--- a/test/NLP/removeStopwords.test.ts
+++ /dev/null
@@ -1,18 +0,0 @@
-
-import { NLP } from "@/lib/nlp/base";
-import { convertStopwords } from "@/lib/nlp/stopwords";
-import { describe, expect, test } from "@jest/globals";
-
-describe("Test 1", () => {
- test("basic", () => {
- const nlp = new NLP("please", "remove-stopword");
- nlp.removeStopwords();
- expect(nlp.query).toBe("");
- });
- test("convert something", () => {
- const nlp = new NLP("please convert 1cm to m", "remove-stopword");
- nlp.removeStopwords(convertStopwords);
- nlp.trim();
- expect(nlp.query).toBe("1cm m");
- });
-});
diff --git a/test/base64.test.ts b/test/base64.test.ts
index 26aacc2..426cfab 100644
--- a/test/base64.test.ts
+++ b/test/base64.test.ts
@@ -1,11 +1,8 @@
-import { base64NLP } from "@/lib/onesearch/baseCheck";
+import { validBase64 } from "@/lib/onesearch/baseCheck";
import { describe, expect, test } from "@jest/globals";
describe("To auto-detect the intention of decoding an base64 string", () => {
test("Implicit declaration", () => {
- expect(base64NLP("base64 encode encode MjM6MjQgQXByIDI1LCAyMDI0").intention).toBe("base64.encode");
- expect(base64NLP("base64 encode encode MjM6MjQgQXByIDI1LCAyMDI0").suggestion).toBe(
- "ZW5jb2RlIE1qTTZNalFnUVhCeUlESTFMQ0F5TURJMA=="
- );
+ expect(validBase64("MjM6MjQgQXByIDI1LCAyMDI0")).toBe(true);
});
});