From efb6bc483faf9aa792a14a1a9917017d7ec5d800 Mon Sep 17 00:00:00 2001
From: rhel
Date: Sep 24 2022 05:47:51 +0000
Subject: Update spec_parser.rb, examplemod-4-7fc4.noarch.zip, and 44 more files...

---

diff --git a/.fpmbuild/spec_parser.rb b/.fpmbuild/spec_parser.rb
index 3891e9a..ec47562 100644
--- a/.fpmbuild/spec_parser.rb
+++ b/.fpmbuild/spec_parser.rb
@@ -82,7 +82,7 @@ class SpecParser
   spec_array = IO.readlines($Spec)
   spec_array.each do |line|
     if line .include?("Name:")
-      return pre_validate(line.sub("Name:", ""))
+      return pre_validate(line.gsub("Name:", ""))
     end
   end
@@ -92,7 +92,7 @@ class SpecParser
   spec_array = IO.readlines($Spec)
   spec_array.each do |line|
     if line .include?("Version:")
-      return pre_validate(line.sub("Version:", ""))
+      return pre_validate(line.gsub("Version:", ""))
     end
   end
@@ -107,7 +107,7 @@ class SpecParser
   spec_array = IO.readlines($Spec)
   spec_array.each do |line|
     if line .include?("Release:")
-      return pre_validate(line.sub("Release:", "").sub("%{?dist}", get_dist))
+      return pre_validate(line.gsub("Release:", "").gsub("%{?dist}", get_dist))
     end
   end
@@ -118,7 +118,7 @@ class SpecParser
   spec_array = IO.readlines($Spec)
   spec_array.each do |line|
     if line .include?("Summary:")
-      return pre_validate(line.sub("Summary:", ""))
+      return pre_validate(line.gsub("Summary:", ""))
     end
   end
@@ -129,7 +129,7 @@ class SpecParser
   spec_array = IO.readlines($Spec)
   spec_array.each do |line|
     if line .include?("License:")
-      return pre_validate(line.sub("License:", ""))
+      return pre_validate(line.gsub("License:", ""))
     end
   end
@@ -140,7 +140,7 @@ class SpecParser
   spec_array = IO.readlines($Spec)
   spec_array.each do |line|
     if line .include?("URL:")
-      return pre_validate(line.sub("URL:", ""))
+      return pre_validate(line.gsub("URL:", ""))
     end
   end
@@ -150,7 +150,7 @@ class SpecParser
   spec_array = IO.readlines($Spec)
   spec_array.each do |line|
     if line .include?("Source0:")
-      return pre_validate(line.sub("Source0:", ""))
+      return pre_validate(line.gsub("Source0:", ""))
     end
   end
@@ -160,7 +160,7 @@ class SpecParser
   spec_array = IO.readlines($Spec)
   spec_array.each do |line|
     if line .include?("BuildRequires:")
-      return pre_validate(line.sub("BuildRequires:", ""))
+      return pre_validate(line.gsub("BuildRequires:", ""))
     end
   end
@@ -170,7 +170,7 @@ class SpecParser
   spec_array = IO.readlines($Spec)
   spec_array.each do |line|
     if line .include?("Requires:")
-      return pre_validate(line.sub("Requires:", ""))
+      return pre_validate(line.gsub("Requires:", ""))
     end
   end
@@ -180,7 +180,7 @@ class SpecParser
   spec_array = IO.readlines($Spec)
   spec_array.each do |line|
     if line .include?("Main-Class:")
-      return pre_validate(line.sub("Main-Class:", ""))
+      return pre_validate(line.gsub("Main-Class:", ""))
     end
   end
@@ -224,58 +224,58 @@ class SpecParser
   def validate (string)
     str = pre_validate(string)
     if str.include?("%{?dist}")
-      str = str.sub("%{?dist}", get_dist)
+      str = str.gsub("%{?dist}", get_dist)
     end
     if str.include?("%{?name}")
-      str = str.sub("%{?name}", get_name)
+      str = str.gsub("%{?name}", get_name)
     end
     if str.include?("%{?version}")
-      str = str.sub("%{?version}", get_version)
+      str = str.gsub("%{?version}", get_version)
     end
     if str.include?("%{?release}")
-      str = str.sub("%{?release}", get_release)
+      str = str.gsub("%{?release}", get_release)
     end
     if str.include?("%{?summary}")
-      str = str.sub("%{?summary}", get_summary)
+      str = str.gsub("%{?summary}", get_summary)
     end
     if str.include?("%{?license}")
-      str = str.sub("%{?license}", get_licence)
+      str = str.gsub("%{?license}", get_licence)
     end
     if str.include?("%{?url}")
-      str = str.sub("%{?url}", get_url)
+      str = str.gsub("%{?url}", get_url)
     end
     if str.include?("%{?source0}")
-      str = str.sub("%{?source0}", get_source0)
+      str = str.gsub("%{?source0}", get_source0)
     end
     if str.include?("%{?build_requires}")
-      str = str.sub("%{?build_requires}", get_build_requires)
+      str = str.gsub("%{?build_requires}", get_build_requires)
     end
     if str.include?("%{?requires}")
-      str = str.sub("%{?requires}", get_requires)
+      str = str.gsub("%{?requires}", get_requires)
     end
     if str.include?("%{?description}")
-      str = str.sub("%{?description}", get_description)
+      str = str.gsub("%{?description}", get_description)
     end
     if str.include?("%{?fpmbuild_location}")
-      str = str.sub("%{?fpmbuild_location}", get_fpmbuild_location)
+      str = str.gsub("%{?fpmbuild_location}", get_fpmbuild_location)
     end
     if str.include?("%{?main-class}")
-      str = str.sub("%{?main-class}", get_main_class)
+      str = str.gsub("%{?main-class}", get_main_class)
     end
     if str.include?("%{?sources_location}")
-      str = str.sub("%{?sources_location}", get_sources_location)
+      str = str.gsub("%{?sources_location}", get_sources_location)
     end
     if str.include?("$FPM_BUILD_ROOT")
-      str = str.sub("$FPM_BUILD_ROOT", get_build_root)
+      str = str.gsub("$FPM_BUILD_ROOT", get_build_root)
     end
     if str.include?("%{?build_root}")
-      str = str.sub("%{?build_root}", get_build_root)
+      str = str.gsub("%{?build_root}", get_build_root)
     end
     if str.include?("%{?fpm_dir}")
-      str = str.sub("%{?fpm_dir}", get_fpm_dir)
+      str = str.gsub("%{?fpm_dir}", get_fpm_dir)
     end
     if str.include?("%{?sfpm_dir}")
-      str = str.sub("%{?sfpm_dir}", get_sfpm_dir)
+      str = str.gsub("%{?sfpm_dir}", get_sfpm_dir)
     end
@@ -330,6 +330,15 @@ def execute (line)
   else
+
+    manifest_text = File.read(manifest)
+    # manifest_text = manifest_text.gsub /^$\n/, '' https://stackoverflow.com/questions/7339292/ruby-remove-empty-lines-from-string
+    manifest_text = manifest_text.each_line.reject{|x| x.strip == ""}.join
+    File.open(manifest, 'w') do |file|
+      file.write(manifest_text)
+    end
+
+
     File.open(manifest, 'a') do |file|
       file.puts "Main-Class: "+get_main_class.strip
     end
diff --git a/BUILD/examplemod-4-7fc4.noarch.zip b/BUILD/examplemod-4-7fc4.noarch.zip
index 3cc46ea..10bcfad 100644
Binary files a/BUILD/examplemod-4-7fc4.noarch.zip and b/BUILD/examplemod-4-7fc4.noarch.zip differ
diff --git a/BUILD/examplemod-4-8fc4.noarch.zip b/BUILD/examplemod-4-8fc4.noarch.zip
new file mode 100644
index 0000000..47335a2
Binary files /dev/null and b/BUILD/examplemod-4-8fc4.noarch.zip differ
diff --git a/FPMS/examplemod-4-7fc4.noarch.fpm b/FPMS/examplemod-4-7fc4.noarch.fpm
index 6104a53..5bf140f 100644
Binary files a/FPMS/examplemod-4-7fc4.noarch.fpm and b/FPMS/examplemod-4-7fc4.noarch.fpm differ
diff --git a/FPMS/examplemod-4-7fc4.noarch.fpm.jar b/FPMS/examplemod-4-7fc4.noarch.fpm.jar
new file mode 100644
index 0000000..65729da
Binary files /dev/null and b/FPMS/examplemod-4-7fc4.noarch.fpm.jar differ
diff --git a/FPMS/examplemod-4-8fc4.noarch.fpm b/FPMS/examplemod-4-8fc4.noarch.fpm
new file mode 100644
index 0000000..cd73d9c
Binary files /dev/null and b/FPMS/examplemod-4-8fc4.noarch.fpm differ
diff --git a/SFPMS/examplemod-4-7fc4.noarch.sfpm b/SFPMS/examplemod-4-7fc4.noarch.sfpm
index e653b41..20708e2 100644
Binary files a/SFPMS/examplemod-4-7fc4.noarch.sfpm and b/SFPMS/examplemod-4-7fc4.noarch.sfpm differ
diff --git a/SFPMS/examplemod-4-8fc4.noarch.sfpm b/SFPMS/examplemod-4-8fc4.noarch.sfpm
new file mode 100644
index 0000000..a898482
Binary files /dev/null and b/SFPMS/examplemod-4-8fc4.noarch.sfpm differ
diff --git a/SOURCES/.gitignore b/SOURCES/.gitignore
new file mode 100644
index 0000000..e10e727
--- /dev/null
+++ b/SOURCES/.gitignore
@@ -0,0 +1 @@
+/.metadata/
diff --git a/SOURCES/.metadata/.log b/SOURCES/.metadata/.log
index ec25613..29bdbb9 100644
--- a/SOURCES/.metadata/.log
+++ b/SOURCES/.metadata/.log
[Diff body of this auto-generated Eclipse workspace log omitted; the directory is newly ignored via the SOURCES/.gitignore entry above. Per its hunk headers the log shrinks from roughly 1,500 lines to about 220: the stale 2022-09-02 and 2022-09-16 sessions (keybinding-conflict warnings, repeated org.eclipse.equinox.p2 "No repository found" ProvisionException stack traces for dead jboss.org, bintray, and eclipse.org update sites, org.eclipse.wildwebdeveloper.xml FileNotFoundException traces, and lsp4e errors) are dropped, and a single 2022-09-23 session with the same classes of warnings is recorded, newly including three org.jboss.tools.common.model JarAccess messages about '.classpath', '.project', and 'pom.xml' entries missing from SOURCES/examplemod/libs/FCUserDev.jar. The diff is truncated mid-hunk in the source.]
-!SUBENTRY 1 org.eclipse.jface 2 0 2022-09-16 18:51:53.713
-!MESSAGE A conflict occurred for CTRL+SHIFT+G:
-Binding(CTRL+SHIFT+G,
-	ParameterizedCommand(Command(org.eclipse.jdt.ui.edit.text.java.search.references.in.workspace,References in Workspace,
-		Search for references to the selected element in the workspace,
-		Category(org.eclipse.search.ui.category.search,Search,Search command category,true),
-		org.eclipse.ui.internal.WorkbenchHandlerServiceHandler@5995c894,
-		,,true),null),
-	org.eclipse.ui.defaultAcceleratorConfiguration,
-	org.eclipse.ui.contexts.window,,,system)
-Binding(CTRL+SHIFT+G,
-	ParameterizedCommand(Command(org.jboss.tools.seam.ui.find.references,Find Seam References,
-		Find Seam References,
-		Category(org.eclipse.search.ui.category.search,Search,Search command category,true),
-		org.eclipse.ui.internal.WorkbenchHandlerServiceHandler@736c6334,
-		,,true),null),
-	org.eclipse.ui.defaultAcceleratorConfiguration,
-	org.eclipse.wst.sse.ui.structuredTextEditorScope,,,system)
-Binding(CTRL+SHIFT+G,
-	ParameterizedCommand(Command(org.springframework.ide.eclipse.beans.ui.editor.commands.searchBeanReferences,Spring Beans References Search,
-		,
-		Category(org.eclipse.search.ui.category.search,Search,Search command category,true),
-		org.eclipse.ui.internal.WorkbenchHandlerServiceHandler@2b63b731,
-		,,true),null),
-	org.eclipse.ui.defaultAcceleratorConfiguration,
-	org.eclipse.wst.sse.ui.structuredTextEditorScope,,,system)
-
-!ENTRY org.eclipse.ui 4 0 2022-09-16 22:16:05.759
-!MESSAGE Unhandled event loop exception
-!STACK 0
-org.eclipse.swt.SWTException: Widget is disposed
-	at org.eclipse.swt.SWT.error(SWT.java:4893)
-	at org.eclipse.swt.SWT.error(SWT.java:4808)
-	at org.eclipse.swt.SWT.error(SWT.java:4779)
-	at org.eclipse.swt.widgets.Widget.error(Widget.java:560)
-	at org.eclipse.swt.widgets.Widget.checkWidget(Widget.java:475)
-	at org.eclipse.swt.widgets.Composite.setFocus(Composite.java:1675)
-	at org.eclipse.m2e.core.ui.internal.wizards.MavenArtifactComponent.setFocus(MavenArtifactComponent.java:174)
-	at org.eclipse.swt.widgets.Control.fixFocus(Control.java:321)
-	at org.eclipse.swt.widgets.Control.releaseWidget(Control.java:4787)
-	at org.eclipse.swt.widgets.Composite.releaseWidget(Composite.java:1584)
-	at org.eclipse.swt.widgets.Combo.releaseWidget(Combo.java:2006)
-	at org.eclipse.swt.widgets.Widget.release(Widget.java:1350)
-	at org.eclipse.swt.widgets.Control.release(Control.java:4764)
-	at org.eclipse.swt.widgets.Composite.releaseChildren(Composite.java:1567)
-	at org.eclipse.swt.widgets.Widget.release(Widget.java:1338)
-	at org.eclipse.swt.widgets.Control.release(Control.java:4764)
-	at org.eclipse.swt.widgets.Composite.releaseChildren(Composite.java:1567)
-	at org.eclipse.swt.widgets.Widget.release(Widget.java:1338)
-	at org.eclipse.swt.widgets.Control.release(Control.java:4764)
-	at org.eclipse.swt.widgets.Composite.releaseChildren(Composite.java:1567)
-	at org.eclipse.swt.widgets.Widget.release(Widget.java:1338)
-	at org.eclipse.swt.widgets.Control.release(Control.java:4764)
-	at org.eclipse.swt.widgets.Composite.releaseChildren(Composite.java:1567)
-	at org.eclipse.swt.widgets.Widget.release(Widget.java:1338)
-	at org.eclipse.swt.widgets.Control.release(Control.java:4764)
-	at org.eclipse.swt.widgets.Composite.releaseChildren(Composite.java:1567)
-	at org.eclipse.swt.widgets.Widget.release(Widget.java:1338)
-	at org.eclipse.swt.widgets.Control.release(Control.java:4764)
-	at org.eclipse.swt.widgets.Composite.releaseChildren(Composite.java:1567)
-	at org.eclipse.swt.widgets.Widget.release(Widget.java:1338)
-	at org.eclipse.swt.widgets.Control.release(Control.java:4764)
-	at org.eclipse.swt.widgets.Composite.releaseChildren(Composite.java:1567)
-	at org.eclipse.swt.widgets.Widget.release(Widget.java:1338)
-	at org.eclipse.swt.widgets.Control.release(Control.java:4764)
-	at org.eclipse.swt.widgets.Composite.releaseChildren(Composite.java:1567)
-	at org.eclipse.swt.widgets.Canvas.releaseChildren(Canvas.java:281)
-	at org.eclipse.swt.widgets.Decorations.releaseChildren(Decorations.java:505)
-	at org.eclipse.swt.widgets.Shell.releaseChildren(Shell.java:3399)
-	at org.eclipse.swt.widgets.Widget.release(Widget.java:1338)
-	at org.eclipse.swt.widgets.Control.release(Control.java:4764)
-	at org.eclipse.swt.widgets.Widget.dispose(Widget.java:538)
-	at org.eclipse.swt.widgets.Shell.dispose(Shell.java:3316)
-	at org.eclipse.jface.window.Window.close(Window.java:335)
-	at org.eclipse.jface.dialogs.Dialog.close(Dialog.java:988)
-	at org.eclipse.jface.wizard.WizardDialog.hardClose(WizardDialog.java:894)
-	at org.eclipse.jface.wizard.WizardDialog.finishPressed(WizardDialog.java:840)
-	at org.eclipse.jface.wizard.WizardDialog.buttonPressed(WizardDialog.java:472)
-	at org.eclipse.jface.dialogs.Dialog.lambda$0(Dialog.java:619)
-	at org.eclipse.swt.events.SelectionListener$1.widgetSelected(SelectionListener.java:84)
-	at org.eclipse.swt.widgets.TypedListener.handleEvent(TypedListener.java:252)
-	at org.eclipse.swt.widgets.EventTable.sendEvent(EventTable.java:89)
-	at org.eclipse.swt.widgets.Display.sendEvent(Display.java:5884)
-	at org.eclipse.swt.widgets.Widget.sendEvent(Widget.java:1522)
-	at org.eclipse.swt.widgets.Display.runDeferredEvents(Display.java:5126)
-	at org.eclipse.swt.widgets.Display.readAndDispatch(Display.java:4576)
-	at org.eclipse.jface.window.Window.runEventLoop(Window.java:823)
-	at org.eclipse.jface.window.Window.open(Window.java:799)
-	at org.eclipse.ui.internal.actions.NewWizardShortcutAction.run(NewWizardShortcutAction.java:130)
-	at org.eclipse.jface.action.Action.runWithEvent(Action.java:474)
-	at org.eclipse.jface.action.ActionContributionItem.handleWidgetSelection(ActionContributionItem.java:580)
-	at org.eclipse.jface.action.ActionContributionItem.lambda$4(ActionContributionItem.java:414)
-	at org.eclipse.swt.widgets.EventTable.sendEvent(EventTable.java:89)
-	at org.eclipse.swt.widgets.Display.sendEvent(Display.java:5884)
-	at org.eclipse.swt.widgets.Widget.sendEvent(Widget.java:1522)
-	at org.eclipse.swt.widgets.Display.runDeferredEvents(Display.java:5126)
-	at org.eclipse.swt.widgets.Display.readAndDispatch(Display.java:4576)
-	at org.eclipse.e4.ui.internal.workbench.swt.PartRenderingEngine$5.run(PartRenderingEngine.java:1150)
-	at org.eclipse.core.databinding.observable.Realm.runWithDefault(Realm.java:338)
-	at org.eclipse.e4.ui.internal.workbench.swt.PartRenderingEngine.run(PartRenderingEngine.java:1041)
-	at org.eclipse.e4.ui.internal.workbench.E4Workbench.createAndRunUI(E4Workbench.java:155)
-	at org.eclipse.ui.internal.Workbench.lambda$3(Workbench.java:644)
-	at org.eclipse.core.databinding.observable.Realm.runWithDefault(Realm.java:338)
-	at org.eclipse.ui.internal.Workbench.createAndRunWorkbench(Workbench.java:551)
-	at org.eclipse.ui.PlatformUI.createAndRunWorkbench(PlatformUI.java:156)
-	at org.eclipse.ui.internal.ide.application.IDEApplication.start(IDEApplication.java:152)
-	at org.eclipse.equinox.internal.app.EclipseAppHandle.run(EclipseAppHandle.java:203)
-	at org.eclipse.core.runtime.internal.adaptor.EclipseAppLauncher.runApplication(EclipseAppLauncher.java:136)
-	at org.eclipse.core.runtime.internal.adaptor.EclipseAppLauncher.start(EclipseAppLauncher.java:104)
-	at org.eclipse.core.runtime.adaptor.EclipseStarter.run(EclipseStarter.java:401)
-	at org.eclipse.core.runtime.adaptor.EclipseStarter.run(EclipseStarter.java:255)
-	at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
-	at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
-	at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
-	at java.base/java.lang.reflect.Method.invoke(Method.java:566)
-	at org.eclipse.equinox.launcher.Main.invokeFramework(Main.java:659)
-	at org.eclipse.equinox.launcher.Main.basicRun(Main.java:596)
-	at org.eclipse.equinox.launcher.Main.run(Main.java:1467)
-	at org.eclipse.equinox.launcher.Main.main(Main.java:1440)
-
-!ENTRY org.eclipse.ui 4 0 2022-09-16 22:16:05.761
-!MESSAGE Unhandled event loop exception
-!STACK 0
-org.eclipse.swt.SWTException: Widget is disposed
-	at org.eclipse.swt.SWT.error(SWT.java:4893)
-	at org.eclipse.swt.SWT.error(SWT.java:4808)
-	at org.eclipse.swt.SWT.error(SWT.java:4779)
-	at org.eclipse.swt.widgets.Widget.error(Widget.java:560)
-	at org.eclipse.swt.widgets.Widget.checkWidget(Widget.java:475)
-	at org.eclipse.swt.widgets.Composite.setFocus(Composite.java:1675)
-	at org.eclipse.m2e.core.ui.internal.wizards.MavenArtifactComponent.setFocus(MavenArtifactComponent.java:174)
-	at org.eclipse.swt.widgets.Control.fixFocus(Control.java:321)
-	at org.eclipse.swt.widgets.Control.releaseWidget(Control.java:4787)
-	at org.eclipse.swt.widgets.Composite.releaseWidget(Composite.java:1584)
-	at org.eclipse.swt.widgets.Group.releaseWidget(Group.java:314)
-	at org.eclipse.swt.widgets.Widget.release(Widget.java:1350)
-	at org.eclipse.swt.widgets.Control.release(Control.java:4764)
-	at org.eclipse.swt.widgets.Composite.releaseChildren(Composite.java:1567)
-	at org.eclipse.swt.widgets.Widget.release(Widget.java:1338)
-	at org.eclipse.swt.widgets.Control.release(Control.java:4764)
-	at org.eclipse.swt.widgets.Composite.releaseChildren(Composite.java:1567)
-	at org.eclipse.swt.widgets.Widget.release(Widget.java:1338)
-	at org.eclipse.swt.widgets.Control.release(Control.java:4764)
-	at org.eclipse.swt.widgets.Composite.releaseChildren(Composite.java:1567)
-	at org.eclipse.swt.widgets.Widget.release(Widget.java:1338)
-	at org.eclipse.swt.widgets.Control.release(Control.java:4764)
-	at org.eclipse.swt.widgets.Composite.releaseChildren(Composite.java:1567)
-	at org.eclipse.swt.widgets.Widget.release(Widget.java:1338)
-	at org.eclipse.swt.widgets.Control.release(Control.java:4764)
-	at org.eclipse.swt.widgets.Composite.releaseChildren(Composite.java:1567)
-	at org.eclipse.swt.widgets.Widget.release(Widget.java:1338)
-	at org.eclipse.swt.widgets.Control.release(Control.java:4764)
-	at org.eclipse.swt.widgets.Composite.releaseChildren(Composite.java:1567)
-	at org.eclipse.swt.widgets.Canvas.releaseChildren(Canvas.java:281)
-	at org.eclipse.swt.widgets.Decorations.releaseChildren(Decorations.java:505)
-	at org.eclipse.swt.widgets.Shell.releaseChildren(Shell.java:3399)
-	at org.eclipse.swt.widgets.Widget.release(Widget.java:1338)
-	at org.eclipse.swt.widgets.Control.release(Control.java:4764)
-	at org.eclipse.swt.widgets.Widget.dispose(Widget.java:538)
-	at org.eclipse.swt.widgets.Shell.dispose(Shell.java:3316)
-	at org.eclipse.jface.window.Window.close(Window.java:335)
-	at org.eclipse.jface.dialogs.Dialog.close(Dialog.java:988)
-	at org.eclipse.jface.wizard.WizardDialog.hardClose(WizardDialog.java:894)
-	at org.eclipse.jface.wizard.WizardDialog.finishPressed(WizardDialog.java:840)
-	at org.eclipse.jface.wizard.WizardDialog.buttonPressed(WizardDialog.java:472)
-	at org.eclipse.jface.dialogs.Dialog.lambda$0(Dialog.java:619)
-	at org.eclipse.swt.events.SelectionListener$1.widgetSelected(SelectionListener.java:84)
-	at org.eclipse.swt.widgets.TypedListener.handleEvent(TypedListener.java:252)
-	at org.eclipse.swt.widgets.EventTable.sendEvent(EventTable.java:89)
-	at org.eclipse.swt.widgets.Display.sendEvent(Display.java:5884)
-	at org.eclipse.swt.widgets.Widget.sendEvent(Widget.java:1522)
-	at org.eclipse.swt.widgets.Display.runDeferredEvents(Display.java:5126)
-	at org.eclipse.swt.widgets.Display.readAndDispatch(Display.java:4576)
-	at org.eclipse.jface.window.Window.runEventLoop(Window.java:823)
-	at org.eclipse.jface.window.Window.open(Window.java:799)
-	at org.eclipse.ui.internal.actions.NewWizardShortcutAction.run(NewWizardShortcutAction.java:130)
-	at org.eclipse.jface.action.Action.runWithEvent(Action.java:474)
-	at org.eclipse.jface.action.ActionContributionItem.handleWidgetSelection(ActionContributionItem.java:580)
-	at org.eclipse.jface.action.ActionContributionItem.lambda$4(ActionContributionItem.java:414)
-	at org.eclipse.swt.widgets.EventTable.sendEvent(EventTable.java:89)
-	at org.eclipse.swt.widgets.Display.sendEvent(Display.java:5884)
-	at org.eclipse.swt.widgets.Widget.sendEvent(Widget.java:1522)
-	at org.eclipse.swt.widgets.Display.runDeferredEvents(Display.java:5126)
-	at org.eclipse.swt.widgets.Display.readAndDispatch(Display.java:4576)
-	at org.eclipse.e4.ui.internal.workbench.swt.PartRenderingEngine$5.run(PartRenderingEngine.java:1150)
-	at org.eclipse.core.databinding.observable.Realm.runWithDefault(Realm.java:338)
-	at org.eclipse.e4.ui.internal.workbench.swt.PartRenderingEngine.run(PartRenderingEngine.java:1041)
-	at org.eclipse.e4.ui.internal.workbench.E4Workbench.createAndRunUI(E4Workbench.java:155)
-	at org.eclipse.ui.internal.Workbench.lambda$3(Workbench.java:644)
-	at org.eclipse.core.databinding.observable.Realm.runWithDefault(Realm.java:338)
-	at org.eclipse.ui.internal.Workbench.createAndRunWorkbench(Workbench.java:551)
-	at org.eclipse.ui.PlatformUI.createAndRunWorkbench(PlatformUI.java:156)
-	at org.eclipse.ui.internal.ide.application.IDEApplication.start(IDEApplication.java:152)
-	at org.eclipse.equinox.internal.app.EclipseAppHandle.run(EclipseAppHandle.java:203)
-	at org.eclipse.core.runtime.internal.adaptor.EclipseAppLauncher.runApplication(EclipseAppLauncher.java:136)
-	at org.eclipse.core.runtime.internal.adaptor.EclipseAppLauncher.start(EclipseAppLauncher.java:104)
-	at org.eclipse.core.runtime.adaptor.EclipseStarter.run(EclipseStarter.java:401)
-	at org.eclipse.core.runtime.adaptor.EclipseStarter.run(EclipseStarter.java:255)
-	at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
-	at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
-	at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
-	at java.base/java.lang.reflect.Method.invoke(Method.java:566)
-	at org.eclipse.equinox.launcher.Main.invokeFramework(Main.java:659)
-	at org.eclipse.equinox.launcher.Main.basicRun(Main.java:596)
-	at org.eclipse.equinox.launcher.Main.run(Main.java:1467)
-	at org.eclipse.equinox.launcher.Main.main(Main.java:1440)
-
-!ENTRY org.eclipse.jdt.core 4 2 2022-09-16 22:16:21.250
-!MESSAGE Problems occurred when invoking code from plug-in: "org.eclipse.jdt.core".
-!STACK 0
-java.lang.NullPointerException
-	at org.eclipse.lsp4mp.jdt.core.utils.JDTMicroProfileUtils.getProjectURI(JDTMicroProfileUtils.java:62)
-	at org.eclipse.lsp4mp.jdt.core.utils.JDTMicroProfileUtils.getProjectURI(JDTMicroProfileUtils.java:52)
-	at org.eclipse.lsp4mp.jdt.internal.core.MicroProfilePropertiesListenerManager$MicroProfileListener.processDelta(MicroProfilePropertiesListenerManager.java:105)
-	at org.eclipse.lsp4mp.jdt.internal.core.MicroProfilePropertiesListenerManager$MicroProfileListener.processDeltaChildren(MicroProfilePropertiesListenerManager.java:85)
-	at org.eclipse.lsp4mp.jdt.internal.core.MicroProfilePropertiesListenerManager$MicroProfileListener.processDelta(MicroProfilePropertiesListenerManager.java:95)
-	at org.eclipse.lsp4mp.jdt.internal.core.MicroProfilePropertiesListenerManager$MicroProfileListener.elementChanged(MicroProfilePropertiesListenerManager.java:76)
-	at org.eclipse.jdt.internal.core.DeltaProcessor$3.run(DeltaProcessor.java:1755)
-	at org.eclipse.core.runtime.SafeRunner.run(SafeRunner.java:45)
-	at org.eclipse.jdt.internal.core.DeltaProcessor.notifyListeners(DeltaProcessor.java:1743)
-	at org.eclipse.jdt.internal.core.DeltaProcessor.firePostChangeDelta(DeltaProcessor.java:1576)
-	at org.eclipse.jdt.internal.core.DeltaProcessor.fire(DeltaProcessor.java:1552)
-	at org.eclipse.jdt.internal.core.DeltaProcessor.notifyAndFire(DeltaProcessor.java:2273)
-	at org.eclipse.jdt.internal.core.DeltaProcessor.resourceChanged(DeltaProcessor.java:2163)
-	at org.eclipse.jdt.internal.core.DeltaProcessingState.resourceChanged(DeltaProcessingState.java:501)
-	at org.eclipse.core.internal.events.NotificationManager$1.run(NotificationManager.java:305)
-	at org.eclipse.core.runtime.SafeRunner.run(SafeRunner.java:45)
-	at org.eclipse.core.internal.events.NotificationManager.notify(NotificationManager.java:295)
-	at org.eclipse.core.internal.events.NotificationManager.broadcastChanges(NotificationManager.java:158)
-	at org.eclipse.core.internal.resources.Workspace.broadcastPostChange(Workspace.java:381)
-	at org.eclipse.core.internal.resources.Workspace.checkpoint(Workspace.java:576)
-	at org.eclipse.ltk.core.refactoring.PerformChangeOperation.lambda$0(PerformChangeOperation.java:263)
-	at org.eclipse.core.internal.resources.Workspace.run(Workspace.java:2313)
-	at org.eclipse.core.internal.resources.Workspace.run(Workspace.java:2338)
-	at org.eclipse.ltk.core.refactoring.PerformChangeOperation.executeChange(PerformChangeOperation.java:295)
-	at org.eclipse.ltk.internal.ui.refactoring.UIPerformChangeOperation.executeChange(UIPerformChangeOperation.java:94)
-	at org.eclipse.ltk.core.refactoring.PerformChangeOperation.run(PerformChangeOperation.java:219)
-	at org.eclipse.core.internal.resources.Workspace.run(Workspace.java:2313)
-	at org.eclipse.core.internal.resources.Workspace.run(Workspace.java:2338)
-	at org.eclipse.ltk.internal.ui.refactoring.WorkbenchRunnableAdapter.run(WorkbenchRunnableAdapter.java:89)
-	at org.eclipse.jface.operation.ModalContext$ModalContextThread.run(ModalContext.java:122)
-
-!ENTRY org.eclipse.jdt.core 4 4 2022-09-16 22:16:21.251
-!MESSAGE Exception occurred in listener of Java element change notification
-!STACK 0
-java.lang.NullPointerException
-	at org.eclipse.lsp4mp.jdt.core.utils.JDTMicroProfileUtils.getProjectURI(JDTMicroProfileUtils.java:62)
-	at org.eclipse.lsp4mp.jdt.core.utils.JDTMicroProfileUtils.getProjectURI(JDTMicroProfileUtils.java:52)
-	at org.eclipse.lsp4mp.jdt.internal.core.MicroProfilePropertiesListenerManager$MicroProfileListener.processDelta(MicroProfilePropertiesListenerManager.java:105)
-	at org.eclipse.lsp4mp.jdt.internal.core.MicroProfilePropertiesListenerManager$MicroProfileListener.processDeltaChildren(MicroProfilePropertiesListenerManager.java:85)
-	at org.eclipse.lsp4mp.jdt.internal.core.MicroProfilePropertiesListenerManager$MicroProfileListener.processDelta(MicroProfilePropertiesListenerManager.java:95)
-	at org.eclipse.lsp4mp.jdt.internal.core.MicroProfilePropertiesListenerManager$MicroProfileListener.elementChanged(MicroProfilePropertiesListenerManager.java:76)
-	at org.eclipse.jdt.internal.core.DeltaProcessor$3.run(DeltaProcessor.java:1755)
-	at org.eclipse.core.runtime.SafeRunner.run(SafeRunner.java:45)
-	at org.eclipse.jdt.internal.core.DeltaProcessor.notifyListeners(DeltaProcessor.java:1743)
-	at org.eclipse.jdt.internal.core.DeltaProcessor.firePostChangeDelta(DeltaProcessor.java:1576)
-	at org.eclipse.jdt.internal.core.DeltaProcessor.fire(DeltaProcessor.java:1552)
-	at org.eclipse.jdt.internal.core.DeltaProcessor.notifyAndFire(DeltaProcessor.java:2273)
-	at org.eclipse.jdt.internal.core.DeltaProcessor.resourceChanged(DeltaProcessor.java:2163)
-	at org.eclipse.jdt.internal.core.DeltaProcessingState.resourceChanged(DeltaProcessingState.java:501)
-	at org.eclipse.core.internal.events.NotificationManager$1.run(NotificationManager.java:305)
-	at org.eclipse.core.runtime.SafeRunner.run(SafeRunner.java:45)
-	at org.eclipse.core.internal.events.NotificationManager.notify(NotificationManager.java:295)
-	at org.eclipse.core.internal.events.NotificationManager.broadcastChanges(NotificationManager.java:158)
-	at org.eclipse.core.internal.resources.Workspace.broadcastPostChange(Workspace.java:381)
-	at org.eclipse.core.internal.resources.Workspace.checkpoint(Workspace.java:576)
-	at org.eclipse.ltk.core.refactoring.PerformChangeOperation.lambda$0(PerformChangeOperation.java:263)
-	at org.eclipse.core.internal.resources.Workspace.run(Workspace.java:2313)
-	at org.eclipse.core.internal.resources.Workspace.run(Workspace.java:2338)
-	at org.eclipse.ltk.core.refactoring.PerformChangeOperation.executeChange(PerformChangeOperation.java:295)
-	at org.eclipse.ltk.internal.ui.refactoring.UIPerformChangeOperation.executeChange(UIPerformChangeOperation.java:94)
-	at org.eclipse.ltk.core.refactoring.PerformChangeOperation.run(PerformChangeOperation.java:219)
-	at org.eclipse.core.internal.resources.Workspace.run(Workspace.java:2313)
-	at org.eclipse.core.internal.resources.Workspace.run(Workspace.java:2338)
-	at org.eclipse.ltk.internal.ui.refactoring.WorkbenchRunnableAdapter.run(WorkbenchRunnableAdapter.java:89)
-	at org.eclipse.jface.operation.ModalContext$ModalContextThread.run(ModalContext.java:122)
-
-!ENTRY org.eclipse.lsp4e 4 0 2022-09-16 22:54:12.368
-!MESSAGE org.eclipse.lsp4j.jsonrpc.ResponseErrorException: Internal error.
-!STACK 0
-java.util.concurrent.ExecutionException: org.eclipse.lsp4j.jsonrpc.ResponseErrorException: Internal error.
-	at java.base/java.util.concurrent.CompletableFuture.reportGet(CompletableFuture.java:395)
-	at java.base/java.util.concurrent.CompletableFuture.get(CompletableFuture.java:2022)
-	at org.eclipse.lsp4e.LanguageServerWrapper.lambda$13(LanguageServerWrapper.java:425)
-	at java.base/java.util.concurrent.CompletableFuture$AsyncRun.run(CompletableFuture.java:1736)
-	at java.base/java.util.concurrent.CompletableFuture$AsyncRun.exec(CompletableFuture.java:1728)
-	at java.base/java.util.concurrent.ForkJoinTask.doExec(ForkJoinTask.java:290)
-	at java.base/java.util.concurrent.ForkJoinPool$WorkQueue.topLevelExec(ForkJoinPool.java:1020)
-	at java.base/java.util.concurrent.ForkJoinPool.scan(ForkJoinPool.java:1656)
-	at java.base/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1594)
-	at java.base/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:183)
-Caused by: org.eclipse.lsp4j.jsonrpc.ResponseErrorException: Internal error.
-	at org.eclipse.lsp4j.jsonrpc.RemoteEndpoint.handleResponse(RemoteEndpoint.java:209)
-	at org.eclipse.lsp4j.jsonrpc.RemoteEndpoint.consume(RemoteEndpoint.java:193)
-	at org.eclipse.lsp4e.LanguageServerWrapper.lambda$4(LanguageServerWrapper.java:260)
-	at org.eclipse.lsp4j.jsonrpc.json.StreamMessageProducer.handleMessage(StreamMessageProducer.java:194)
-	at org.eclipse.lsp4j.jsonrpc.json.StreamMessageProducer.listen(StreamMessageProducer.java:94)
-	at org.eclipse.lsp4j.jsonrpc.json.ConcurrentMessageProcessor.run(ConcurrentMessageProcessor.java:113)
-	at java.base/java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:515)
-	at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)
-	at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128)
-	at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628)
-	at java.base/java.lang.Thread.run(Thread.java:829)
-
-!ENTRY org.jboss.tools.windup.runtime 1 0 2022-09-16 22:54:13.149
+!ENTRY org.jboss.tools.windup.runtime 1 0 2022-09-23 22:38:00.204
 !MESSAGE Attempting to retrieve ExecutionBuilder from registry.
-!SESSION 2022-09-17 16:42:04.082 -----------------------------------------------
-eclipse.buildId=12.21.3.GA-v20220127-1221-B485
-java.version=11.0.15
-java.vendor=Red Hat, Inc.
-BootLoader constants: OS=linux, ARCH=x86_64, WS=gtk, NL=en_GB
-Framework arguments: -product com.jboss.devstudio.core.product
-Command-line arguments: -os linux -ws gtk -arch x86_64 -product com.jboss.devstudio.core.product
-
-!ENTRY org.eclipse.ui 2 0 2022-09-17 16:44:57.459
-!MESSAGE Warnings while parsing the commands from the 'org.eclipse.ui.commands' and 'org.eclipse.ui.actionDefinitions' extension points.
-!SUBENTRY 1 org.eclipse.ui 2 0 2022-09-17 16:44:57.459
-!MESSAGE Commands should really have a category: plug-in='org.springframework.ide.eclipse.boot', id='org.springframework.ide.eclipse.boot.ui.EnableDisableBootDevtools', categoryId='org.springframework.ide.eclipse.boot.commands.category'
-
-!ENTRY org.eclipse.ui 2 0 2022-09-17 16:45:32.170
-!MESSAGE Warnings while parsing the commands from the 'org.eclipse.ui.commands' and 'org.eclipse.ui.actionDefinitions' extension points.
-!SUBENTRY 1 org.eclipse.ui 2 0 2022-09-17 16:45:32.170
-!MESSAGE Commands should really have a category: plug-in='org.springframework.ide.eclipse.boot', id='org.springframework.ide.eclipse.boot.ui.EnableDisableBootDevtools', categoryId='org.springframework.ide.eclipse.boot.commands.category'
-
-!ENTRY org.eclipse.jface 2 0 2022-09-17 16:45:57.152
-!MESSAGE Keybinding conflicts occurred. They may interfere with normal accelerator operation.
-!SUBENTRY 1 org.eclipse.jface 2 0 2022-09-17 16:45:57.153
-!MESSAGE A conflict occurred for CTRL+SHIFT+T:
-Binding(CTRL+SHIFT+T,
-	ParameterizedCommand(Command(org.eclipse.jdt.ui.navigate.open.type,Open Type,
-		Open a type in a Java editor,
-		Category(org.eclipse.ui.category.navigate,Navigate,null,true),
-		org.eclipse.ui.internal.WorkbenchHandlerServiceHandler@7625ce5a,
-		,,true),null),
-	org.eclipse.ui.defaultAcceleratorConfiguration,
-	org.eclipse.ui.contexts.window,,,system)
-Binding(CTRL+SHIFT+T,
-	ParameterizedCommand(Command(org.eclipse.lsp4e.symbolinworkspace,Go to Symbol in Workspace,
-		,
-		Category(org.eclipse.lsp4e.category,Language Servers,null,true),
-		org.eclipse.ui.internal.WorkbenchHandlerServiceHandler@7787fce1,
-		,,true),null),
-	org.eclipse.ui.defaultAcceleratorConfiguration,
-	org.eclipse.ui.contexts.window,,,system)
-!SUBENTRY 1 org.eclipse.jface 2 0 2022-09-17 16:45:57.153
-!MESSAGE A conflict occurred for ALT+SHIFT+R:
-Binding(ALT+SHIFT+R,
-	ParameterizedCommand(Command(org.eclipse.jdt.ui.edit.text.java.rename.element,Rename - Refactoring ,
-		Rename the selected element,
-		Category(org.eclipse.jdt.ui.category.refactoring,Refactor - Java,Java Refactoring Actions,true),
-		org.eclipse.ui.internal.WorkbenchHandlerServiceHandler@89ee343,
-		,,true),null),
-	org.eclipse.ui.defaultAcceleratorConfiguration,
-	org.eclipse.ui.contexts.window,,,system)
-Binding(ALT+SHIFT+R,
-	ParameterizedCommand(Command(org.eclipse.ui.edit.rename,Rename,
-		Rename the selected item,
-		Category(org.eclipse.ui.category.file,File,null,true),
-		org.eclipse.ui.internal.WorkbenchHandlerServiceHandler@10fa5a77,
-		,,true),null),
-	org.eclipse.ui.defaultAcceleratorConfiguration,
-	org.eclipse.ui.contexts.window,,,system)
-
-!ENTRY org.eclipse.wildwebdeveloper.xml 4 0 2022-09-17 16:46:42.487
-!MESSAGE /schema/xsd/windup-jboss-ruleset.xsd
-!STACK 0
-java.io.FileNotFoundException: /schema/xsd/windup-jboss-ruleset.xsd
-	at org.eclipse.osgi.storage.url.bundleentry.Handler.findBundleEntry(Handler.java:55)
-	at org.eclipse.osgi.storage.url.BundleResourceHandler.openConnection(BundleResourceHandler.java:174)
-	at java.base/java.net.URL.openConnection(URL.java:1099)
-	at org.eclipse.core.internal.boot.PlatformURLConnection.connect(PlatformURLConnection.java:115)
-	at org.eclipse.core.internal.boot.PlatformURLConnection.getURLAsLocal(PlatformURLConnection.java:240)
-	at org.eclipse.core.internal.runtime.PlatformURLConverter.toFileURL(PlatformURLConverter.java:37)
-	at org.eclipse.core.runtime.FileLocator.toFileURL(FileLocator.java:261)
-	at org.eclipse.wildwebdeveloper.xml.internal.ui.preferences.XMLCatalogs.lambda$3(XMLCatalogs.java:75)
-	at java.base/java.util.stream.ForEachOps$ForEachOp$OfRef.accept(ForEachOps.java:183)
-	at java.base/java.util.Spliterators$ArraySpliterator.forEachRemaining(Spliterators.java:948)
-	at java.base/java.util.stream.ReferencePipeline$Head.forEach(ReferencePipeline.java:658)
-	at java.base/java.util.stream.ReferencePipeline$7$1.accept(ReferencePipeline.java:274)
-	at java.base/java.util.stream.ReferencePipeline$2$1.accept(ReferencePipeline.java:177)
-	at java.base/java.util.Spliterators$ArraySpliterator.forEachRemaining(Spliterators.java:948)
-	at java.base/java.util.stream.AbstractPipeline.copyInto(AbstractPipeline.java:484)
-	at java.base/java.util.stream.AbstractPipeline.wrapAndCopyInto(AbstractPipeline.java:474)
-	at java.base/java.util.stream.ForEachOps$ForEachOp.evaluateSequential(ForEachOps.java:150)
-	at java.base/java.util.stream.ForEachOps$ForEachOp$OfRef.evaluateSequential(ForEachOps.java:173)
-	at java.base/java.util.stream.AbstractPipeline.evaluate(AbstractPipeline.java:234)
-	at java.base/java.util.stream.ReferencePipeline.forEach(ReferencePipeline.java:497)
-	at org.eclipse.wildwebdeveloper.xml.internal.ui.preferences.XMLCatalogs.getWTPExtensionCatalog(XMLCatalogs.java:63)
-	at org.eclipse.wildwebdeveloper.xml.internal.ui.preferences.XMLCatalogs.getAllCatalogs(XMLCatalogs.java:50)
-	at org.eclipse.wildwebdeveloper.xml.internal.ui.preferences.XMLPreferenceConstants.storePreferencesToLemminxOptions(XMLPreferenceConstants.java:90)
-	at org.eclipse.wildwebdeveloper.xml.internal.XMLLanguageServer.mergeCustomInitializationOptions(XMLLanguageServer.java:163)
-	at org.eclipse.wildwebdeveloper.xml.internal.XMLLanguageServer.getInitializationOptions(XMLLanguageServer.java:158)
-	at org.eclipse.lsp4e.LanguageServerWrapper.lambda$8(LanguageServerWrapper.java:353)
-	at java.base/java.util.concurrent.CompletableFuture$UniCompose.tryFire(CompletableFuture.java:1072)
-	at java.base/java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:506)
-	at java.base/java.util.concurrent.CompletableFuture$AsyncSupply.run(CompletableFuture.java:1705)
-	at java.base/java.util.concurrent.CompletableFuture$AsyncSupply.exec(CompletableFuture.java:1692)
-	at java.base/java.util.concurrent.ForkJoinTask.doExec(ForkJoinTask.java:290)
-	at java.base/java.util.concurrent.ForkJoinPool$WorkQueue.topLevelExec(ForkJoinPool.java:1020)
-	at java.base/java.util.concurrent.ForkJoinPool.scan(ForkJoinPool.java:1656)
-	at java.base/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1594)
-	at java.base/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:183)
-
-!ENTRY org.eclipse.ui.navigator 2 0 2022-09-17 16:46:46.975
-!MESSAGE Can't find Navigator Content Descriptor with id: org.eclipse.jst.servlet.ui.EnhancedJavaRendering
-
-!ENTRY org.eclipse.ui.navigator 2 0 2022-09-17 16:46:46.982
-!MESSAGE Can't find Navigator Content Descriptor with id: org.eclipse.jst.servlet.ui.EnhancedJavaRendering
-
-!ENTRY org.eclipse.jface 2 0 2022-09-17 16:47:38.400
-!MESSAGE Keybinding conflicts occurred. They may interfere with normal accelerator operation.
-!SUBENTRY 1 org.eclipse.jface 2 0 2022-09-17 16:47:38.400
-!MESSAGE A conflict occurred for CTRL+SHIFT+G:
-Binding(CTRL+SHIFT+G,
-	ParameterizedCommand(Command(org.eclipse.jdt.ui.edit.text.java.search.references.in.workspace,References in Workspace,
-		Search for references to the selected element in the workspace,
-		Category(org.eclipse.search.ui.category.search,Search,Search command category,true),
-		org.eclipse.ui.internal.WorkbenchHandlerServiceHandler@61a8f051,
-		,,true),null),
-	org.eclipse.ui.defaultAcceleratorConfiguration,
-	org.eclipse.ui.contexts.window,,,system)
-Binding(CTRL+SHIFT+G,
-	ParameterizedCommand(Command(org.jboss.tools.seam.ui.find.references,Find Seam References,
-		Find Seam References,
-		Category(org.eclipse.search.ui.category.search,Search,Search command category,true),
-		org.eclipse.ui.internal.WorkbenchHandlerServiceHandler@289d4ca7,
-		,,true),null),
-	org.eclipse.ui.defaultAcceleratorConfiguration,
-	org.eclipse.wst.sse.ui.structuredTextEditorScope,,,system)
-Binding(CTRL+SHIFT+G,
-	ParameterizedCommand(Command(org.springframework.ide.eclipse.beans.ui.editor.commands.searchBeanReferences,Spring Beans References Search,
-		,
-		Category(org.eclipse.search.ui.category.search,Search,Search command category,true),
-		org.eclipse.ui.internal.WorkbenchHandlerServiceHandler@1aa33c8f,
-		,,true),null),
-	org.eclipse.ui.defaultAcceleratorConfiguration,
-	org.eclipse.wst.sse.ui.structuredTextEditorScope,,,system)
-
-!ENTRY org.eclipse.lsp4e 4 0 2022-09-17 16:47:46.225
-!MESSAGE org.eclipse.lsp4j.jsonrpc.ResponseErrorException: Internal error.
-!STACK 0
-java.util.concurrent.ExecutionException: org.eclipse.lsp4j.jsonrpc.ResponseErrorException: Internal error.
-	at java.base/java.util.concurrent.CompletableFuture.reportGet(CompletableFuture.java:395)
-	at java.base/java.util.concurrent.CompletableFuture.get(CompletableFuture.java:2022)
-	at org.eclipse.lsp4e.LanguageServerWrapper.lambda$13(LanguageServerWrapper.java:425)
-	at java.base/java.util.concurrent.CompletableFuture$AsyncRun.run(CompletableFuture.java:1736)
-	at java.base/java.util.concurrent.CompletableFuture$AsyncRun.exec(CompletableFuture.java:1728)
-	at java.base/java.util.concurrent.ForkJoinTask.doExec(ForkJoinTask.java:290)
-	at java.base/java.util.concurrent.ForkJoinPool$WorkQueue.topLevelExec(ForkJoinPool.java:1020)
-	at java.base/java.util.concurrent.ForkJoinPool.scan(ForkJoinPool.java:1656)
-	at java.base/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1594)
-	at java.base/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:183)
-Caused by: org.eclipse.lsp4j.jsonrpc.ResponseErrorException: Internal error.
-	at org.eclipse.lsp4j.jsonrpc.RemoteEndpoint.handleResponse(RemoteEndpoint.java:209)
-	at org.eclipse.lsp4j.jsonrpc.RemoteEndpoint.consume(RemoteEndpoint.java:193)
-	at org.eclipse.lsp4e.LanguageServerWrapper.lambda$4(LanguageServerWrapper.java:260)
-	at org.eclipse.lsp4j.jsonrpc.json.StreamMessageProducer.handleMessage(StreamMessageProducer.java:194)
-	at org.eclipse.lsp4j.jsonrpc.json.StreamMessageProducer.listen(StreamMessageProducer.java:94)
-	at org.eclipse.lsp4j.jsonrpc.json.ConcurrentMessageProcessor.run(ConcurrentMessageProcessor.java:113)
-	at java.base/java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:515)
-	at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)
-	at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128)
-	at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628)
-	at java.base/java.lang.Thread.run(Thread.java:829)
-
-!ENTRY org.eclipse.update.configurator 4 0 2022-09-17 16:47:54.166
-!MESSAGE Can't find bundle for base name feature, locale en_GB
-
-!ENTRY org.eclipse.update.configurator 4 0 2022-09-17 16:47:59.977
-!MESSAGE Can't find bundle for base name feature, locale en_GB
-
-!ENTRY org.eclipse.update.configurator 4 0 2022-09-17 16:48:00.472
-!MESSAGE Can't find bundle for base name feature, locale en_GB
-
-!ENTRY org.eclipse.update.configurator 4 0 2022-09-17 16:48:00.689
-!MESSAGE Can't find bundle for base name feature, locale en_GB
-
-!ENTRY org.eclipse.update.configurator 4 0 2022-09-17 16:48:01.881
-!MESSAGE Can't find bundle for base name feature, locale en_GB
-
-!ENTRY org.eclipse.equinox.p2.core 4 0 2022-09-17 16:48:51.223
-!MESSAGE Provisioning exception
-!STACK 1
-org.eclipse.equinox.p2.core.ProvisionException: No repository found at http://download.jboss.org/jbosstools/updates/requirements/webtools/3.2.5-20110915223420/.
-	at org.eclipse.equinox.internal.p2.repository.helpers.AbstractRepositoryManager.fail(AbstractRepositoryManager.java:405)
-	at org.eclipse.equinox.internal.p2.repository.helpers.AbstractRepositoryManager.loadRepository(AbstractRepositoryManager.java:709)
-	at org.eclipse.equinox.internal.p2.metadata.repository.MetadataRepositoryManager.loadRepository(MetadataRepositoryManager.java:110)
-	at org.eclipse.equinox.internal.p2.metadata.repository.MetadataRepositoryManager.loadRepository(MetadataRepositoryManager.java:105)
-	at org.eclipse.equinox.internal.p2.metadata.repository.CompositeMetadataRepository.addChild(CompositeMetadataRepository.java:171)
-	at org.eclipse.equinox.internal.p2.metadata.repository.CompositeMetadataRepository.<init>(CompositeMetadataRepository.java:113)
-	at org.eclipse.equinox.internal.p2.metadata.repository.CompositeMetadataRepositoryFactory.load(CompositeMetadataRepositoryFactory.java:124)
-	at org.eclipse.equinox.internal.p2.metadata.repository.MetadataRepositoryManager.factoryLoad(MetadataRepositoryManager.java:63)
-	at org.eclipse.equinox.internal.p2.repository.helpers.AbstractRepositoryManager.loadRepository(AbstractRepositoryManager.java:787)
-	at org.eclipse.equinox.internal.p2.repository.helpers.AbstractRepositoryManager.loadRepository(AbstractRepositoryManager.java:685)
-	at org.eclipse.equinox.internal.p2.metadata.repository.MetadataRepositoryManager.loadRepository(MetadataRepositoryManager.java:110)
-	at org.eclipse.equinox.internal.p2.metadata.repository.MetadataRepositoryManager.loadRepository(MetadataRepositoryManager.java:105)
-	at org.eclipse.equinox.internal.p2.updatechecker.UpdateChecker.getAvailableRepositories(UpdateChecker.java:152)
-	at org.eclipse.equinox.internal.p2.updatechecker.UpdateChecker.checkForUpdates(UpdateChecker.java:130)
-	at org.eclipse.equinox.internal.p2.updatechecker.UpdateChecker$UpdateCheckThread.run(UpdateChecker.java:78)
-!SUBENTRY 1 org.eclipse.equinox.p2.metadata.repository 4 1000 2022-09-17 16:48:51.224
-!MESSAGE No repository found at http://download.jboss.org/jbosstools/updates/requirements/webtools/3.2.5-20110915223420/.
-
-!ENTRY org.eclipse.equinox.p2.metadata.repository 4 1002 2022-09-17 16:50:21.979
-!MESSAGE Unable to read repository at http://marketplace.eclipse.org/marketplace-client-intro?mpc_install=321859%0ADrag%20to%20Install!%20Drag%20to%20your%20running%20Eclipse*%20workspace.%20*Requires%20Eclipse%20Marketplace%20Client.
-!STACK 0
-java.io.IOException: http://marketplace.eclipse.org/marketplace-client-intro?mpc_install=321859%0ADrag%20to%20Install!%20Drag%20to%20your%20running%20Eclipse*%20workspace.%20*Requires%20Eclipse%20Marketplace%20Client is not a valid repository location.
-	at org.eclipse.equinox.internal.p2.metadata.repository.SimpleMetadataRepositoryFactory.load(SimpleMetadataRepositoryFactory.java:103)
-	at org.eclipse.equinox.internal.p2.metadata.repository.MetadataRepositoryManager.factoryLoad(MetadataRepositoryManager.java:63)
-	at org.eclipse.equinox.internal.p2.repository.helpers.AbstractRepositoryManager.loadRepository(AbstractRepositoryManager.java:787)
-	at org.eclipse.equinox.internal.p2.repository.helpers.AbstractRepositoryManager.loadRepository(AbstractRepositoryManager.java:685)
-	at org.eclipse.equinox.internal.p2.metadata.repository.MetadataRepositoryManager.loadRepository(MetadataRepositoryManager.java:110)
-	at org.eclipse.equinox.internal.p2.metadata.repository.MetadataRepositoryManager.loadRepository(MetadataRepositoryManager.java:105)
-	at org.eclipse.equinox.internal.p2.updatechecker.UpdateChecker.getAvailableRepositories(UpdateChecker.java:152)
-	at org.eclipse.equinox.internal.p2.updatechecker.UpdateChecker.checkForUpdates(UpdateChecker.java:130)
-	at org.eclipse.equinox.internal.p2.updatechecker.UpdateChecker$UpdateCheckThread.run(UpdateChecker.java:78)
-
-!ENTRY org.eclipse.equinox.p2.metadata.repository 4 1000 2022-09-17 16:50:33.750
-!MESSAGE No repository found at http://download.eclipse.org/technology/m2e/releases/.
-
-!ENTRY org.eclipse.equinox.p2.core 4 0 2022-09-17 16:52:53.675
-!MESSAGE Provisioning exception
-!STACK 1
-org.eclipse.equinox.p2.core.ProvisionException: No repository found at http://download.jboss.org/jbosstools/updates/requirements/webtools/3.3.2-20120210195245/.
-	at org.eclipse.equinox.internal.p2.repository.helpers.AbstractRepositoryManager.fail(AbstractRepositoryManager.java:405)
-	at org.eclipse.equinox.internal.p2.repository.helpers.AbstractRepositoryManager.loadRepository(AbstractRepositoryManager.java:709)
-	at org.eclipse.equinox.internal.p2.metadata.repository.MetadataRepositoryManager.loadRepository(MetadataRepositoryManager.java:110)
-	at org.eclipse.equinox.internal.p2.metadata.repository.MetadataRepositoryManager.loadRepository(MetadataRepositoryManager.java:105)
-	at org.eclipse.equinox.internal.p2.metadata.repository.CompositeMetadataRepository.addChild(CompositeMetadataRepository.java:171)
-	at org.eclipse.equinox.internal.p2.metadata.repository.CompositeMetadataRepository.<init>(CompositeMetadataRepository.java:113)
-	at org.eclipse.equinox.internal.p2.metadata.repository.CompositeMetadataRepositoryFactory.load(CompositeMetadataRepositoryFactory.java:124)
-	at org.eclipse.equinox.internal.p2.metadata.repository.MetadataRepositoryManager.factoryLoad(MetadataRepositoryManager.java:63)
-	at org.eclipse.equinox.internal.p2.repository.helpers.AbstractRepositoryManager.loadRepository(AbstractRepositoryManager.java:787)
-	at org.eclipse.equinox.internal.p2.repository.helpers.AbstractRepositoryManager.loadRepository(AbstractRepositoryManager.java:685)
-	at org.eclipse.equinox.internal.p2.metadata.repository.MetadataRepositoryManager.loadRepository(MetadataRepositoryManager.java:110)
-	at org.eclipse.equinox.internal.p2.metadata.repository.MetadataRepositoryManager.loadRepository(MetadataRepositoryManager.java:105)
-	at org.eclipse.equinox.internal.p2.metadata.repository.CompositeMetadataRepository.addChild(CompositeMetadataRepository.java:171)
-	at org.eclipse.equinox.internal.p2.metadata.repository.CompositeMetadataRepository.<init>(CompositeMetadataRepository.java:113)
-	at org.eclipse.equinox.internal.p2.metadata.repository.CompositeMetadataRepositoryFactory.load(CompositeMetadataRepositoryFactory.java:124)
-	at org.eclipse.equinox.internal.p2.metadata.repository.MetadataRepositoryManager.factoryLoad(MetadataRepositoryManager.java:63)
-	at org.eclipse.equinox.internal.p2.repository.helpers.AbstractRepositoryManager.loadRepository(AbstractRepositoryManager.java:787)
-	at org.eclipse.equinox.internal.p2.repository.helpers.AbstractRepositoryManager.loadRepository(AbstractRepositoryManager.java:685)
-	at org.eclipse.equinox.internal.p2.repository.helpers.AbstractRepositoryManager.query(AbstractRepositoryManager.java:1179)
-	at org.zeroturnaround.eclipse.update.EclipsePluginLatestVersionProvider.hasRequirements(EclipsePluginLatestVersionProvider.java:86)
-	at org.zeroturnaround.eclipse.update.EclipsePluginLatestVersionProvider.getLatestVersionInternal(EclipsePluginLatestVersionProvider.java:76)
-	at org.zeroturnaround.eclipse.update.EclipsePluginLatestVersionProvider.getLatestVersionOnline(EclipsePluginLatestVersionProvider.java:50)
-	at org.zeroturnaround.eclipse.update.EclipsePluginLatestVersionProvider.getLatestVersion(EclipsePluginLatestVersionProvider.java:42)
-	at org.zeroturnaround.jrebel.client.update.PluginLatestVersion.findLatestVersion(PluginLatestVersion.java:24)
-	at org.zeroturnaround.jrebel.ide.common.utils.update.JRebelPluginUpdate.getUpdateSiteVersion(JRebelPluginUpdate.java:159)
-	at org.zeroturnaround.jrebel.ide.common.utils.update.JRebelPluginUpdate.checkForNewVersion(JRebelPluginUpdate.java:51)
-	at org.zeroturnaround.jrebel.ide.common.utils.update.JRebelPluginUpdate.startNewVersionCheck(JRebelPluginUpdate.java:39)
-	at org.zeroturnaround.jrebel.ide.common.CommonStartup.lambda$startNewVersionCheck$4(CommonStartup.java:314)
-	at org.zeroturnaround.common.util.ExecutorUtil$RunnableWrapper.run(ExecutorUtil.java:161)
-	at java.base/java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:515)
-	at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)
-	at java.base/java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.run(ScheduledThreadPoolExecutor.java:304)
-	at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128)
-	at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628)
-	at java.base/java.lang.Thread.run(Thread.java:829)
-!SUBENTRY 1 org.eclipse.equinox.p2.metadata.repository 4 1000 2022-09-17 16:52:53.676
-!MESSAGE No repository found at http://download.jboss.org/jbosstools/updates/requirements/webtools/3.3.2-20120210195245/.
-
-!ENTRY org.eclipse.equinox.p2.transport.ecf 4 1002 2022-09-17 16:53:07.195
-!MESSAGE HTTP Server 'Bad Gateway' : http://dl.bintray.com/testng-team/testng-p2-release/content.xml
-!STACK 1
-org.eclipse.ecf.filetransfer.BrowseFileTransferException: HttpComponents connection error response code 502.
-	at org.eclipse.ecf.provider.filetransfer.httpclient45.HttpClientFileSystemBrowser.runRequest(HttpClientFileSystemBrowser.java:278)
-	at org.eclipse.ecf.provider.filetransfer.browse.AbstractFileSystemBrowser$DirectoryJob.run(AbstractFileSystemBrowser.java:71)
-	at org.eclipse.core.internal.jobs.Worker.run(Worker.java:63)
-!SUBENTRY 1 org.eclipse.ecf.identity 4 0 2022-09-17 16:53:07.196
-!MESSAGE HttpComponents connection error response code 502.
-
-!ENTRY org.jboss.tools.windup.runtime 1 0 2022-09-17 23:02:01.019
-!MESSAGE Attempting to retrieve ExecutionBuilder from registry.
-!SESSION 2022-09-18 17:30:56.612 -----------------------------------------------
-eclipse.buildId=12.21.3.GA-v20220127-1221-B485
-java.version=11.0.15
-java.vendor=Red Hat, Inc.
-BootLoader constants: OS=linux, ARCH=x86_64, WS=gtk, NL=en_GB
-Framework arguments: -product com.jboss.devstudio.core.product
-Command-line arguments: -os linux -ws gtk -arch x86_64 -product com.jboss.devstudio.core.product
-
-!ENTRY org.eclipse.ui 2 0 2022-09-18 17:31:48.956
-!MESSAGE Warnings while parsing the commands from the 'org.eclipse.ui.commands' and 'org.eclipse.ui.actionDefinitions' extension points.
-!SUBENTRY 1 org.eclipse.ui 2 0 2022-09-18 17:31:48.956
-!MESSAGE Commands should really have a category: plug-in='org.springframework.ide.eclipse.boot', id='org.springframework.ide.eclipse.boot.ui.EnableDisableBootDevtools', categoryId='org.springframework.ide.eclipse.boot.commands.category'
-
-!ENTRY org.eclipse.ui 2 0 2022-09-18 17:31:57.664
-!MESSAGE Warnings while parsing the commands from the 'org.eclipse.ui.commands' and 'org.eclipse.ui.actionDefinitions' extension points.
-!SUBENTRY 1 org.eclipse.ui 2 0 2022-09-18 17:31:57.664
-!MESSAGE Commands should really have a category: plug-in='org.springframework.ide.eclipse.boot', id='org.springframework.ide.eclipse.boot.ui.EnableDisableBootDevtools', categoryId='org.springframework.ide.eclipse.boot.commands.category'
-
-!ENTRY org.eclipse.jface 2 0 2022-09-18 17:32:05.225
-!MESSAGE Keybinding conflicts occurred. They may interfere with normal accelerator operation.
-!SUBENTRY 1 org.eclipse.jface 2 0 2022-09-18 17:32:05.225
-!MESSAGE A conflict occurred for CTRL+SHIFT+T:
-Binding(CTRL+SHIFT+T,
-	ParameterizedCommand(Command(org.eclipse.jdt.ui.navigate.open.type,Open Type,
-		Open a type in a Java editor,
-		Category(org.eclipse.ui.category.navigate,Navigate,null,true),
-		org.eclipse.ui.internal.WorkbenchHandlerServiceHandler@421b4cf3,
-		,,true),null),
-	org.eclipse.ui.defaultAcceleratorConfiguration,
-	org.eclipse.ui.contexts.window,,,system)
-Binding(CTRL+SHIFT+T,
-	ParameterizedCommand(Command(org.eclipse.lsp4e.symbolinworkspace,Go to Symbol in Workspace,
-		,
-		Category(org.eclipse.lsp4e.category,Language Servers,null,true),
-		org.eclipse.ui.internal.WorkbenchHandlerServiceHandler@7625ce5a,
-		,,true),null),
-	org.eclipse.ui.defaultAcceleratorConfiguration,
-	org.eclipse.ui.contexts.window,,,system)
-!SUBENTRY 1 org.eclipse.jface 2 0 2022-09-18 17:32:05.226
-!MESSAGE A conflict occurred for ALT+SHIFT+R:
-Binding(ALT+SHIFT+R,
-	ParameterizedCommand(Command(org.eclipse.jdt.ui.edit.text.java.rename.element,Rename - Refactoring ,
-		Rename the selected element,
-		Category(org.eclipse.jdt.ui.category.refactoring,Refactor - Java,Java Refactoring Actions,true),
-		org.eclipse.ui.internal.WorkbenchHandlerServiceHandler@7787fce1,
-		,,true),null),
-	org.eclipse.ui.defaultAcceleratorConfiguration,
-	org.eclipse.ui.contexts.window,,,system)
-Binding(ALT+SHIFT+R,
-	ParameterizedCommand(Command(org.eclipse.ui.edit.rename,Rename,
-		Rename the selected item,
-		Category(org.eclipse.ui.category.file,File,null,true),
-		org.eclipse.ui.internal.WorkbenchHandlerServiceHandler@89ee343,
-		,,true),null),
-	org.eclipse.ui.defaultAcceleratorConfiguration,
-	org.eclipse.ui.contexts.window,,,system)
-
-!ENTRY org.eclipse.ui.navigator 2 0 2022-09-18 17:32:33.391
-!MESSAGE Can't find Navigator Content Descriptor with id: org.eclipse.jst.servlet.ui.EnhancedJavaRendering
-
-!ENTRY org.eclipse.ui.navigator 2 0 2022-09-18 17:32:33.395
-!MESSAGE Can't find Navigator Content Descriptor with id: org.eclipse.jst.servlet.ui.EnhancedJavaRendering
-
-!ENTRY org.eclipse.update.configurator 4 0 2022-09-18 17:33:48.254
-!MESSAGE Can't find bundle for base name feature, locale en_GB
-
-!ENTRY org.eclipse.update.configurator 4 0 2022-09-18 17:33:56.681
-!MESSAGE Can't find bundle for base name feature, locale en_GB
-
-!ENTRY org.eclipse.update.configurator 4 0 2022-09-18 17:33:57.860
-!MESSAGE Can't find bundle for base name feature, locale en_GB
-
-!ENTRY org.eclipse.update.configurator 4 0 2022-09-18 17:33:58.219
-!MESSAGE Can't find bundle for base name feature, locale en_GB
-
-!ENTRY org.eclipse.update.configurator 4 0 2022-09-18 17:34:01.268
-!MESSAGE Can't find bundle for base name feature, locale en_GB
-
-!ENTRY org.eclipse.equinox.p2.core 4 0 2022-09-18 17:34:47.606
-!MESSAGE Provisioning exception
-!STACK 1
-org.eclipse.equinox.p2.core.ProvisionException: No repository found at http://download.jboss.org/jbosstools/updates/requirements/webtools/3.2.5-20110915223420/.
-	at org.eclipse.equinox.internal.p2.repository.helpers.AbstractRepositoryManager.fail(AbstractRepositoryManager.java:405)
-	at org.eclipse.equinox.internal.p2.repository.helpers.AbstractRepositoryManager.loadRepository(AbstractRepositoryManager.java:709)
-	at org.eclipse.equinox.internal.p2.metadata.repository.MetadataRepositoryManager.loadRepository(MetadataRepositoryManager.java:110)
-	at org.eclipse.equinox.internal.p2.metadata.repository.MetadataRepositoryManager.loadRepository(MetadataRepositoryManager.java:105)
-	at org.eclipse.equinox.internal.p2.metadata.repository.CompositeMetadataRepository.addChild(CompositeMetadataRepository.java:171)
-	at org.eclipse.equinox.internal.p2.metadata.repository.CompositeMetadataRepository.<init>(CompositeMetadataRepository.java:113)
-	at org.eclipse.equinox.internal.p2.metadata.repository.CompositeMetadataRepositoryFactory.load(CompositeMetadataRepositoryFactory.java:124)
-	at org.eclipse.equinox.internal.p2.metadata.repository.MetadataRepositoryManager.factoryLoad(MetadataRepositoryManager.java:63)
-	at org.eclipse.equinox.internal.p2.repository.helpers.AbstractRepositoryManager.loadRepository(AbstractRepositoryManager.java:787)
-	at org.eclipse.equinox.internal.p2.repository.helpers.AbstractRepositoryManager.loadRepository(AbstractRepositoryManager.java:685)
-	at org.eclipse.equinox.internal.p2.metadata.repository.MetadataRepositoryManager.loadRepository(MetadataRepositoryManager.java:110)
-	at org.eclipse.equinox.internal.p2.metadata.repository.MetadataRepositoryManager.loadRepository(MetadataRepositoryManager.java:105)
-	at org.eclipse.equinox.internal.p2.updatechecker.UpdateChecker.getAvailableRepositories(UpdateChecker.java:152)
-	at org.eclipse.equinox.internal.p2.updatechecker.UpdateChecker.checkForUpdates(UpdateChecker.java:130)
-	at org.eclipse.equinox.internal.p2.updatechecker.UpdateChecker$UpdateCheckThread.run(UpdateChecker.java:78)
-!SUBENTRY 1 org.eclipse.equinox.p2.metadata.repository 4 1000 2022-09-18 17:34:47.607
-!MESSAGE No repository found at http://download.jboss.org/jbosstools/updates/requirements/webtools/3.2.5-20110915223420/.
-
-!ENTRY org.eclipse.equinox.p2.metadata.repository 4 1002 2022-09-18 17:36:27.379
-!MESSAGE Unable to read repository at http://marketplace.eclipse.org/marketplace-client-intro?mpc_install=321859%0ADrag%20to%20Install!%20Drag%20to%20your%20running%20Eclipse*%20workspace.%20*Requires%20Eclipse%20Marketplace%20Client.
-!STACK 0 -java.io.IOException: http://marketplace.eclipse.org/marketplace-client-intro?mpc_install=321859%0ADrag%20to%20Install!%20Drag%20to%20your%20running%20Eclipse*%20workspace.%20*Requires%20Eclipse%20Marketplace%20Client is not a valid repository location. - at org.eclipse.equinox.internal.p2.metadata.repository.SimpleMetadataRepositoryFactory.load(SimpleMetadataRepositoryFactory.java:103) - at org.eclipse.equinox.internal.p2.metadata.repository.MetadataRepositoryManager.factoryLoad(MetadataRepositoryManager.java:63) - at org.eclipse.equinox.internal.p2.repository.helpers.AbstractRepositoryManager.loadRepository(AbstractRepositoryManager.java:787) - at org.eclipse.equinox.internal.p2.repository.helpers.AbstractRepositoryManager.loadRepository(AbstractRepositoryManager.java:685) - at org.eclipse.equinox.internal.p2.metadata.repository.MetadataRepositoryManager.loadRepository(MetadataRepositoryManager.java:110) - at org.eclipse.equinox.internal.p2.metadata.repository.MetadataRepositoryManager.loadRepository(MetadataRepositoryManager.java:105) - at org.eclipse.equinox.internal.p2.updatechecker.UpdateChecker.getAvailableRepositories(UpdateChecker.java:152) - at org.eclipse.equinox.internal.p2.updatechecker.UpdateChecker.checkForUpdates(UpdateChecker.java:130) - at org.eclipse.equinox.internal.p2.updatechecker.UpdateChecker$UpdateCheckThread.run(UpdateChecker.java:78) - -!ENTRY org.eclipse.equinox.p2.metadata.repository 4 1000 2022-09-18 17:36:39.108 -!MESSAGE No repository found at http://download.eclipse.org/technology/m2e/releases/. - -!ENTRY org.eclipse.equinox.p2.core 4 0 2022-09-18 17:38:58.010 -!MESSAGE Provisioning exception -!STACK 1 -org.eclipse.equinox.p2.core.ProvisionException: No repository found at http://download.jboss.org/jbosstools/updates/requirements/webtools/3.3.2-20120210195245/. 
- at org.eclipse.equinox.internal.p2.repository.helpers.AbstractRepositoryManager.fail(AbstractRepositoryManager.java:405) - at org.eclipse.equinox.internal.p2.repository.helpers.AbstractRepositoryManager.loadRepository(AbstractRepositoryManager.java:709) - at org.eclipse.equinox.internal.p2.metadata.repository.MetadataRepositoryManager.loadRepository(MetadataRepositoryManager.java:110) - at org.eclipse.equinox.internal.p2.metadata.repository.MetadataRepositoryManager.loadRepository(MetadataRepositoryManager.java:105) - at org.eclipse.equinox.internal.p2.metadata.repository.CompositeMetadataRepository.addChild(CompositeMetadataRepository.java:171) - at org.eclipse.equinox.internal.p2.metadata.repository.CompositeMetadataRepository.(CompositeMetadataRepository.java:113) - at org.eclipse.equinox.internal.p2.metadata.repository.CompositeMetadataRepositoryFactory.load(CompositeMetadataRepositoryFactory.java:124) - at org.eclipse.equinox.internal.p2.metadata.repository.MetadataRepositoryManager.factoryLoad(MetadataRepositoryManager.java:63) - at org.eclipse.equinox.internal.p2.repository.helpers.AbstractRepositoryManager.loadRepository(AbstractRepositoryManager.java:787) - at org.eclipse.equinox.internal.p2.repository.helpers.AbstractRepositoryManager.loadRepository(AbstractRepositoryManager.java:685) - at org.eclipse.equinox.internal.p2.metadata.repository.MetadataRepositoryManager.loadRepository(MetadataRepositoryManager.java:110) - at org.eclipse.equinox.internal.p2.metadata.repository.MetadataRepositoryManager.loadRepository(MetadataRepositoryManager.java:105) - at org.eclipse.equinox.internal.p2.metadata.repository.CompositeMetadataRepository.addChild(CompositeMetadataRepository.java:171) - at org.eclipse.equinox.internal.p2.metadata.repository.CompositeMetadataRepository.(CompositeMetadataRepository.java:113) - at org.eclipse.equinox.internal.p2.metadata.repository.CompositeMetadataRepositoryFactory.load(CompositeMetadataRepositoryFactory.java:124) - at org.eclipse.equinox.internal.p2.metadata.repository.MetadataRepositoryManager.factoryLoad(MetadataRepositoryManager.java:63) - at org.eclipse.equinox.internal.p2.repository.helpers.AbstractRepositoryManager.loadRepository(AbstractRepositoryManager.java:787) - at org.eclipse.equinox.internal.p2.repository.helpers.AbstractRepositoryManager.loadRepository(AbstractRepositoryManager.java:685) - at org.eclipse.equinox.internal.p2.repository.helpers.AbstractRepositoryManager.query(AbstractRepositoryManager.java:1179) - at org.zeroturnaround.eclipse.update.EclipsePluginLatestVersionProvider.hasRequirements(EclipsePluginLatestVersionProvider.java:86) - at org.zeroturnaround.eclipse.update.EclipsePluginLatestVersionProvider.getLatestVersionInternal(EclipsePluginLatestVersionProvider.java:76) - at org.zeroturnaround.eclipse.update.EclipsePluginLatestVersionProvider.getLatestVersionOnline(EclipsePluginLatestVersionProvider.java:50) - at org.zeroturnaround.eclipse.update.EclipsePluginLatestVersionProvider.getLatestVersion(EclipsePluginLatestVersionProvider.java:42) - at org.zeroturnaround.jrebel.client.update.PluginLatestVersion.findLatestVersion(PluginLatestVersion.java:24) - at org.zeroturnaround.jrebel.ide.common.utils.update.JRebelPluginUpdate.getUpdateSiteVersion(JRebelPluginUpdate.java:159) - at org.zeroturnaround.jrebel.ide.common.utils.update.JRebelPluginUpdate.checkForNewVersion(JRebelPluginUpdate.java:51) - at org.zeroturnaround.jrebel.ide.common.utils.update.JRebelPluginUpdate.startNewVersionCheck(JRebelPluginUpdate.java:39) - at 
org.zeroturnaround.jrebel.ide.common.CommonStartup.lambda$startNewVersionCheck$4(CommonStartup.java:314) - at org.zeroturnaround.common.util.ExecutorUtil$RunnableWrapper.run(ExecutorUtil.java:161) - at java.base/java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:515) - at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) - at java.base/java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.run(ScheduledThreadPoolExecutor.java:304) - at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128) - at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628) - at java.base/java.lang.Thread.run(Thread.java:829) -!SUBENTRY 1 org.eclipse.equinox.p2.metadata.repository 4 1000 2022-09-18 17:38:58.010 -!MESSAGE No repository found at http://download.jboss.org/jbosstools/updates/requirements/webtools/3.3.2-20120210195245/. - -!ENTRY org.eclipse.equinox.p2.transport.ecf 4 1002 2022-09-18 17:39:10.516 -!MESSAGE HTTP Server 'Bad Gateway' : http://dl.bintray.com/testng-team/testng-p2-release/content.xml -!STACK 1 -org.eclipse.ecf.filetransfer.BrowseFileTransferException: HttpComponents connection error response code 502. - at org.eclipse.ecf.provider.filetransfer.httpclient45.HttpClientFileSystemBrowser.runRequest(HttpClientFileSystemBrowser.java:278) - at org.eclipse.ecf.provider.filetransfer.browse.AbstractFileSystemBrowser$DirectoryJob.run(AbstractFileSystemBrowser.java:71) - at org.eclipse.core.internal.jobs.Worker.run(Worker.java:63) -!SUBENTRY 1 org.eclipse.ecf.identity 4 0 2022-09-18 17:39:10.517 -!MESSAGE HttpComponents connection error response code 502. diff --git a/SOURCES/.metadata/.plugins/com.genuitec.eclipse.devstyle/recent.json b/SOURCES/.metadata/.plugins/com.genuitec.eclipse.devstyle/recent.json index c67e17d..c3819dc 100644 --- a/SOURCES/.metadata/.plugins/com.genuitec.eclipse.devstyle/recent.json +++ b/SOURCES/.metadata/.plugins/com.genuitec.eclipse.devstyle/recent.json @@ -1 +1 @@ -{"lastUsed":1663547566548,"path":"\/home\/rhel\/Documents\/fpmbuild\/SOURCES","projectCount":1,"recentFiles":[{"path":"\/examplemod\/src\/main\/java\/examplemod","parentWorkspace":"\/home\/rhel\/Documents\/fpmbuild\/SOURCES","name":"ExampleMod.java","lastModified":1663547621716,"locationURI":"file:\/home\/rhel\/Documents\/fpmbuild\/SOURCES\/examplemod\/src\/main\/java\/examplemod\/ExampleMod.java"}]} \ No newline at end of file +{"lastUsed":1663997855019,"path":"\/home\/rhel\/Documents\/fpmbuild\/SOURCES","projectCount":1,"recentFiles":[{"path":"\/examplemod\/src\/main\/java\/examplemod","parentWorkspace":"\/home\/rhel\/Documents\/fpmbuild\/SOURCES","name":"ExampleMod.java","lastModified":1663997294127,"locationURI":"file:\/home\/rhel\/Documents\/fpmbuild\/SOURCES\/examplemod\/src\/main\/java\/examplemod\/ExampleMod.java"}]} \ No newline at end of file diff --git a/SOURCES/.metadata/.plugins/com.genuitec.eclipse.monitor/myeclipse-usage.properties b/SOURCES/.metadata/.plugins/com.genuitec.eclipse.monitor/myeclipse-usage.properties index fda25dc..627c135 100644 --- a/SOURCES/.metadata/.plugins/com.genuitec.eclipse.monitor/myeclipse-usage.properties +++ b/SOURCES/.metadata/.plugins/com.genuitec.eclipse.monitor/myeclipse-usage.properties @@ -1,50 +1,46 @@ #MyEclipse Usage Data -#Sun Sep 18 22:04:30 PDT 2022 -view/org.zeroturnaround.eclipse.feature.guide.SetupGuideViewPart=3 -misc/arch=x86_64 -bundle/com.genuitec.eclipse.startup.workspace=51 -editor/org.jboss.tools.central.editors.JBossCentralEditor=2 
-bundle/com.genuitec.eclipse.theming.ui=51
-bundle/com.genuitec.eclipse.theming.css=-1
-editor/org.eclipse.jdt.ui.ClassFileEditorNoSource=3
-view/org.eclipse.ui.navigator.ProjectExplorer=18
-misc/timestamp=19 Sep 2022, 05\:04\:30
-editor/org.eclipse.jdt.ui.CompilationUnitEditor=12
-bundle/com.genuitec.eclipse.theming.scrollbar=-1
-bundle/com.genuitec.eclipse.theming.base=-1
-devstyle/state/iconsColor/primary=1
-bundle/com.genuitec.eclipse.inlinesearch=51
-bundle/com.genuitec.eclipse.monitor=51
-devstyle/state/enabled=1
-editor/org.eclipse.jdt.ui.ClassFileEditor=1
-bundle/com.genuitec.eclipse.webclipse.evergreen=51
-bundle/com.genuitec.eclipse.news=-1
-misc/eclipseVersion/12.21.3.GA-v20220127-1221-B485=1
-misc/period=87640519
-view/org.eclipse.ui.views.PropertySheet=1
+#Fri Sep 23 22:38:21 PDT 2022
 devstyle/state/inlinesearch=1
-bundle/com.genuitec.eclipse.ui.common.platform=55
-misc/count=55
-bundle/com.genuitec.eclipse.meexplorer=51
+misc/arch=x86_64
+bundle/com.genuitec.eclipse.startup.workspace=2
+bundle/com.genuitec.eclipse.ui.common.platform=3
+misc/count=3
+bundle/com.genuitec.eclipse.theming.ui=2
+bundle/com.genuitec.eclipse.meexplorer=2
 misc/productType/devstyle=1
 misc/os=linux
-bundle/com.genuitec.eclipse.meexplorer.jdt=51
+bundle/com.genuitec.eclipse.meexplorer.jdt=2
+bundle/com.genuitec.eclipse.theming.css=-1
 devstyle/state/workbenchColor/Dark_Gray=1
-perspective/org.jboss.tools.common.ui.JBossPerspective=5
-bundle/com.genuitec.eclipse.webicons=55
+perspective/org.jboss.tools.common.ui.JBossPerspective=1
+bundle/com.genuitec.eclipse.webicons=3
+view/org.eclipse.ui.navigator.ProjectExplorer=6
+editor/org.eclipse.jdt.ui.ClassFileEditorNoSource=1
 misc/core_version=
+misc/timestamp=24 Sep 2022, 05\:38\:21
+editor/org.eclipse.jdt.ui.CompilationUnitEditor=4
 bundle/com.genuitec.eclipse.core.common.rss=-1
 misc/installmode/stable=0
+bundle/com.genuitec.eclipse.theming.scrollbar=-1
 misc/product/com.jboss.devstudio.core.product=1
 devstyle/state/editorColor/Darkest_Dark=1
-editor/org.eclipse.m2e.editor.MavenPomEditor=4
+bundle/com.genuitec.eclipse.theming.base=-1
+devstyle/state/iconsColor/primary=1
+bundle/com.genuitec.eclipse.inlinesearch=2
 misc/ws=gtk
-bundle/com.genuitec.eclipse.patches=55
-bundle/com.genuitec.eclipse.startup=51
+bundle/com.genuitec.eclipse.patches=3
+bundle/com.genuitec.eclipse.monitor=2
+bundle/com.genuitec.eclipse.startup=2
+devstyle/state/enabled=1
 misc/installmode/standalone=0
+editor/org.eclipse.jdt.ui.ClassFileEditor=2
+bundle/com.genuitec.eclipse.webclipse.evergreen=2
 misc/locale=en_GB
 misc/workspace_hash=-2045440911
 bundle/com.genuitec.eclipse.theming.epl=-1
-bundle/com.genuitec.eclipsecolortheme.api=55
-bundle/com.genuitec.eclipse.theming.core=55
-bundle/com.genuitec.eclipse.core.common.platform=55
+bundle/com.genuitec.eclipsecolortheme.api=3
+bundle/com.genuitec.eclipse.news=-1
+bundle/com.genuitec.eclipse.theming.core=3
+bundle/com.genuitec.eclipse.core.common.platform=3
+misc/eclipseVersion/12.21.3.GA-v20220127-1221-B485=1
+misc/period=2693123
diff --git a/SOURCES/.metadata/.plugins/org.eclipse.core.resources/.projects/examplemod/.indexes/af/history.index b/SOURCES/.metadata/.plugins/org.eclipse.core.resources/.projects/examplemod/.indexes/af/history.index
index ce736e2..642b1a5 100644
Binary files a/SOURCES/.metadata/.plugins/org.eclipse.core.resources/.projects/examplemod/.indexes/af/history.index and b/SOURCES/.metadata/.plugins/org.eclipse.core.resources/.projects/examplemod/.indexes/af/history.index differ
diff --git a/SOURCES/.metadata/.plugins/org.eclipse.core.resources/.root/1.tree b/SOURCES/.metadata/.plugins/org.eclipse.core.resources/.root/1.tree
index 8b4a427..068d259 100644
Binary files a/SOURCES/.metadata/.plugins/org.eclipse.core.resources/.root/1.tree and b/SOURCES/.metadata/.plugins/org.eclipse.core.resources/.root/1.tree differ
diff --git a/SOURCES/.metadata/.plugins/org.eclipse.e4.workbench/workbench.xmi b/SOURCES/.metadata/.plugins/org.eclipse.e4.workbench/workbench.xmi
index e39df59..2822424 100644
--- a/SOURCES/.metadata/.plugins/org.eclipse.e4.workbench/workbench.xmi
+++ b/SOURCES/.metadata/.plugins/org.eclipse.e4.workbench/workbench.xmi
@@ -1,20 +1,19 @@
@@ -91,2267 +90,2235 @@
[hunk bodies elided: the XML markup of workbench.xmi was stripped during extraction, leaving only diff markers and loose text nodes (perspective IDs, "View categoryTag:..." labels, toolbar tags); the original Eclipse e4 workbench model content is not recoverable]
diff --git a/SOURCES/.metadata/.plugins/org.eclipse.jdt.ui/dialog_settings.xml b/SOURCES/.metadata/.plugins/org.eclipse.jdt.ui/dialog_settings.xml
index 1f7c184..ae17579 100644
--- a/SOURCES/.metadata/.plugins/org.eclipse.jdt.ui/dialog_settings.xml
+++ b/SOURCES/.metadata/.plugins/org.eclipse.jdt.ui/dialog_settings.xml
@@ -6,7 +6,4 @@
[hunk body elided: the XML markup of dialog_settings.xml was stripped during extraction; only bare "-" deletion markers survive]
diff --git a/SOURCES/.metadata/.plugins/org.eclipse.m2e.core.ui/dialog_settings.xml b/SOURCES/.metadata/.plugins/org.eclipse.m2e.core.ui/dialog_settings.xml
index 3b8a6b8..d336880 100644
--- a/SOURCES/.metadata/.plugins/org.eclipse.m2e.core.ui/dialog_settings.xml
+++ b/SOURCES/.metadata/.plugins/org.eclipse.m2e.core.ui/dialog_settings.xml
@@ -1,14 +1,7 @@
[hunk bodies elided for @@ -1,14 +1,7 @@, @@ -19,6 +12,16 @@, @@ -38,27 +41,14 @@, and @@ -73,14 +63,4 @@: the XML markup was stripped during extraction; only bare "-"/"+" markers survive]
diff --git a/SOURCES/.metadata/.plugins/org.eclipse.pde.core/.cache/clean-cache.properties b/SOURCES/.metadata/.plugins/org.eclipse.pde.core/.cache/clean-cache.properties
index 63cdc98..f32b850 100644
--- a/SOURCES/.metadata/.plugins/org.eclipse.pde.core/.cache/clean-cache.properties
+++ b/SOURCES/.metadata/.plugins/org.eclipse.pde.core/.cache/clean-cache.properties
@@ -1,2 +1,2 @@
 #Cached timestamps
-#Sat Sep 17 23:02:02 PDT 2022
+#Fri Sep 23 22:38:27 PDT 2022
diff --git a/SOURCES/.metadata/.plugins/org.eclipse.wst.jsdt.core/externalLibsTimeStamps b/SOURCES/.metadata/.plugins/org.eclipse.wst.jsdt.core/externalLibsTimeStamps
index aee036f..df887a0 100644
Binary files a/SOURCES/.metadata/.plugins/org.eclipse.wst.jsdt.core/externalLibsTimeStamps and b/SOURCES/.metadata/.plugins/org.eclipse.wst.jsdt.core/externalLibsTimeStamps differ
diff --git a/SOURCES/.metadata/.plugins/org.jboss.tools.central/jboss_buzz.xml b/SOURCES/.metadata/.plugins/org.jboss.tools.central/jboss_buzz.xml
index 4d300d0..db96d04 100644
--- a/SOURCES/.metadata/.plugins/org.jboss.tools.central/jboss_buzz.xml
+++ b/SOURCES/.metadata/.plugins/org.jboss.tools.central/jboss_buzz.xml
@@ -1,2 +1,2 @@
-JBoss Tools Aggregated Feed | JBoss Tools
GCC's new fortification level: The gains and costs | Siddhesh Poyarekar | aa8105eb-3693-4988-8f01-b822ce7471ee | 2022-09-17T22:00:00Z
<p>This article describes a new level of fortification supported in GCC. This new level detects more buffer overflows and bugs, which mitigates security issues in applications at run time.</p> <p>C programs routinely suffer from memory management problems. For several years, a <code>_FORTIFY_SOURCE</code> preprocessor macro inserted error detection to address these problems at compile time and run time. To add an extra level of security, <code>_FORTIFY_SOURCE=3</code> has been in the GNU C Library (glibc) since version 2.34. I described its mechanisms in my previous blog post, <a href="https://developers.redhat.com/blog/2021/04/16/broadening-compiler-checks-for-buffer-overflows-in-_fortify_source">Broadening compiler checks for buffer overflows in _FORTIFY_SOURCE</a>. There has been compiler support for this builtin in <a href="https://clang.llvm.org">Clang</a> for some time. Compiler support has also been available for <a href="https://gcc.gnu.org">GCC</a> since the release of version 12 in May 2022. The new mitigation should be available in GNU/Linux distributions with packaged GCC 12.</p> <p>The following sections discuss two principal gains from this enhanced level of security mitigation and the resulting impact on applications.</p> <p><strong>2 principal gains:</strong></p> <ol> <li><p>Enhanced buffer size detection</p></li> <li><p>Better fortification coverage</p></li> </ol> <h2>1. A new builtin provides enhanced buffer size detection</h2> <p>There is a new builtin underneath the new <code>_FORTIFY_SOURCE=3</code> macro in GCC 12, named <code>__builtin_dynamic_object_size</code>. This builtin is more powerful than the previous <code>__builtin_object_size</code> builtin used in <code>_FORTIFY_SOURCE=2</code>. When passed a pointer, <code>__builtin_object_size</code> returns, as a compile-time constant, either the maximum or minimum object size estimate of the object that pointer may be pointing to at that point in the program. On the other hand, <code>__builtin_dynamic_object_size</code> is capable of returning a size expression that is evaluated at execution time. Consequently, the <code>_FORTIFY_SOURCE=3</code> builtin detects buffer overflows in many more places than <code>_FORTIFY_SOURCE=2</code>.</p>
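<p>The run-time nature of the new builtin is easiest to see directly. The following sketch is illustrative only (it is not from the original article, the function name <code>alloc_size_demo</code> is made up, and it assumes GCC 12 or later):</p>
<pre><code class="c">#include <stdlib.h>

/* Returns the size GCC can prove for a heap allocation whose length is
   only known at run time.  __builtin_object_size (p, 0) would fold to
   (size_t) -1 ("unknown") here, but __builtin_dynamic_object_size (p, 0)
   yields an expression that evaluates to n.  */
size_t
alloc_size_demo (size_t n)
{
  char *p = malloc (n);
  size_t sz = p ? __builtin_dynamic_object_size (p, 0) : 0;
  free (p);
  return sz;
}
</code></pre>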
<p>The implementation of <code>__builtin_dynamic_object_size</code> in GCC is compatible with <code>__builtin_object_size</code> and thereby interchangeable, especially in the case of fortification. Whenever possible, the builtin computes a precise object size expression. When the builtin does not determine the size exactly, it returns either a maximum or minimum size estimate, depending on the size type argument.</p> <p>This code snippet demonstrates the key advantage of returning precise values:</p> <pre><code class="cpp">#include <string.h>
#include <stdbool.h>
#include <stdlib.h>

char *b;
char buf1[21];

char *__attribute__ ((noinline)) do_set (bool cond)
{
  char *buf = buf1;

  if (cond)
    buf = malloc (42);

  memset (buf, 0, 22);
  return buf;
}

int main (int argc, char **argv)
{
  b = do_set (false);
  return 0;
}
</code></pre> <p>The program runs to completion when built with <code>-D_FORTIFY_SOURCE=2</code>:</p> <pre><code>gcc -O -D_FORTIFY_SOURCE=2 -o sample sample.c</code></pre> <p>But the program aborts when built with <code>-D_FORTIFY_SOURCE=3</code> and outputs the following message:</p> <pre><code>*** buffer overflow detected ***: terminated
Aborted (core dumped)</code></pre> <p>The key enhancement stems from the difference in behavior between <code>__builtin_object_size</code> and <code>__builtin_dynamic_object_size</code>. <code>_FORTIFY_SOURCE=2</code> uses <code>__builtin_object_size</code> and returns the maximum estimate for object size at pointer <code>buf</code>, which is 42. Hence, GCC assumes that the <code>memset</code> operation is safe at compile time and does not add a call to check the buffer size at run time.</p> <p>However, GCC with <code>_FORTIFY_SOURCE=3</code> invokes <code>__builtin_dynamic_object_size</code> to emit an expression that returns the precise size of the buffer that <code>buf</code> points to at that point in the program. As a result, GCC realizes that the call to <code>memset</code> might not be safe. Thus, the compiler inserts a call to <code>__memset_chk</code> into the running code with that size expression as the bound for <code>buf</code>.</p>
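<p>The shape of that checked call is worth spelling out. The sketch below is a rough illustration, not the literal code GCC emits (the rewriting actually happens in glibc's fortified headers); <code>__memset_chk</code> is the real glibc checking function, while <code>clear_22</code> is a made-up name:</p>
<pre><code class="c">#include <stddef.h>

/* Fortified memset provided by glibc: aborts if n exceeds buflen.  */
extern void *__memset_chk (void *s, int c, size_t n, size_t buflen);

void
clear_22 (char *buf)
{
  /* Conceptually, with -O -D_FORTIFY_SOURCE=3, memset (buf, 0, 22)
     becomes a checked call bounded by the dynamic object size.  */
  __memset_chk (buf, 0, 22, __builtin_dynamic_object_size (buf, 0));
}
</code></pre>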
<h2>2. Better fortification coverage</h2> <p>Building distribution packages with <code>_FORTIFY_SOURCE=3</code> revealed several issues that <code>_FORTIFY_SOURCE=2</code> missed. Surprisingly, not all of these issues were straightforward buffer overflows. The improved fortification also encountered issues in the GNU C library (glibc) and raised interesting questions about object lifetimes.</p> <p>Thus, the benefit of improved fortification coverage has implications beyond buffer overflow mitigation. I will explain the outcomes of <code>_FORTIFY_SOURCE=3</code>'s increased coverage in the following sections.</p> <h3>More trapped buffer overflows</h3> <p>Building applications with <code>_FORTIFY_SOURCE=3</code> detected many simple buffer overflows, such as the <a href="https://bugzilla.redhat.com/show_bug.cgi?id=2115476">off-by-one access in clisp</a> issue. We expected these revelations, which strengthened our justification for building applications with <code>_FORTIFY_SOURCE=3</code>.</p> <p>To further support the use of <code>_FORTIFY_SOURCE=3</code> to improve fortification, we used the <a href="https://github.com/siddhesh/fortify-metrics">Fortify metrics</a> GCC plugin to estimate the number of times _FORTIFY_SOURCE=3 resulted in a call to a checking function (<code>__memcpy_chk</code>, <code>__memset_chk</code>, etc.). We used the Fedora test distribution and some of the <code>Server</code> package group as the sample, which consisted of 96 packages. The key metric is fortification coverage, defined as the ratio of <code>__builtin_object_size</code> calls that resulted in a successful size determination to the total number of <code>__builtin_object_size</code> calls. The plugin also shows the number of successful calls if using <code>__builtin_dynamic_object_size</code> instead of <code>__builtin_object_size</code>, allowing us to infer the fortification coverage if all <code>__builtin_object_size</code> calls were replaced with <code>__builtin_dynamic_object_size</code>.</p> <p>In this short study, we found that <code>_FORTIFY_SOURCE=3</code> improved fortification by nearly 4 times. For example, the Bash shell went from roughly 3.4% coverage with <code>_FORTIFY_SOURCE=2</code> to nearly 47% with <code>_FORTIFY_SOURCE=3</code>. This is an improvement of nearly 14 times. Also, fortification of programs in <code>sudo</code> went from a measly 1.3% to 49.57%, a jump of almost 38 times!</p> <h3>The discovery of bugs in glibc</h3> <p>The increased coverage of <code>_FORTIFY_SOURCE=3</code> revealed programming patterns in application programs that tripped over the fortification without necessarily a buffer overflow. Some of these were genuine bugs in glibc; for the rest, we had to either explain why glibc does not support the pattern or find ways to discourage it.</p> <p>One example is <code>wcrtomb</code>, where glibc makes stronger assumptions about the object size passed than POSIX allows. Specifically, glibc assumes that the buffer passed to <code>wcrtomb</code> is always at least <code>MB_CUR_MAX</code> bytes long. In contrast, the POSIX description makes no such assumption. Due to this discrepancy, any application that passed a smaller buffer would potentially make <code>wcrtomb</code> overflow the buffer during conversion. Then the fortified version <code>__wcrtomb_chk</code> aborts with a buffer overflow, expecting a buffer that is <code>MB_CUR_MAX</code> bytes long. We fixed this bug in glibc-2.36 by making glibc conform to POSIX.</p> <p><code>_FORTIFY_SOURCE=3</code> revealed another pattern. Applications such as systemd used <code>malloc_usable_size</code> to determine available space in objects and then used the residual space. The glibc manual discourages this type of usage, dictating that <code>malloc_usable_size</code> is for diagnostic purposes only. But applications use the function as a hack to avoid reallocating buffers when there is space in the underlying malloc chunk. The implementation of <code>malloc_usable_size</code> needs to be fixed to return the allocated object size instead of the chunk size in non-diagnostic use. Alternatively, another solution is to deprecate the function. But that is a topic for discussion by the glibc community.</p>
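<p>A minimal sketch of the discouraged <code>malloc_usable_size</code> pattern may help. This is a hypothetical example (the 10-byte size is arbitrary; the code is not from systemd); built with GCC 12 and <code>-O -D_FORTIFY_SOURCE=3</code>, it can abort even though the write stays inside the underlying malloc chunk:</p>
<pre><code class="c">#include <malloc.h>
#include <stdlib.h>
#include <string.h>

int
main (void)
{
  char *p = malloc (10);
  if (p == NULL)
    return 1;

  /* Diagnostic-only API misused to find "extra" room in the chunk;
     this may well return more than the 10 bytes requested.  */
  size_t n = malloc_usable_size (p);

  /* _FORTIFY_SOURCE=3 knows the object is 10 bytes from the malloc
     call, so if n > 10 the emitted __memset_chk call aborts.  */
  memset (p, 0, n);

  free (p);
  return 0;
}
</code></pre>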
<h3>Strict C standards compliance</h3> <p>One interesting use case exposed by <code>_FORTIFY_SOURCE=3</code> raised the question of object lifetimes and what developers can do with freed pointers. The bug in question was in <a href="https://sourceforge.net/p/autogen/bugs/212/">AutoGen</a>, which used a pointer value after reallocation to determine whether the same chunk was extended in place to get the new block of memory. This practice allowed the developer to skip copying over some pointers to optimize for performance. At the same time, the program continued using the same pointer, not the <code>realloc</code> call result, since the old pointer did not change.</p> <p>Seeing that the old pointer continued without an update, the compiler assumed that the object size remained the same. How could it know otherwise? The compiler then failed to account for the reallocation, resulting in an abort due to the perceived buffer overflow.</p> <p>Strictly speaking, the C standards prohibit using a pointer to an object after its lifetime ends. It should neither be read nor dereferenced. In this context, it is a bug in the application.</p> <p>However, this idiom is commonly used by developers to prevent making redundant copies. Future updates to <a href="https://gcc.gnu.org/bugzilla/show_bug.cgi?id=105217">GCC</a> may account for this idiom wherever possible, but applications should also explicitly indicate object lifetimes to remain compliant. In the AutoGen example, a simple fix is to unconditionally refresh the pointer after reallocation, ensuring the compiler can detect the new object size (a minimal sketch of this fix appears at the end of this article).</p> <h2>The gains of improved security coverage outweigh the cost</h2> <p>Building with <code>_FORTIFY_SOURCE=3</code> may impact the size and performance of the code. Since <code>_FORTIFY_SOURCE=2</code> generated only constant sizes, its overhead was negligible. However, <code>_FORTIFY_SOURCE=3</code> may generate additional code to compute object sizes. These additions may also cause secondary effects, such as register pressure during code generation. The additional code also tends to increase the size of the resultant binaries.</p> <p>We need a proper study of performance and code size to understand the magnitude of the impact created by <code>_FORTIFY_SOURCE=3</code>'s additional runtime code generation. However, the performance and code size overhead may well be worth it due to the magnitude of improvement in security coverage.</p> <h2>The future of buffer overflow detection</h2> <p><code>_FORTIFY_SOURCE=3</code> has led to significant gains in security mitigation. GCC 12 support brings those gains to distribution builds. But the new level of fortification also revealed interesting issues that require additional work to support correctly. For more background information, check out my previous article, <a href="https://www.redhat.com/en/blog/enhance-application-security-fortifysource">Enhance application security with FORTIFY_SOURCE</a>.</p> <p>Object size determination and fortification remain relevant areas for improvements in compiler toolchains. The toolchain team at Red Hat continues to be involved in the GNU and LLVM communities to make these improvements.</p>
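<p>To close the loop on the AutoGen-style lifetime issue discussed above, here is a hypothetical sketch of the bug and its fix (illustrative only; the sizes are made up and this is not AutoGen's actual code):</p>
<pre><code class="c">#include <stdlib.h>
#include <string.h>

int
main (void)
{
  char *old = malloc (16);
  if (old == NULL)
    return 1;

  char *new = realloc (old, 32);
  if (new == NULL)
    {
      free (old);
      return 1;
    }

  /* Buggy idiom: if realloc extended the chunk in place, new == old,
     and the program keeps writing through `old`.  The C standards end
     the old pointer's lifetime at the realloc call, and the compiler
     still believes its object size is 16, so under _FORTIFY_SOURCE=3

       if (new == old)
         memset (old, 0, 32);   // can abort as a perceived overflow

     The fix: always continue through the pointer realloc returned.  */
  memset (new, 0, 32);

  free (new);
  return 0;
}
</code></pre>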
The post <a href="https://developers.redhat.com/articles/2022/09/17/gccs-new-fortification-level" title="GCC's new fortification level: The gains and costs">GCC's new fortification level: The gains and costs</a> appeared first on <a href="https://developers.redhat.com/blog" title="Red Hat Developer">Red Hat Developer</a>. <br /><br />Siddhesh Poyarekar | 2022-09-17T22:00:00Z
My advice for updating Docker Hub's OpenJDK image | Tim Ellison | 3bcb8704-1585-4386-8123-ee3bcc089043 | 2022-09-16T18:00:00Z
<p>The Java runtime environment in your containers could stop receiving updates in the coming months. It's time to take action. This article explains the decisions that led to this issue and proposes a solution.</p> <h2>OpenJDK and Java SE updates</h2> <p><a href="https://openjdk.org/">OpenJDK</a> is an open source implementation of the Java Platform, Standard Edition (Java SE), on which multiple companies and contributors collaborate.</p> <p>A project at OpenJDK represents each new feature release of the Java SE specification. Subsequent updates to those features, including functional and security fixes, are led by maintainers working in the <a href="https://openjdk.org/projects/jdk-updates/">JDK updates project</a>. Long-term supported releases such as Java SE 8 (since March 2014), Java SE 11 (since Sept 2018), and Java SE 17 (since Sept 2021) undergo a quarterly release update under the guidance of a lead maintainer.</p> <p>The <a href="https://openjdk.org/projects/jdk-updates/maintainers.html">repository maintainers' role</a> is to ensure that updates are both necessary and appropriate for deployed releases. They consider the opinions of multiple contributors when making such update decisions. Many vendors and distributors of Java SE subsequently build from the OpenJDK source code to provide new releases of their own branded Java SE offerings.</p> <p>Andrew Haley is the lead maintainer for Java 8 updates and Java 11 updates at Red Hat, and Goetz Lindenmaier (SAP) is the lead maintainer for Java 17 updates. Update maintainers affiliated with companies that provide commercially supported distributions of OpenJDK based on Java SE work as independent contributors to the project.</p> <h2>Docker Hub deprecates OpenJDK images</h2> <p>For many years, the official <a href="https://hub.docker.com/">Docker Hub</a> image builders took OpenJDK Java SE update binaries from <a href="https://adoptium.net/">Eclipse Adoptium</a> and other locations to build their own image. But in July 2022, the Docker Hub image builders <a href="https://hub.docker.com/_/openjdk">announced the deprecation</a> of this popular image.</p> <p>Now, Docker asks users to obtain their builds of OpenJDK either from a commercial Java vendor or directly from the Adoptium project. There will be no further updates to the existing OpenJDK image, so users risk falling behind with functional and security updates to their Java SE usage unless they move to an alternate provider. I believe the official <a href="https://hub.docker.com/_/eclipse-temurin">Eclipse Temurin image</a> maintained by the Adoptium project is the obvious choice for a replacement image.</p> <h2>Eclipse Adoptium builds JDKs</h2> <p>OpenJDK does not provide binary updates directly from the update projects.
Since July 2022, these long-term supported Java update projects have depended upon <a href="https://adoptium.net/">Eclipse Adoptium</a> to build and distribute consumable OpenJDK binaries.</p> <p>Adoptium is a project dedicated to building, testing, and distributing up-to-date and ready-to-use OpenJDK binaries under an open source license. Adoptium calls its builds of OpenJDK Temurin. They are available across a broad range of processors and operating systems. These Temurin binaries have over half a billion downloads and have earned the trust of enterprise production environments worldwide. A vendor-independent <a href="https://adoptium.net/members">working group</a> based at the Eclipse software foundation leads Adoptium.</p> <p>The Adoptium community provides binaries built directly from OpenJDK source code. These Temurin binaries are available as direct downloads, installers, or container images and are faithful representations of the OpenJDK update source built under controlled conditions.</p> <p>The <a href="https://hub.docker.com/_/eclipse-temurin">official Docker Hub Temurin images</a> contain the latest releases of the OpenJDK updates for several Java SE versions, thoroughly tested with various applications. The images work as direct drop-in replacements for the OpenJDK images. Some OpenJDK images already contain Temurin binaries.</p> <h2>How to move from OpenJDK images to Eclipse Temurin images</h2> <p>The Docker Hub's deprecation decision presents a problem. But there is a solution. We recommend moving from the <a href="https://hub.docker.com/_/openjdk">OpenJDK image</a> to <a href="https://hub.docker.com/_/eclipse-temurin">the official Docker Hub Eclipse Temurin image</a>.</p> <p>The process is simple. All you have to do is identify the <code>FROM</code> lines in Dockerfiles such as this:</p> <pre><code class="java">FROM openjdk:17</code></pre> <p>Change the lines as follows:</p> <pre><code class="java">FROM eclipse-temurin:17</code></pre> <p>The process for changing the use of images other than version 17 is equivalent. You can <a href="https://github.com/adoptium/adoptium-support/issues">report</a> issues to the Adoptium community.</p> <h2>Red Hat support</h2> <p>We encourage everyone to switch to Eclipse Temurin. Many <a href="https://github.com/jenkinsci/docker/pull/1429">application images</a> and <a href="https://github.com/javastacks/spring-boot-best-practice/blob/fc6709cf2ec2fc00b4dfae7210ce503f9c10560c/spring-boot-docker/Dockerfile">examples of best practices</a> have successfully made the change.</p> <p>Red Hat recently <a href="https://developers.redhat.com/articles/2022/08/24/red-hat-expands-support-java-eclipse-temurin">announced direct support for Temurin</a> in development and production as part of Red Hat Runtimes, Red Hat OpenShift, and Red Hat Build of OpenJDK. Red Hat support assures customers that the move to Temurin will be smooth, allowing you to continue focusing on building products that integrate and automate modern business applications and processes.</p> The post <a href="https://developers.redhat.com/articles/2022/09/16/updating-docker-hubs-openjdk-image" title="My advice for updating Docker Hub's OpenJDK image ">My advice for updating Docker Hub's OpenJDK image</a> appeared first on <a href="https://developers.redhat.com/blog" title="Red Hat Developer">Red Hat Developer</a>.
<br /><br />Tim Ellison | 2022-09-16T18:00:00Z
Regex how-to: Quantifiers, pattern collections, and word boundaries | Bob Reselman | 2182a29a-626a-444f-a313-1e4a14d6eeb7 | 2022-09-16T07:00:00Z
<p>Filtering and searching text with regular expressions is an important skill for every developer. Regular expressions can be tricky to master. To work with them effectively, you need a detailed understanding of their symbols and syntax.</p> <p>Fortunately, learning to work with regular expressions can be incremental. You don't need to learn everything all at once to do useful work. Rather, you can start with the basics and then move into more complex topics while developing your understanding and using what you know as you go along.</p> <p>This article is the second in a series. The <a href="https://developers.redhat.com/articles/2022/08/03/beginners-guide-regular-expressions-grep">first article</a> introduced some basic elements of regular expressions: the basic metacharacters (<code>.*^$\s\d</code>) as well as the escape metacharacter <code>\</code>.</p> <p>This article introduces some more advanced syntax: quantifiers, pattern collections, groups, and word boundaries. If you haven't read the first article, you might want to review it now before continuing with this content.</p> <p>These articles demonstrate regular expressions by piping string output from an <a href="https://www.redhat.com/sysadmin/essential-linux-commands"><code>echo</code></a> command to the <a href="https://www.redhat.com/sysadmin/how-to-use-grep"><code>grep</code></a> utility. The <code>grep</code> utility uses a regular expression to filter content. The benefit of demonstrating regular expressions using <code>grep</code> is that you don't need to set up any special programming environment. You can execute an example of a regular expression immediately by copying and pasting the code directly into your terminal window running under Linux.</p> <h2>What's the difference between a regular character and a metacharacter?</h2> <p>A regular character is a letter, digit, or punctuation used in everyday text. When you declare a regular character in a regular expression, the regular expression engine searches content for that declared character. For example, were you to declare the regular character <code>h</code> in a regular expression, the engine would look for occurrences of the character <code>h</code>.</p> <p>A metacharacter is a placeholder symbol. For example, the metacharacter <code>.</code> (dot) represents "any character," and means <em>any character matches here.</em> The metacharacter <code>\d</code> represents a numerical digit, and means <em>any digit matches here.</em> Thus, when you use a metacharacter, the regex engine searches for characters that comply with the particular metacharacter or set of metacharacters.</p> <h2>What are quantifiers?</h2> <p>A quantifier is a syntactic structure in regular expressions that indicates the number of times a character occurs in sequence in the input text. There are two ways to declare a quantifier.
One way is:</p> <pre> <code class="java">x{n}</code></pre> <p>In this syntax:</p> <ul> <li><code>x</code> is the character to match.</li> <li><code>n</code> indicates the number of times the character needs to occur.</li> </ul> <p>A related syntax declares a quantifier with a minimum and maximum range:</p> <pre> <code class="java">x{n,m}</code></pre> <p>In this syntax:</p> <ul> <li><code>x</code> is the character to match.</li> <li><code>n</code> indicates the minimum number of occurrences and <code>m</code> indicates the maximum number of occurrences.</li> </ul> <p>The following example uses a quantifier to create a matching pattern that identifies two occurrences of the regular character <code>g</code> in sequence:</p> <pre> <code class="language-bash">$ teststr="Jeff and the pet Lucky. Gregg and the dog Fido. Chris has 1 bird named Tweety." $ echo $teststr | grep -Po 'g{2}'</code></pre> <p>The regular expression matches the characters highlighted in bold in the following text:</p> <p><code>Jeff and the pet Lucky. Gre<strong>gg</strong> and the dog Fido. Chris has 1 bird named Tweety.</code></p> <p>Thus, the regular expression returns the following result:</p> <pre> <code class="java">gg</code></pre> <p>The following example uses a quantifier to create a matching pattern that identifies a minimum and a maximum for occurrences of the character <code>g</code> in a sequence. The minimum length is 1 and the maximum is 2. The regular expression is processed in a case-insensitive manner, as indicated by the <code>-i</code> option to <code>grep</code>:</p> <pre> <code class="language-bash">$ teststr="Jeff and the pet Lucky. Gregg and the dog Fido. Chris has 1 bird named Tweety." $ echo $teststr | grep -Poi 'g{1,2}'</code></pre> <p>The regular expression matches the characters highlighted in bold in the following text:</p> <p><code>Jeff and the pet Lucky. <strong>G</strong>re<strong>gg</strong> and the do<strong>g</strong> Fido. Chris has 1 bird named Tweety.</code></p> <p>Because each sequence is identified and returned on a one-by-one basis, the output is:</p> <pre> <code class="java">G gg g</code></pre> <h2>What are pattern collections?</h2> <p>A pattern collection is a syntactic structure that describes a <a href="https://www.gnu.org/software/grep/manual/html_node/Character-Classes-and-Bracket-Expressions.html">character class</a>. A character class is a set of metacharacters and regular characters that combine to create a matching pattern that, like a metacharacter, can match many different characters in text. A pattern collection is defined between square brackets (<code>[ ]</code>).</p> <p>The following example uses the <code>[A-Z]</code> character class, which denotes any uppercase character from <code>A</code> to <code>Z</code> inclusive, to create a pattern collection that matches only uppercase characters in the given text:</p> <p><code class="language-bash">$ teststr="Jeff and the pet Lucky. Gregg and the dog Fido. Chris has 1 bird named Tweety." $ echo $teststr | grep -Po '[A-Z]'</code></p> <p>The regular expression matches the characters highlighted in bold in the following text:</p> <p><code><strong>J</strong>eff and the pet <strong>L</strong>ucky. <strong>G</strong>regg and the dog <strong>F</strong>ido. 
<strong>C</strong>hris has 1 bird named <strong>T</strong>weety.</code></p> <p>The output is:</p> <pre> <code class="java">J L G F C T</code></pre> <p>The following example uses the <code>[0-9]</code> character class, which denotes any digit between <code>0</code> and <code>9</code>, to create a pattern collection that matches only numeric characters in the given text:</p> <pre> <code class="language-bash">$ teststr="Jeff and the pet Lucky. Gregg and the dog Fido. Chris has 1 bird named Tweety." $ echo $teststr | grep -Po '[0-9]'</code></pre> <p>The regular expression matches the characters highlighted in bold in the following text:</p> <p><code>Jeff and the pet Lucky. Gregg and the dog Fido. Chris has <strong>1</strong> bird named Tweety.</code></p> <p>The output is:</p> <pre> <code class="java">1</code></pre> <p>The following example uses a pattern collection that matches certain exact regular characters within a set of regular characters. The regular expression says: <em>Match any <code>f</code>, <code>G</code>, or <code>F</code></em>:</p> <pre> <code class="language-bash">$ teststr="Jeff and the pet Lucky. Gregg and the dog Fido. Chris has 1 bird named Tweety." $ echo $teststr | grep -Po '[fGF]'</code></pre> <p>The regular expression matches the characters highlighted in bold in the following text:</p> <p><code>Je<strong>ff</strong> and the pet Lucky. <strong>G</strong>regg and the dog <strong>F</strong>ido. Chris has 1 bird named Tweety.</code></p> <p>The output is:</p> <pre> <code class="java">f f G F</code></pre> <p>The following example uses a pattern collection with both metacharacters and regular characters. The logic behind the regular expression says: <em>Match any <code>g</code>, <code>r</code>, or <code>e</code> followed by a space character and then the string <code>Fido</code></em>:</p> <pre> <code class="language-bash">$ teststr="Jeff and the pet Lucky. Gregg and the dog Fido. Chris has 1 bird named Tweety." $ echo $teststr | grep -Po '[gre]\sFido'</code></pre> <p>The regular expression matches the characters highlighted in bold in the following text:</p> <p><code>Jeff and the pet Lucky. Gregg and the do<strong>g Fido</strong>. Chris has 1 bird named Tweety.</code></p> <p>The output is:</p> <pre> <code class="java">g Fido</code></pre> <p>The following example uses two pattern collections along with metacharacters that are outside them. The regular expression says: <em>Match a numeric character, then continue matching any character zero or more times, until an uppercase character is matched</em>. The pattern collection <code>[0-9]</code> indicates any numeral from <code>0</code> to <code>9</code>. The metacharacters <code>.*</code> indicate zero or more instances of any character, and the pattern collection <code>[A-Z]</code> indicates any uppercase character from <code>A</code> to <code>Z</code>:</p> <pre> <code class="language-bash">$ teststr="Jeff and the pet Lucky. Gregg and the dog Fido. Chris has 1 bird named Tweety." $ echo $teststr | grep -Po '[0-9].*[A-Z]'</code></pre> <p>The regular expression matches the characters highlighted in bold in the following text:</p> <p><code>Jeff and the pet Lucky. Gregg and the dog Fido. Chris has <strong>1 bird named T</strong>weety.</code></p> <p>The output is:</p> <pre> <code class="java">1 bird named T</code></pre> <p>The following example uses the negation metacharacter <code>^</code> within a pattern collection.
The negation metacharacter indicates that the succeeding characters are <em>not</em> to be matched when the regular expression is being executed.</p> <p class="Indent1"><strong>Note</strong>: As you might remember from the first article in this series, <code>^</code> is the same metacharacter that indicates a line start—but only when used <em>outside</em> square brackets. The <code>^</code> metacharacter indicates negation <em>only</em> when it appears within the square brackets (<code>[ ]</code>) that declare a pattern collection.</p> <p>The following pattern collection says: <em>Match any character that is not <code>a</code>, <code>e</code>, <code>i</code>, <code>o</code>, or <code>u</code></em>:</p> <pre> <code class="language-bash">$ teststr="Jeff and the pet Lucky." $ echo $teststr | grep -Po '[^aeiou]'</code></pre> <p>The regular expression matches the characters highlighted in bold in the following text. The text is underlined to make the space characters apparent:</p> <p><code><u><strong>J</strong>e<strong>ff </strong>a<strong>nd th</strong>e<strong> p</strong>e<strong>t L</strong>u<strong>cky.</strong></u></code></p> <p>Space characters in the following output are also underlined to make them apparent. Space characters are matched by this regular expression:</p> <pre class="language-bash"> <code>J f f _ n d _ t h _ p t _ L c k y . </code></pre> <h2>Groups</h2> <p>A group in a regular expression is, as the name implies, a group of characters declared according to a specific definition. A group declaration can include metacharacters and regular characters. A group is declared between open and closed parentheses like this: <code>( )</code>.</p> <p>The following example uses a <code>.</code> (dot) metacharacter, which indicates "any character." The declared group says: <em>Match any three characters as a group and return each group</em>:</p> <pre> <code class="language-bash">$ teststr="Jeff and the pet Lucky. Gregg and the dog Fido. Chris has 1 bird named Tweety." $ echo $teststr | grep -Po '(...)'</code></pre> <p>The regular expression matches the characters highlighted in alternating bold and non-bold text as shown in the following text. Again, the text is underlined to make the space characters apparent:</p> <p><code><u><strong>Jef</strong>f a<strong>nd </strong>the<strong> pe</strong>t L<strong>uck</strong>y. <strong>Gre</strong>gg <strong>and</strong> th<strong>e d</strong>og <strong>Fid</strong>o. <strong>Chr</strong>is <strong>has</strong> 1 <strong>bir</strong>d n<strong>ame</strong>d T<strong>wee</strong>ty.</u></code></p> <p>Because the group is identified and returned on a one-by-one basis, the output is:</p> <pre> <code class="language-bash">Jef f_a nd_ the _pe t_L uck y._ Gre gg_ and _th e_d og_ Fid o._ Chr is_ has _1_ bir d_n ame d_T wee ty. </code></pre> <p>The following example uses the <code>.</code> (dot) metacharacter along with the regular character <code>y</code> to define a group of three characters, of which the first two characters can be anything and the third character must be <code>y</code>.</p> <pre> <code class="language-bash">$ teststr="Jeff and the pet Lucky. Gregg and the dog Fido. Chris has 1 bird named Tweety." $ echo $teststr | grep -Po '(..y)'</code></pre> <p>The regular expression matches the characters highlighted in bold in the following text:</p> <p><code>Jeff and the pet Lu<strong>cky</strong>. Gregg and the dog Fido.
Chris has 1 bird named Twe<strong>ety</strong>.</code></p> <p>The output is:</p> <pre> <code class="java">cky ety</code></pre> <p>The following example demonstrates a regular expression group that uses the <code>.</code> (dot) metacharacter along with the <code>\d</code> metacharacter to define a group of five characters, of which the first two characters are any regular character, the third character is a digit, and the last two characters are any regular characters:</p> <pre> <code class="language-bash">$ teststr="Jeff and the pet Lucky. Gregg and the dog Fido. Chris has 1 bird named Tweety." $ echo $teststr | grep -Po '(..\d..)'</code></pre> <p>The regular expression matches the characters highlighted in bold in the following text. The text is underlined to make the space characters apparent.</p> <p><code><u>Jeff and the pet Lucky. Gregg and the dog Fido. Chris ha<strong>s 1 b</strong>ird</u></code><code><u> named Tweety.</u></code></p> <p>The output is:</p> <pre> <strong><code class="java">s<u> </u>1<u> </u>b</code></strong></pre> <h2>Word boundaries</h2> <p>A word character is declared using the metacharacters <code>\w</code>. A word character indicates any uppercase character, lowercase character, numeric character, or a connector character such as an underscore.</p> <p>A word boundary is defined as a transition between a word character and a beginning space, an ending space, or a punctuation mark ( <code>.!?</code> ). A word boundary is declared using the metacharacters <code>\b</code>.</p> <p>The following example demonstrates a regular expression that uses the metacharacters <code>\w+</code> to find occurrences of words within text. The metacharacter <code>+</code> indicates one or more occurrences of a character. The logic in play is: <em>Match one or more word characters</em>:</p> <pre> <code class="language-bash">$ teststr="Jeff and the pet Lucky." $ echo $teststr | grep -Po '\w+'</code></pre> <p>The regular expression matches the characters highlighted in bold in the following text:</p> <p><code><strong>Jeff</strong> <strong>and</strong> <strong>the</strong> <strong>pet</strong> <strong>Lucky</strong></code></p> <p>Because each word is identified and returned on a one-by-one basis, the output is:</p> <pre> <code class="java">Jeff and the pet Lucky</code></pre> <p>The following example uses a word boundary to find occurrences of the regular character <code>a</code> that appear at the beginning of a word:</p> <pre> <code class="language-bash">$ teststr="Jeff and the pet Lucky. Gregg and the dog Fido. Chris has 1 bird named Tweety." $ echo $teststr | grep -Po '\ba'</code></pre> <p>The regular expression matches the characters highlighted in bold in the following text:</p> <p><code>Jeff <strong>a</strong>nd the pet Lucky. Gregg <strong>a</strong>nd the dog Fido. Chris has 1 bird named Tweety.</code></p> <p>The output is:</p> <pre> <code class="java">a a</code></pre> <p>The following example uses a word boundary to find occurrences of the regular character <code>y</code> that appear at the end of a word:</p> <pre> <code class="language-bash">$ teststr="Jeff and the pet Lucky. Gregg and the dog Fido. Chris has 1 bird named Tweety." $ echo $teststr | grep -Po 'y\b'</code></pre> <p>The regular expression matches the characters highlighted in bold in the following text. Note that punctuation marks at the end of a word are not considered word characters and are excluded from the match:</p> <p><code><u>Jeff and the pet Luck<strong>y</strong>. Gregg and the dog Fido.
Chris has 1 bird named Tweet<strong>y</strong>.</u></code></p> <p>The output is:</p> <pre> <code class="java">y y</code></pre> <p>The following example uses a word boundary to find occurrences of the regular characters <code>Tweety</code> that appear at the end of a word:</p> <pre> <code class="language-bash">$ teststr="Jeff and the pet Lucky. Gregg and the dog Fido. Chris has 1 bird named Tweety." $ echo $teststr | grep -Po 'Tweety\b'</code></pre> <p>The regular expression matches the characters highlighted in bold in the following text. Again, notice that punctuation marks at the end of a word are excluded:</p> <p><code>Jeff and the pet Lucky. Gregg and the dog Fido. Chris has 1 bird named <strong>Tweety</strong>.</code></p> <p>The output is:</p> <pre> <code class="java">Tweety</code></pre> <p>The following example contains a regular expression group that uses word boundaries to find occurrences of words that start with the regular character <code>a</code> and end with the regular character <code>d</code>. The regular expression uses the metacharacters <code>\w*</code> to declare all occurrences of word characters:</p> <pre> <code class="language-bash">$ teststr="Jeff and the pet Lucky. Gregg and the dog Fido. Chris has 1 bird named Tweety." $ echo $teststr | grep -Po '\ba\w*d\b'</code></pre> <p>The regular expression matches the characters highlighted in bold in the following text.</p> <p><code>Jeff <strong>and</strong> the pet Lucky. Gregg <strong>and</strong> the dog Fido. Chris has 1 bird named Tweety.</code></p> <p>The output is:</p> <pre> <code class="java">and and</code></pre> <h2>Grouping and specifying multiple characters simultaneously extend regular expressions</h2> <p>This article gave you an introduction to working with quantifiers, pattern collections, groups, and word boundaries. You learned to use quantifiers to declare a range of character occurrences to match. Also, you learned that pattern collections enable you to declare character classes that match characters in a generic manner. Groups execute matches that declare a particular set of characters. Word boundaries allow you to make matches by working within the boundaries of space characters and punctuation marks.</p> <p>The intermediate concepts covered in this article will bring additional power and versatility to working with regular expressions. But there's a lot more to learn. Fortunately, as mentioned at the beginning of this article, you can use the concepts and techniques discussed in this article immediately.</p> <p>The key is to start practicing what you've learned now. Mastery is the result of small, incremental accomplishments. As with any skill, the more you practice, the better you'll get.</p> The post <a href="https://developers.redhat.com/articles/2022/09/16/regex-how-quantifiers-pattern-collections-and-word-boundaries" title="Regex how-to: Quantifiers, pattern collections, and word boundaries">Regex how-to: Quantifiers, pattern collections, and word boundaries</a> appeared first on <a href="https://developers.redhat.com/blog" title="Red Hat Developer">Red Hat Developer</a>. <br /><br />Bob Reselman2022-09-16T07:00:00ZNew Keycloak maintainer: Michal HajasStian Thorgersenhttps://www.keycloak.org/2022/09/mhajas2022-09-16T00:00:00ZWe are pleased to welcome Michal Hajas as an official maintainer of Keycloak.
Michal has been with the Keycloak project since September 2015, and since then has contributed to almost every component of Keycloak - core server, authorization services, adapters, JavaScript, code auto-generation, legacy operator - either by review or code contribution. Since his first involvement, he has steadily contributed code. Lately, he has designed and co-developed Hot Rod storage and has been instrumental in establishing the new map storage overall. He reviews community contributions and offers help to finalize PRs, and participates in community discussions and issue triaging. He understands and respects the code of conduct, and his reviews help maintain it.Stian ThorgersenMultiple repositories Pull Request chaos, crawl them all in one single placeEnrique Mingorance Canohttps://blog.kie.org/2022/09/multiple-repositories-pull-request-chaos-crawl-them-all-in-one-single-place.html2022-09-15T17:00:00ZFlickr chaos – https://bit.ly/3Q2zfYS It is very common to find software engineering projects where multiple repositories are involved for the same or different projects, all somehow related to each other. Many people push their pull requests to any of them, and it is easy to lose track of the situation unless you constantly browse them all to get a clear picture of what is going on. That was the situation we had here at the Red Hat Business Automation team, and we solved it by creating a helpful tool that you can easily use for your own set of projects: easy, quick, and free. THE CROSS-REPO PRS PROBLEM This is already covered by a previous entry, so feel free to read it in case you are not familiar with this kind of situation or these concepts. THE CHAIN-STATUS SOLUTION So we said to ourselves, what if we had a centralized place, a web page for instance, where we could see at a glance the situation of all the pull requests for all of our repositories? Chain-status was the solution. Prerequisites: * It has to solve not only our particular problem, so anyone can use it. * It has to be public, no authentication required. * It has to be fast; we can’t wait for the whole pull request set to be crawled every time anyone gets into the application. * Multiple streams or different project sets can be handled in different views, like different products or product versions, from the same place. * The content can be filtered out. So the conclusion was to create, on the one hand, a React web page to consume the pull request information from a static report, and on the other, a tool to generate that report based on GitHub information. This way: * The information will be produced asynchronously, the frequency will be up to the user/developer, and GitHub API rate limit problems will be avoided. * The information can be taken even from private repositories and be exposed publicly, and no authentication will be required. * No waiting time while information is requested from the GitHub service. * The webpage (HTML+JS files) can be stored on any web service, even on free services. * No backend server is required. RUNNING EXAMPLE You can check the KIE RHBA status web page. Chain Status web tool screenshot HOW CAN I ADD IT TO MY ORGANIZATION? The best way to integrate this tool in your organization or set of repositories is by using the provided configurable GitHub Actions. In particular, this tool comes with two main easy-to-use actions: * Generate App: this action aims to build and copy the React web application inside your repository and publish it using the NPM tool.
* Generate Data: given a project structure and some project information as input, this action is focused on generating the data report, gathering the information using the GitHub API. This report is then used by the web application as a content source. Thus, in order to use these actions in your organization, you only have to add two workflows (one per action) to your main repository as follows: 1. Prerequisites: have a GitHub token properly configured in your organization (see the GitHub documentation on how to configure it). 2. Generate app workflow (generate_status_page.yaml): add the GitHub workflow for the web page generation; this should generally be run only once (or whenever there are changes to the web app look and feel).

name: Generate status page
on: workflow_dispatch
jobs:
  generate-status-page:
    if: github.repository_owner == '<OWNER>'
    concurrency:
      group: generate-status-page
      cancel-in-progress: true
    strategy:
      matrix:
        os: [ubuntu-latest]
      fail-fast: true
    runs-on: ubuntu-latest
    name: Generate status page
    steps:
      - name: Generate status page
        uses: kiegroup/chain-status/.ci/actions/generate-app@main
        with:
          info-md-url: "<PATH-TO-INFO>"
          github-token: "${{ secrets.GITHUB_TOKEN }}"
          gh-pages-branch: "gh-pages"

3. Generate data workflow (generate_status_page_data.yaml): add the periodic workflow that will continuously generate the data fetched by the web application.

name: Generate status page data
on:
  workflow_dispatch:
  schedule:
    - cron: '0 * * * *'
jobs:
  generate-status-page-data:
    if: github.repository_owner == '<OWNER>'
    concurrency:
      group: generate-status-page-data
      cancel-in-progress: true
    strategy:
      matrix:
        os: [ubuntu-latest]
      fail-fast: true
    runs-on: ubuntu-latest
    name: Generate status page data
    steps:
      - name: Generate status page data
        uses: kiegroup/chain-status/.ci/actions/generate-data@main
        with:
          definition-file: <PATH-TO-DEFINITION-FILE>
          # projects: <PROJECTS-LIST>
          title: <TITLE>
          subtitle: <SUBTITLE>
          base-branch-filter: <BRANCH-LIST>
          created-by: Github Action
          created-url: https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}
          logger-level: debug
          github-token: "${{ secrets.GITHUB_TOKEN }}"
          gh-pages-branch: "gh-pages"

As already introduced, the generate data flow relies on a project structure definition, which can be provided either using a build-chain definition file or a projects list: * Build-chain definition file (using the ‘definition-file’ field): a YAML definition file for cross-related, inter-dependent projects, which was introduced for the build-chain tool. This tool is already covered by a previous entry, so feel free to read it if you want to get more details on it and on its definition files. * Projects list (using the ‘projects’ field): a comma-separated list of projects for which you would like to provide pull request statuses. [Still a Work in Progress] This was a brief explanation of how you could integrate this tool in your organization. If you need more details, feel free to visit the homepage, where you can find a step-by-step guide on how to integrate it, with some links to running examples. ADDITIONAL FUNCTIONALITIES In addition to the pull request summary functionality, it is also possible to add multiple Jenkins status reports.
The main advantage of this feature is that you can check the status of all your Jenkins jobs in a single place, making it easier to check which runs succeeded or failed, as well as the time and average time jobs are consuming. As an example you can check the KIE RHBA daily builds page. To configure the Jenkins status reports feature, you can create a Jenkins pipeline that will generate and update the data periodically. You can schedule the Jenkins pipeline to run as often as you need to keep the status updated. You can add the following steps as part of your Jenkins pipeline to generate and update the status report: 1. Clone the GitHub pages repository

stage('Clone gh-pages repository') {
  steps {
    script {
      println "Checking out https://github.com/${ghPagesRepository}:${ghPagesBranch} into ${ghPagesRepoFolder} folder"
      sh "git clone -b ${ghPagesBranch} --single-branch https://github.com/${ghPagesRepository} ${ghPagesRepoFolder}"
    }
  }
}

2. Install the chain-status tool

stage('Install chain-status tool') {
  steps {
    script {
      try {
        sh "npm install -g @kie/chain-status-action"
      } catch(e) {
        println '[WARNING] Error installing @kie/chain-status-action.'
      }
    }
  }
}

3. Generate the updated data

stage('Generate data') {
  steps {
    script {
      dir(ghPagesRepoFolder) {
        sh "build-chain-status-report --jenkinsUrl ${jenkinsURL} --jobUrl ${jenkinsJobPath} -t ${projectTitle} -st ${projectSubtitle} --certFilePath ${jenkinsCertFile} --outputFolderPath ./data/ --skipZero -cb \"Jenkins Job\" -cu \"${env.BUILD_URL}\" --order 1001"
      }
    }
  }
}

4. Push changes to update the status report

stage('Push changes to repository') {
  steps {
    script {
      println "Pushing changes to ${ghPagesRepository}:${ghPagesBranch}"
      dir(ghPagesRepoFolder) {
        withCredentials([usernamePassword(credentialsId: "${githubCredentialsId}", usernameVariable: 'GITHUB_USER', passwordVariable: 'GITHUB_TOKEN')]) {
          githubscm.setUserConfig("${GITHUB_USER}")
          sh("git config --local credential.helper \"!f() { echo username=\\$GITHUB_USER; echo password=\\$GITHUB_TOKEN; }; f\"")
          sh 'git add data/*'
          sh 'git commit -m "Generate Jenkins Data"'
          sh "git push origin ${ghPagesBranch}"
        }
      }
    }
  }
}

NEXT STEPS AND LIMITATIONS HISTORIC FUNCTIONALITY Since the generator tool records the status every day, we expect to offer a historic view functionality to be able to compare statuses between dates. TO COVER NOT ONLY GITHUB BUT OTHER REPOSITORY SERVICES Right now we only cover GitHub as the source the generator tool takes information from, but we expect to cover other kinds of services like GitLab or Bitbucket. CONCLUSION We have been using this tool for our repositories for a year, and we can say it’s a very useful tool which solves the cross-repo pull request summary problem. After a year of experience with the tool, we can say it offers: * The ability to constantly see the status of the different contributions from the different people. * Visibility into who is working on what, like which are my own open pull requests. * A quick way to check obsolete contributions and keep our repositories very clean. * A public Jenkins job summary, no matter whether the Jenkins instance is accessible or not. * A quick check of how healthy our CI/CD setup is, thanks to the error index information from the tool.
* The ability to see related pull requests for every pull request, thanks to the cross-repo pull request functionality. USEFUL LINKS [Chain status] [Build chain tool] [Build chain npm package] [Configuration reader] [RHBA definition and project tree files] [RHBA flows]Enrique Mingorance CanoHow to implement a job queue with RedisClement Escoffier (https://twitter.com/clementplop)https://quarkus.io/blog/redis-job-queue/2022-09-15T00:00:00Z2022-09-15T00:00:00ZIn a previous post on how to cache with Redis, we implemented a simple cache backed by Redis. That’s just one use case of Redis. Redis is also used as a messaging server to implement the processing of background jobs or other kinds of messaging tasks. This post explores implementing this pattern with Quarkus...Clement Escoffier (https://twitter.com/clementplop)2022-09-15T00:00:00ZGetting started with Jakarta RESTful ServicesF.Marchionihttp://www.mastertheboss.com/jboss-frameworks/resteasy/getting-started-with-jakarta-restful-services/2022-09-14T16:44:00ZThe latest release of RESTEasy (6.1.0) provides an implementation of Jakarta RESTful Web Services 3.1, which is a core component of Jakarta EE 10. Let’s review the core features of Jakarta REST Services in this article. What’s new in Jakarta RESTful Web Services 3.1 The Jakarta RESTful Web Services 3.1 specification (part of ...F.MarchioniA beginner’s guide to regular expressions with grepBob Reselmand2745cb9-0e7f-4c18-88b2-a5ce98fb99ac2022-09-14T07:00:00Z2022-09-14T07:00:00Z<p>A <em>regular expression</em> (also called a <em>regex</em> or <em>regexp</em>) is a rule that a computer can use to match characters or groups of characters within a larger body of text. For instance, using regular expressions, you could find all the instances of the word <em>cat</em> in a document, or all instances of a word that begins with <em>c</em> and ends with <em>t.</em></p> <p>Use of regular expressions in the real world can get much more complex—and powerful—than that. For example, imagine you need to write code verifying that all content in the body of an HTTP POST request is free of script injection attacks. Malicious code can appear in any number of ways, but you know that injected script code will always appear between <code><script></script></code> HTML tags. You can apply the regular expression <code><script>.*<\/script></code>, which matches any block of code text bracketed by <code><script></code> tags, to the HTTP request body as part of your search for script injection code.</p> <p>This example is but one of many uses for regular expressions. In this series, you'll learn more about how the syntax for this and other regular expressions works.</p> <p>As just demonstrated, a regex can be a powerful tool for finding text according to a particular pattern in a variety of situations. Once mastered, regular expressions provide developers with the ability to locate patterns of text in source code and documentation at design time.
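<p>As an aside, the script-injection rule shown earlier translates directly to general-purpose languages. Here is a minimal sketch (the class name and the sample request body are hypothetical, added purely for illustration) of how the same pattern could be run in Java with the standard <code>java.util.regex</code> package:</p> <pre> <code class="language-java">import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class ScriptTagCheck {
    public static void main(String[] args) {
        // The same rule shown above: any block of text bracketed by <script> tags.
        // In a Java string literal the backslash itself must be escaped, hence "\\/".
        Pattern pattern = Pattern.compile("<script>.*<\\/script>");
        String body = "name=joe<script>alert('x')</script>&age=42"; // hypothetical POST body
        Matcher matcher = pattern.matcher(body);
        if (matcher.find()) {
            System.out.println("Possible script injection: " + matcher.group());
        }
    }
}</code></pre>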
You can also apply regular expressions to text that is subject to algorithmic processing at runtime such as content in HTTP requests or event messages.</p> <p>Regular expressions are supported by many programming languages, as well as classic command-line applications such as <a href="https://www.redhat.com/sysadmin/linux-text-manipulation-tools">awk, sed, and grep</a>, which were developed for Unix many decades ago and are now offered on GNU/Linux.</p> <p>This article examines the basics of using regular expressions under <code>grep</code>. The article shows how you can use a regular expression to declare a pattern that you want to match, and outlines the essential building blocks of regular expressions, with many examples. This article assumes no prior knowledge of regular expressions, but you should understand how to work with the <a href="https://developers.redhat.com/topics/linux">Linux</a> operating system at the command line.</p> <h2>What are regular expressions, and what is grep?</h2> <p>As we've noted, a regular expression is a rule used for matching characters in text. These rules are <em>declarative,</em> which means they are immutable: once declared, they do not change. But a single rule can be applied to any variety of situations.</p> <p>Regular expressions are written in a special language. Although this language has been standardized, dialects vary from one regular expression engine to another. For example, <a href="https://developers.redhat.com/topics/javascript">JavaScript</a> has a regex dialect, as do <a href="https://developers.redhat.com/topics/c">C++</a>, <a href="https://developers.redhat.com/java">Java</a>, and <a href="https://developers.redhat.com/topics/python">Python</a>.</p> <p>This article uses the regular expression dialect that goes with the Linux <a href="https://www.redhat.com/sysadmin/how-to-use-grep">grep</a> command, with an extension to support more powerful features. <code>grep</code> is a binary executable that filters content in a file or output from other commands (stdout). Regular expressions are central to <code>grep</code>: The <em>re</em> in the middle of the name stands for "regular expression."</p> <p>This article uses <code>grep</code> because it doesn't require that you set up a particular coding environment or write any code to work with the examples of regular expressions demonstrated in this article. All you need to do is copy and paste an example onto the command line of a Linux terminal and you'll see results immediately. The <code>grep</code> command can be used in any shell.</p> <p>Because this article focuses on regular expressions as a language, and not on manipulating files, the examples use samples of text piped to <code>grep</code> instead of input files.</p> <h3>How to use grep against content in a file</h3> <p>To print lines in a file that match a regular expression, use the following syntax:</p> <pre> <code class="language-bash">$ grep -options <regular_expression> /paths/to/files</code></pre> <p>In this command syntax:</p> <ul> <li><code>-options</code>, if specified, control the behavior of the command.</li> <li><code><regular_expression></code> indicates the regular expression to execute against the files.</li> <li><code>/paths/to/files</code> indicate one or more files against which the regular expression will be executed.</li> </ul> <p>The options used in this article are:</p> <ul> <li><code>-P</code>: Apply regular expressions in the style of the Perl programming language.
This option, which is specific to GNU/Linux, is used in the article to unlock powerful features that aren't recognized by <code>grep</code> by default. There is nothing specific to Perl in the regular expressions used in this article; the same features can be found in many programming languages.</li> <li><code>-i</code>: Match in a case-insensitive manner.</li> <li><code>-o</code>: Print only the characters matching the regular expression. By default, the whole line containing the matching string is printed.</li> </ul> <h3>How to pipe content to a regular expression</h3> <p>As mentioned earlier, you can also use a regular expression to filter output from stdout. The following example uses the pipe symbol (<code>|</code>) to feed the result of an <code>echo</code> command to <code>grep</code>.</p> <pre> <code class="language-bash">$ echo "I like using regular expressions." | grep -Po 'r.*ar'</code></pre> <p>The command produces the following output:</p> <pre> <code class="language-java">regular</code></pre> <p>Why does <code>grep</code> return the characters <code>regular</code> to match the regular expression specified here? We'll explore the reasons in subsequent sections of this article.</p> <h2>Regular characters, metacharacters, and patterns: The building blocks of regular expressions</h2> <p>You'll use three basic building blocks when working with regular expressions: <em>regular characters, metacharacters,</em> and <em>patterns.</em> Regular characters and metacharacters are used to create a regular expression, and that regular expression represents a matching pattern that the regex engine applies to some content.</p> <p>You can think of a metacharacter as a placeholder symbol. For example, the <code>.</code> metacharacter (a dot or period) represents "any character." The <code>\d</code> metacharacter represents any single numeral, 0 through 9.</p> <p>The <code>*</code> metacharacter is a shorthand that represents the instruction "search for a character that occurs zero or more times as defined by the preceding character." (You'll see how to work with the <code>*</code> metacharacter in sections to come.)</p> <p>Regular expressions support many metacharacters, each worthy of a page or two of description. For now, the important thing to understand is that a metacharacter is a reserved symbol used by the regex engine to describe a character in a generic manner. Also, certain metacharacters are a shorthand for a search instruction.</p> <p>You can combine regular characters with metacharacters to declare rules that define search patterns. For example, consider the following short regular expression:</p> <pre> <code class="language-java">.t</code></pre> <p>This matches a pattern consisting of two characters. The first character can be any character, as declared by the <code>.</code> (dot) metacharacter, but the second character must be <code>t</code>. Thus, applying the regular expression <code>.t</code> to the string <code>I like cats but not rats</code> matches the strings highlighted in bold font here:</p> <p><code>I like c<strong>at</strong>s b<strong>ut</strong> n<strong>ot</strong> r<strong>at</strong>s</code></p> <p>You can do a lot using just the basic metacharacters to create regular expressions with <code>grep</code>. The following sections provide a number of useful examples.</p> <h2>Running basic regular expressions</h2> <p>The following subsections demonstrate various examples of regular expressions. The examples are presented as two commands to enter in a Linux terminal. 
The first command creates a variable named <code>teststr</code> that contains a sample string. The second executes the <code>echo</code> command against <code>teststr</code> and pipes the result of the <code>echo</code> command to <code>grep</code>. The <code>grep</code> command then filters the input according to the associated regular expression.</p> <h3>How to declare an exact pattern match using regular characters</h3> <p>The following example demonstrates how to search a string according to the pattern of regular characters, <code>Fido</code>. The search declaration is case-sensitive:</p> <pre> <code class="language-bash">$ teststr="Jeff and the pet Lucky. Gregg and the dog Fido. Chris has 1 bird named Tweety." $ echo $teststr | grep -Po 'Fido'</code></pre> <p>The result is:</p> <pre> <code class="language-java">Fido</code></pre> <h3>How to declare a case-insensitive exact pattern match</h3> <p>The following example demonstrates how to search a string according to a pattern of regular characters, <code>fido</code>. The search declaration is case-insensitive, as indicated by the <code>-i</code> option in the <code>grep</code> command. Thus, the regex engine will find occurrences such as <code>FIDO</code> as well as <code>fido</code> or <code>fiDo</code>.</p> <pre> <code class="language-bash">$ teststr="Jeff and the pet Lucky. Gregg and the dog Fido. Chris has 1 bird named Tweety." $ echo $teststr | grep -Poi 'fido'</code></pre> <p>The result is:</p> <pre> <code class="language-java">Fido</code></pre> <h3>How to declare a logical pattern match</h3> <p>The following example uses the <code>|</code> metacharacter symbol to search according to a <em>this or that</em> condition—that is, a condition that can be satisfied by either of the regular expressions on either side of <code>|</code>. In this case, the regular expression matches occurrences of the regular character <code>f</code> or <code>g</code>:</p> <pre> <code class="language-bash">$ teststr="Jeff and the pet Lucky. Gregg and the dog Fido. Chris has 1 bird named Tweety." $ echo $teststr | grep -Po 'f|g'</code></pre> <p>The <code>grep</code> command identifies each occurrence that satisfies the rule declared in the regular expression. Conceptually, the regular expression is saying, <em>Return any character that is either an f or a g</em>. We are leaving the search case-sensitive, as is the default. Thus, the identified characters are highlighted in bold text here:</p> <p><code>Je<strong>ff</strong> and the pet Lucky. Gre<strong>gg</strong> and the do<strong>g</strong> Fido. Chris has 1 bird named Tweety.</code></p> <p>Because each character is identified and returned on a one-by-one basis, the output sent to the terminal window is:</p> <pre> <code class="language-java">f f g g g</code></pre> <h3>How to find a character at the beginning of a line</h3> <p>The following example uses the <code>^</code> metacharacter to search for the beginning of a line of text. Conceptually, the <code>^</code> metacharacter matches the beginning of a line.</p> <p>The example executes the regular expression <code>^J</code>. This regular expression searches for a match that satisfies two conditions. The first condition is to find the beginning of the line; the next is to find the regular character <code>J</code> at that position.</p> <pre> <code class="language-bash">$ teststr="Jeff and the pet Lucky. Gregg and the dog Fido. Chris has 1 bird named Tweety." 
$ echo $teststr | grep -Po '^J'</code></pre> <p>The regular expression matches the character highlighted in bold text as shown here:</p> <p><code><strong>J</strong>eff and the pet Lucky. Gregg and the dog Fido. Chris has 1 bird named Tweety.</code></p> <p>The result returned to the terminal is:</p> <pre> <code class="language-java">J</code></pre> <h3>How to find a character at the end of a line</h3> <p>The following example uses the <code>$</code> metacharacter to search for the end of a line of text.</p> <p>The example executes the regular expression <code>\.$</code>. The regular expression declares a matching rule that has two conditions. First, the regular expression searches for an occurrence of the regular character <code>.</code> (dot). Then the regular expression looks to see whether the end of the line is next. Thus, if the <code>.</code> character comes at the end of the line, it's deemed a match.</p> <p>The regular expression includes a backslash (<code>\</code>) as an "escape" metacharacter before the dot. The escape metacharacter is needed to override the normal meaning of the dot as a metacharacter. Remember that the <code>.</code> (dot) metacharacter means <em>any character</em>. With the escape character, the dot is treated as a regular character, and so matches just itself:</p> <pre> <code class="language-bash">$ teststr="Jeff and the pet Lucky. Gregg and the dog Fido. Chris has 1 bird named Tweety." $ echo $teststr | grep -Po '\.$'</code></pre> <p>The regular expression matches the final dot in the text, highlighted in bold as shown here:</p> <p><code>Jeff and the pet Lucky. Gregg and the dog Fido. Chris has 1 bird named Tweety<strong>.</strong></code></p> <p>The result is just the final dot:</p> <pre> <code class="language-java">.</code></pre> <p>Suppose you were to use an unescaped dot in the regular expression:</p> <pre> <code class="language-bash">$ echo $teststr | grep -Po '.$'</code></pre> <p>You would get the same result as using the escaped dot, but a different logic is being executed. That logic is: <em>Match any character that is the last character before the end of the string</em>. Thus, the regular expression would always match any line. Using the escape character to identify a character as a regular character is a subtle distinction in this case, but an important one nonetheless.</p> <h3>How to find multiple characters at the end of a line</h3> <p>The following example searches the string assigned to the variable <code>teststr</code> to match the characters <code>ty.</code> when they appear at the end of a line.</p> <pre> <code class="language-bash">$ teststr="Jeff and the pet Lucky. Gregg and the dog Fido. Chris has 1 bird named Tweety." $ echo $teststr | grep -Po 'ty\.$'</code></pre> <p>The result is:</p> <pre> <code class="language-java">ty.</code></pre> <p>Again, note the use of the escape metacharacter (<code>\</code>) to declare the <code>.</code> (dot) character as a regular character.</p> <h3>How to find occurrences of a character using the metacharacters for matching numerals</h3> <p>The following example uses the <code>\d</code> metacharacter to create a regular expression that looks for matches of any numeral in a given piece of text.</p> <pre> <code class="language-bash">$ teststr="There are 9 cats and 2 dogs in a box."
$ echo $teststr | grep -Po '\d'</code></pre> <p>Because each numeral is matched and returned on a one-by-one basis, the output sent to the terminal is:</p> <pre> <code class="language-java">9 2</code></pre> <h3>How to find a string using metacharacters for a numeral and a space</h3> <p>The following example uses the <code>\d</code> and <code>\s</code> metacharacters along with regular characters to create a regular expression that matches text according to the following logic: <em>Match any numeral that is followed by a space and then the regular characters </em><strong><em>cats</em></strong>.</p> <p>The <code>\d</code> metacharacter matches a numeral and the <code>\s</code> metacharacter matches a whitespace character (a space, a tab, or a few other rare characters):</p> <pre> <code class="language-bash">$ teststr="There are 9 cats and 2 dogs in a box." $ echo $teststr | grep -Po '\d\scats'</code></pre> <p>The result is:</p> <pre> <code class="language-java">9 cats</code></pre> <h3>How to combine metacharacters to create a complex regular expression</h3> <p>The following example uses the <code>\d</code> metacharacter to match a numeral, <code>\s</code> to match a space, and <code>.</code> (dot) to match any character. The regular expression uses the <code>*</code> metacharacter to say, <em>Match zero or more successive occurrences of the preceding character.</em></p> <p>The logic expressed in the regular expression is this: <em>Find a string of text that starts with a numeral followed by a space character and the regular characters <strong>cats.</strong> Then keep going, matching any characters until you come to another numeral followed by a space character and the regular characters <strong>dogs</strong></em>:</p> <pre> <code class="language-bash">$ teststr="There are 9 cats and 2 dogs in a box." $ echo $teststr | grep -Po '\d\scats.*\d\sdogs'</code></pre> <p>The result is:</p> <pre> <code class="language-java">9 cats and 2 dogs</code></pre> <h3>How to traverse a line of text to a stop point</h3> <p>The following example uses the <code>.</code> (dot) metacharacter and <code>*</code> along with the regular characters <code>cats</code> to create a regular expression with the following logic: <em>Match any character zero or more times until you come to the characters <strong>cats</strong></em>:</p> <pre> <code class="language-bash">$ teststr="There are 9 cats and 2 dogs in a box." $ echo $teststr | grep -Po '.*cats'</code></pre> <p>The result is:</p> <pre> <code class="language-java">There are 9 cats</code></pre> <p>The interesting thing about this regular expression is that starting from the beginning of the line is implicit. The <code>^</code> metacharacter could be used to indicate the start of a line, but because the regular expression matches any characters until you come to <code>cats</code>, it isn't necessary to explicitly declare the start of the line using <code>^</code>. The regular expression starts processing from the beginning of the line by default.</p> <h2>Regular expressions uncover patterns in text</h2> <p>Regular expressions offer a powerful yet concise way to do complex text filtering. You can use them in programming languages such as JavaScript, Python, Perl, and C++, and directly in a Linux terminal to process files and text using the <code>grep</code> command, as demonstrated in this article.</p> <p>Getting the hang of regular expressions takes time. Mastering the intricacies of working with the metacharacters alone can be daunting.
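<p>One way to build that familiarity is to run the same rules through a programming language's regex engine. As a minimal sketch (the class name is hypothetical, and the backslashes are doubled only because the pattern sits inside a Java string literal), the <code>\d\scats.*\d\sdogs</code> rule from above could be exercised in Java like this:</p> <pre> <code class="language-java">import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class CatsAndDogs {
    public static void main(String[] args) {
        String teststr = "There are 9 cats and 2 dogs in a box.";
        // \d matches a numeral, \s a whitespace character, and .* any characters
        // zero or more times, exactly as in the grep examples above.
        Pattern pattern = Pattern.compile("\\d\\scats.*\\d\\sdogs");
        Matcher matcher = pattern.matcher(teststr);
        while (matcher.find()) {
            System.out.println(matcher.group()); // prints: 9 cats and 2 dogs
        }
    }
}</code></pre>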
Fortunately, the learning curve is developmental. You don't have to master the entirety of regular expressions to work with them usefully as a beginner. You can start with the basics, and as you learn more you can do more. Just being able to do pattern matching using the basic examples shown in this article can provide immediate benefit.</p> <p>An upcoming article in this series will explain regular expression features that are even more powerful.</p> The post <a href="https://developers.redhat.com/articles/2022/09/14/beginners-guide-regular-expressions-grep" title="A beginner’s guide to regular expressions with grep">A beginner’s guide to regular expressions with grep</a> appeared first on <a href="https://developers.redhat.com/blog" title="Red Hat Developer">Red Hat Developer</a>. <br /><br />Bob Reselman2022-09-14T07:00:00ZRemote dev-watch development with WildFly Jar Maven PluginEmmanuel Hugonnethttps://wildfly.org//news/2022/09/14/Remote-dev-watch/2022-09-14T00:00:00ZThe 8.0.0.Alpha2 version of the WildFly Bootable JAR Maven plugin has been released. This is not yet Final, as it is only there to gather feedback on a new feature that simplifies development on the "cloud" using the dev-watch goal. For people who are not familiar with the WildFly bootable JAR and its dev-watch goal, I strongly recommend that you read this earlier post that covers it in detail. DEV-WATCH GOAL The current dev-watch goal, although offering an efficient workflow to develop WildFly applications, requires the bootable application or server to run locally, in the same place as the project. The improvement made in this release allows the bootable application or server to run remotely, so that it can be in an environment that is closer to the target runtime environment. We are going to use the remote-microprofile-config example application to see how we can work remotely. Important This application applies the script anonymous-management.cli, which disables security on the Management API of WildFly; please make sure not to include it when going to production. DEVELOPING WITH A DOCKER CONTAINER BUILD AND RUN THE APPLICATION WITH DOCKER The first step is to create the container image where the application is running. For this we are going to use a very simple Dockerfile:

FROM registry.access.redhat.com/ubi8/openjdk-11:latest
COPY --chown=jboss:root target/*.jar /deployments/.
RUN chmod -R ug+rwX /deployments/.

To build that container image we are executing:

$ mvn clean install
$ podman build -f Dockerfile -t remote-microprofile-config:latest

And then we are going to run the container and expose the ports 8080 and 9990:

$ podman run -p 8080:8080 -p 9990:9990 -it remote-microprofile-config:latest

DEVELOP AND UPDATE THIS APPLICATION Now we need to run the dev-watch goal and remotely attach to the WildFly Management API. For this we need to execute the following command line:

$ mvn org.wildfly.plugins:wildfly-jar-maven-plugin:8.0.0.Alpha2:dev-watch \
    -Dwildfly.bootable.remote=true \
    -Dwildfly.bootable.remote.username=admin \
    -Dwildfly.bootable.remote.password=passW0rd! \
    -Dwildfly.hostname=${container.ip.address}

Check that the application is running properly:

$ curl http://${container.ip.address}:8080
config1 = Value from Config1 comes from an env var in the DeploymentConfig
config2 = Value for config2 comes from a properties file inside the application
config3 = Default value for config3 comes from my code

Once this is done you can edit the code and your changes will be automatically pushed to the remote container.
For example: * Change the config2 property value to be "Hello from dev-watch remote" in the file src/main/resources/META-INF/microprofile-config.properties. * Save your changes. * The application is redeployed and the new configuration will be taken into account:

$ curl http://${container.ip.address}:8080
config1 = Value from Config1 comes from an env var in the DeploymentConfig
config2 = Hello from dev-watch remote
config3 = Default value for config3 comes from my code

DEVELOPING ON OPENSHIFT BUILD AND RUN THE APPLICATION WITH OPENSHIFT We first need to build the application:

$ mvn clean install

Then to deploy it you need to drag and drop the produced remote-microprofile-config-bootable.jar on the Topology page on OpenShift. Now we need to expose the management API of WildFly by first editing the service to add a TCP port for 9990, and then adding a route to that port:

$ oc create route edge management-remote-microprofile-config-bootable --service=remote-microprofile-config-bootable --port=9990 --insecure-policy='Redirect'

DEVELOP AND UPDATE THIS APPLICATION Now we need to run the dev-watch goal and remotely attach to the WildFly Management API. For this we need to execute the following command line:

$ mvn -P bootable-jar-remote -Dwildfly.hostname=$(oc get route management-remote-microprofile-config-bootable --template='{{ .spec.host }}') install

You may also use a command like this one:

$ mvn org.wildfly.plugins:wildfly-jar-maven-plugin:8.0.0.Alpha2:dev-watch \
    -Dwildfly.bootable.remote=true \
    -Dwildfly.port=443 \
    -Dwildfly.bootable.remote.protocol=remote+https \
    -Dwildfly.hostname=$(oc get route management-remote-microprofile-config-bootable --template='{{ .spec.host }}')

Check that the application is running properly:

$ curl https://$(oc get route remote-microprofile-config-bootable --template='{{ .spec.host }}')
config1 = Value from Config1 comes from an env var in the DeploymentConfig
config2 = Value for config2 comes from a properties file inside the application
config3 = Default value for config3 comes from my code

Once this is done you can edit the code and your changes will be automatically pushed to the OpenShift instance. For example: * Change the config2 property value to be "Hello from dev-watch remote" in the file src/main/resources/META-INF/microprofile-config.properties. * Save your changes. * The application is redeployed and the new configuration will be taken into account:

$ curl https://$(oc get route remote-microprofile-config-bootable --template='{{ .spec.host }}')
config1 = Value from Config1 comes from an env var in the DeploymentConfig
config2 = Hello from dev-watch remote
config3 = Default value for config3 comes from my code

CONCLUSION We hope that you are seeing the benefits of the new features that this release is bringing. We would really appreciate your feedback on the dev-watch goal. We aim toward a smooth and efficient first-class WildFly developer experience, and we need you there!
Thank-you.Emmanuel HugonnetKafka Monthly Digest: August 2022Mickael Maison195c9344-8250-48a9-9554-85515b7d00a92022-09-13T07:00:00Z2022-09-13T07:00:00Z<p>This 55th edition of the <a href="https://developers.redhat.com/topics/kafka-kubernetes">Kafka</a> Monthly Digest covers what happened in the <a href="https://kafka.apache.org/">Apache Kafka</a> community in August 2022.</p> <p>For last month’s digest, see <a href="https://developers.redhat.com/articles/2022/08/04/kafka-monthly-digest-july-2022">Kafka Monthly Digest: July 2022</a>.</p> <h2>Releases</h2> <p>There is currently one release in progress, 3.3.0.</p> <h3>3.3.0</h3> <p>The release process for 3.3.0 continued. José Armando García Sancio published the first release candidate on August 29. A few issues, including <a href="https://issues.apache.org/jira/browse/KAFKA-14187">KAFKA-14187</a> and <a href="https://issues.apache.org/jira/browse/KAFKA-14156">KAFKA-14156</a>, were found during testing, so José built RC1 on September 1. The vote is currently ongoing. You can find the <a href="https://cwiki.apache.org/confluence/display/KAFKA/Release+Plan+3.3.0">release plan</a> in the wiki.</p> <h2>Kafka Improvement Proposals</h2> <p>Last month, the community submitted three <a href="https://cwiki.apache.org/confluence/display/KAFKA/Kafka+Improvement+Proposals">Kafka Improvement Proposals (KIPs)</a> (KIP-863 to KIP-865). I'll highlight a couple of them:</p> <ul> <li><p><a href="https://cwiki.apache.org/confluence/display/KAFKA/KIP-864%3A+Add+End-To-End+Latency+Metrics+to+Connectors">KIP-864: Add End-To-End Latency Metrics to Connectors</a>. This KIP proposes adding a few new metrics to track end-to-end latency for records flowing through Connect. This would also include metrics tracking the time spent in converters.</p></li> <li><p><a href="https://cwiki.apache.org/confluence/display/KAFKA/KIP-865%3A+Support+--bootstrap-server+in+kafka-streams-application-reset">KIP-865: Support --bootstrap-server in kafka-streams-application-reset</a>. This very small KIP aims at addressing a discrepancy with the <code>kafka-streams-application-reset.sh</code> tool. This tool currently uses the <code>--bootstrap-servers</code> flag, while all other tools use <code>--bootstrap-server</code>, so it will be updated for consistency.</p></li> </ul> <h2>Community releases</h2> <ul> <li><a href="https://github.com/tchiotludo/akhq/releases/tag/0.22.0">akhq 0.22</a>: AKHQ is a GUI for Apache Kafka. This new version adds a few new features, including support for listing ACLs on Cluster and TransactionalIds and sending Protobuf records via the UI.</li> <li><a href="https://github.com/tulios/kafkajs/releases/tag/v2.2.0">kafkajs 2.2.0</a>: Kafkajs is a pure JavaScript Kafka client for Node.js. 
This release adds support for triggering and listing partition reassignments in its Admin API and contains a few fixes.</li> </ul> <h2>Blogs</h2> <p>I selected some interesting blog articles that were published last month:</p> <ul> <li><a href="https://towardsdatascience.com/machine-learning-streaming-with-kafka-debezium-and-bentoml-c5f3996afe8f">Machine Learning Streaming with Kafka, Debezium, and BentoML</a></li> <li><a href="https://medium.com/event-driven-utopia/building-cqrs-views-with-debezium-kafka-materialize-and-apache-pinot-part-1-4f697735b2e4">Building CQRS Views with Debezium, Kafka, Materialize, and Apache Pinot — Part 1</a></li> <li><a href="https://medium.com/event-driven-utopia/building-cqrs-views-with-debezium-kafka-materialize-and-apache-pinot-part-2-6899e9efc74e">Building CQRS Views with Debezium, Kafka, Materialize, and Apache Pinot — Part 2</a></li> </ul> <p>To learn more about Kafka, visit <a href="https://developers.redhat.com/topics/kafka-kubernetes">Red Hat Developer's Apache Kafka topic page</a>.</p> The post <a href="https://developers.redhat.com/articles/2022/09/13/kafka-monthly-digest-august-2022" title="Kafka Monthly Digest: August 2022">Kafka Monthly Digest: August 2022</a> appeared first on <a href="https://developers.redhat.com/blog" title="Red Hat Developer">Red Hat Developer</a>. <br /><br />Mickael Maison2022-09-13T07:00:00ZJoin the Red Hat team at NodeConf EU 2022Lucas Holmquistaf2d04c4-c425-4b66-b2db-7b7fc5ba23c02022-09-23T07:00:00Z2022-09-23T07:00:00Z<p>It's that time of the year again, and NodeConf EU is almost upon us. This annual event is one of the leading <a data-entity-substitution="canonical" data-entity-type="node" data-entity-uuid="43652567-d1ab-4765-a588-4e905032ad7f" href="https://developers.redhat.com/topics/nodejs" title="Node.js: Develop server-side JavaScript applications">Node.js</a> events in Europe. It brings together contributors and innovators from the Node.js community to deliver a wide range of talks and workshops.</p> <p>The conference will be back in person this year, on October 3rd–5th in Kilkenny, Ireland, after being virtual for the past two years.</p> <p>The Node.js team here at Red Hat will be talking about lesser-known Node.js Core modules as well as guiding attendees through a workshop that will get you familiar with cloud-native development with Node.js. </p> <h2>Talk: Journey into mystery: Lesser-known Node Core modules and APIs</h2> <p>Wednesday, October 4th, 2022, 9:30 UTC</p> <p>Presenter: Luke Holmquist (<a href="https://twitter.com/sienaluke">@sienaluke</a>), Senior Software Engineer, Red Hat</p> <p>One of the key concepts of Node.js is its modular architecture, and Node makes it very easy to use a wide variety of modules and <a href="https://developers.redhat.com/topics/api-management">APIs</a> from the community. Some of the modules and APIs that are part of Node.js Core are very familiar, like HTTP and Events. But what about those lesser-known core modules just waiting to be used?
This talk will journey into mystery as we explore some of the lesser-known Core modules and APIs that Node.js offers.</p> <h2>Workshop: Elevating Node.js applications to the cloud</h2> <p>Wednesday, October 4th, 2022, 3:00 UTC</p> <p>Presenters:</p> <ul> <li>Bethany Griggs, Senior Software Engineer, Red Hat</li> <li>Michael Dawson (<a href="https://twitter.com/mhdawson1">@mhdawson1</a>), Node.js Lead, Red Hat</li> <li>Luke Holmquist (<a href="https://twitter.com/sienaluke">@sienaluke</a>), Senior Software Engineer, Red Hat</li> </ul> <p>This workshop provides an introduction to cloud-native development with Node.js. We will walk you through building cloud-native Node.js applications, incorporating typical components, including observability components for <a href="https://developers.redhat.com/articles/2021/05/10/introduction-nodejs-reference-architecture-part-2-logging-nodejs">logging</a>, metrics, and more. Next, we'll show you how to deploy your application to cloud environments. The workshop will cover cloud-native concepts and technologies, including health checks, metrics, building <a href="https://developers.redhat.com/topics/containers">containers</a>, and deployments to <a href="https://developers.redhat.com/topics/kubernetes">Kubernetes</a>.</p> <p>For a full list of the various talks and workshops, check out the <a href="https://www.nodeconf.eu/agenda">NodeConf EU 2022 agenda</a>.</p> <h2>Collaborator Summit</h2> <p>There will also be an OpenJS Collaborator Summit in Dublin, Ireland on October 1-2, 2022, two days before NodeConf EU. We hope to see you there to discuss all things <a href="https://developers.redhat.com/topics/javascript">JavaScript</a> and Node.js. Our team members will be leading or actively participating in many sessions.</p> <p>The Collab Summit is for maintainers or core contributors of an OpenJS project, plus any open source enthusiast interested in participating. This is the time for deep dives on important topics and to meet with people working across your favorite JavaScript projects. Get more details on the <a href="https://openjsf.org/blog/2022/09/01/openjs-collaborator-summit-join-us-in-dublin-virtual-october-1-2%EF%BF%BC/">OpenJS website</a>.</p> <h2>More Node.js resources</h2> <p>Don't miss the latest installments of our series on the <a href="https://developers.redhat.com/blog/2021/03/08/introduction-to-the-node-js-reference-architecture-part-1-overview">Node.js reference architecture</a>.</p> <p>If you want to learn more about Red Hat and IBM’s involvement in the Node.js community and what we are working on, check out our topic pages at <a href="https://developers.redhat.com/topics/nodejs">Red Hat Developer</a> and <a href="https://developer.ibm.com/languages/node-js/">IBM Developer</a>. </p> The post <a href="https://developers.redhat.com/articles/2022/09/23/join-red-hat-team-nodeconf-eu-2022" title="Join the Red Hat team at NodeConf EU 2022">Join the Red Hat team at NodeConf EU 2022</a> appeared first on <a href="https://developers.redhat.com/blog" title="Red Hat Developer">Red Hat Developer</a>. <br /><br />Lucas Holmquist2022-09-23T07:00:00ZCreating your first cloud-agnostic serverless application with JavaHelber Belmirohttps://blog.kie.org/2022/09/creating-your-first-cloud-agnostic-serverless-application-with-java.html2022-09-22T10:35:00ZIf you are new to Serverless Workflow or serverless in general, creating a simple application for a serverless infrastructure is a good place to start.
In this article, you will run through the steps to create your first serverless Java application that runs on any cloud.

WHAT IS SERVERLESS?

Contrary to what the name says, there are still servers in serverless, but you don’t need to worry about managing them. You just need to deploy your containers, and the serverless infrastructure is responsible for providing the resources your application needs to scale up or down. The best part is that it automatically scales up when there is high demand and scales to zero when there is no demand, which reduces the amount of money you spend on the cloud.

WHAT WILL YOU CREATE?

You will use Quarkus to create a simple Java application that returns a greeting message to an HTTP request and deploy it to Knative.

WHY KNATIVE?

In the beginning, serverless applications used to consist of small pieces of code that were run by a cloud vendor, like AWS Lambda. In this first phase, the applications had some limitations and were closely coupled to the vendor libraries. Knative enables developers to run serverless applications on a Kubernetes cluster. This gives you the flexibility to run your applications on any cloud, on-premises, or even a mix of them all.

WHY QUARKUS?

Because serverless applications need to start fast. Since the biggest advantage of serverless is scaling up and down (even to zero) according to demand, serverless applications need to start fast when scaling up; otherwise, requests would be denied. One of the greatest characteristics of Quarkus applications is their super fast start-up. Also, Quarkus is Kubernetes-native, which means that it’s easy to deploy Quarkus applications to Kubernetes without having to understand the intricacies of the underlying Kubernetes framework.

REQUIREMENTS

* A local Knative installation. See the Knative installation documentation.
* This article uses minikube as the local Kubernetes cluster.
* kn CLI installed. See the kn installation instructions.
* JDK 11+ installed with JAVA_HOME configured appropriately.
* Apache Maven 3.8.1+.
* GraalVM (optional, to deploy a native image).

CREATE A QUARKUS APPLICATION

> NOTE: If you don’t want to create the application, you can just clone the finished project and skip to the deployment section.

mvn io.quarkus.platform:quarkus-maven-plugin:2.11.2.Final:create \ -DprojectGroupId=org.acme \ -DprojectArtifactId=knative-serving-quarkus-demo

cd knative-serving-quarkus-demo

RUN YOUR APPLICATION LOCALLY

To verify that you created the project correctly, run the project locally by running the following command:

mvn quarkus:dev

After downloading the dependencies and building the project, you should see an output similar to:

__ ____ __ _____ ___ __ ____ ______ --/ __ \/ / / / _ | / _ \/ //_/ / / / __/ -/ /_/ / /_/ / __ |/ , _/ ,< / /_/ /\ \ --\___\_\____/_/ |_/_/|_/_/|_|\____/___/
2022-08-15 16:50:25,135 INFO [io.quarkus] (Quarkus Main Thread) knative-serving-quarkus-demo 1.0.0-SNAPSHOT on JVM (powered by Quarkus 2.11.2.Final) started in 1.339s. Listening on: http://localhost:8080
2022-08-15 16:50:25,150 INFO [io.quarkus] (Quarkus Main Thread) Profile dev activated. Live Coding activated.
2022-08-15 16:50:25,150 INFO [io.quarkus] (Quarkus Main Thread) Installed features: [cdi, resteasy-reactive, smallrye-context-propagation, vertx]

On a different terminal window or in the browser, you can access the application by sending a request to the /hello endpoint:

curl -X 'GET' 'http://localhost:8080/hello' -H 'accept: text/plain'

If you see the following output, then you have successfully created your application:

Hello from RESTEasy Reactive

Hit Ctrl + C to stop the application.
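The greeting above is produced by the JAX-RS resource that the Quarkus scaffold generates. For reference, here is a minimal sketch of such a resource; the package and class names are assumptions, and your generated project may differ slightly:

package org.acme;

import javax.ws.rs.GET;
import javax.ws.rs.Path;
import javax.ws.rs.Produces;
import javax.ws.rs.core.MediaType;

// Minimal JAX-RS resource, served by RESTEasy Reactive at /hello
@Path("/hello")
public class GreetingResource {

    @GET
    @Produces(MediaType.TEXT_PLAIN)
    public String hello() {
        return "Hello from RESTEasy Reactive";
    }
}

Note that the class contains no Knative-specific code: the serverless behavior comes entirely from how the application is packaged and deployed in the next sections.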
PREPARE YOUR APPLICATION FOR DEPLOYMENT TO KNATIVE

ADD THE REQUIRED DEPENDENCIES

Add the following dependencies to the pom.xml file:

<dependency> <groupId>io.quarkus</groupId> <artifactId>quarkus-kubernetes</artifactId> </dependency> <dependency> <groupId>io.quarkus</groupId> <artifactId>quarkus-container-image-jib</artifactId> </dependency>

CONFIGURE THE APPLICATION FOR DEPLOYMENT TO KNATIVE

Add the following configuration to the src/main/resources/application.properties file:

quarkus.kubernetes.deployment-target=knative
quarkus.container-image.group=dev.local/hbelmiro

> NOTE: In the quarkus.container-image.group property, replace hbelmiro with your container registry username.

DEPLOY YOUR APPLICATION TO KNATIVE

START THE MINIKUBE TUNNEL

> NOTE: This step is only necessary if you are using minikube as the local Kubernetes cluster.

On a different terminal window, run the following command to start the minikube tunnel:

minikube tunnel --profile knative

You should see an output similar to the following:

Status: machine: knative pid: 223762 route: 10.96.0.0/12 -> 192.168.49.2 minikube: Running services: [kourier] errors: minikube: no errors router: no errors loadbalancer emulator: no errors

Leave the terminal window open with the above command running.

CONFIGURE THE CONTAINER CLI TO USE THE CONTAINER ENGINE INSIDE MINIKUBE

eval $(minikube -p knative docker-env)

DEPLOY THE APPLICATION

Run the following command to deploy the application to Knative:

mvn clean package -Dquarkus.kubernetes.deploy=true

You should see an output similar to the following:

[INFO] [io.quarkus.kubernetes.deployment.KubernetesDeployer] Deploying to knative server: https://192.168.49.2:8443/ in namespace: default. [INFO] [io.quarkus.kubernetes.deployment.KubernetesDeployer] Applied: Service knative-serving-quarkus-demo. [INFO] [io.quarkus.deployment.QuarkusAugmentor] Quarkus augmentation completed in 8952ms [INFO] ------------------------------------------------------------------------ [INFO] BUILD SUCCESS [INFO] ------------------------------------------------------------------------

CHECK THE KNATIVE DEPLOYED SERVICES

Run the following command to check the Knative deployed services:

kn service list

You should see your application listed on the deployed services like the following:

NAME URL LATEST AGE CONDITIONS READY REASON knative-serving-quarkus-demo http://knative-serving-quarkus-demo.default.10.106.207.219.sslip.io knative-serving-quarkus-demo-00001 23s 3 OK / 3 True

> IMPORTANT: In the above output, check the READY status of the application. If the status is not True, then you need to wait for the application to be ready, or there is a problem with the deployment.

SEND A REQUEST TO THE DEPLOYED APPLICATION

Use the URL returned by the above command to send a request to the deployed application.

curl -X 'GET' 'http://knative-serving-quarkus-demo.default.10.106.207.219.sslip.io/hello' -H 'accept: text/plain'

You should see the following output:

Hello from RESTEasy Reactive

GOING NATIVE

You can create a native image of your application to make it start even faster. To do that, deploy your application by using the following command:

mvn clean package -Pnative -Dquarkus.native.native-image-xmx=4096m -Dquarkus.native.remote-container-build=true -Dquarkus.kubernetes.deploy=true

> IMPORTANT: -Dquarkus.native.native-image-xmx=4096m sets the amount of memory Quarkus can use to generate the native image. You should adjust it or completely remove it depending on your local machine’s specifications.
NOW YOU ARE READY TO RUN SERVERLESS APPLICATIONS USING JAVA

Easy, isn’t it? Quarkus and Knative give you the freedom to run serverless applications using Java on-premises or in the cloud, no matter the vendor. You can even mix more than one cloud vendor with your on-premises infrastructure. This flexibility brings you agility and reduces your infrastructure costs.

NEXT STEP

If you want to go further on serverless with more exciting stuff, check out  The post appeared first on .Helber BelmiroLearn about the new BGP capabilities in Red Hat OpenStack 17Daniel Alvarez Sanchezd3766211-f376-45f2-b86d-2b3cbe44900a2022-09-22T07:00:00Z2022-09-22T07:00:00Z<p>The <a href="https://www.redhat.com/en/technologies/linux-platforms/openstack-platform">Red Hat OpenStack Platform</a> is an Infrastructure-as-a-Service (IaaS) offering from Red Hat. Version 17.0 of the platform includes dynamic routing for both the control and data planes. This lets you deploy a cluster in a pure layer-3 (L3) data center, overcoming the scaling issues of traditional layer-2 (L2) infrastructures such as large failure domains, large broadcast traffic, or long convergence times in the event of failures.</p> <p>This article will illustrate this new feature by outlining a simple three-rack spine and leaf topology, where the layer-2 boundaries are within each rack on the Red Hat OpenStack Platform. The control plane spans the three racks, and each rack also hosts a compute node. Figure 1 illustrates our topology.</p> <div class="rhd-c-figure"> <article class="align-center media media--type-image media--view-mode-article-content"> <div class="field field--name-image field--type-image field--label-hidden field__items"> <a href="https://developers.redhat.com/sites/default/files/fig1_12.png" data-featherlight="image"><img src="https://developers.redhat.com/sites/default/files/styles/article_floated/public/fig1_12.png?itok=5G4F2AYN" width="600" height="285" alt="Diagram showing two leaf nodes connecting each control node to the spines." loading="lazy" typeof="Image" /> </a> </div> <div class="field field--name-field-caption field--type-string field--label-hidden field__items"> <div class="rhd-c-caption field__item"> Figure 1: Two leaf nodes connect each control node to the spines. </div> </div> </article> </div> <p>The main characteristics of this deployment are:</p> <ul> <li>Border Gateway Protocol (BGP) is running on every element in the network: controllers, computes, leaves, and spine. The Red Hat OpenStack Platform uses <a href="https://frrouting.org/">FRRouting</a> (FRR) to enable BGP in the overcloud nodes, and it operates here as follows: <ul> <li>Leaves are configured as route reflectors, re-advertising learned routes to the spine.</li> <li>The IPv6 link-local address of each interface uses <em>BGP Unnumbered</em> to establish BGP sessions.
There is no need to assign and configure unique IP addresses on these interfaces, simplifying the deployment.</li> <li>FRR advertises all local IP addresses (that is, /32 on IPv4 or /128 on IPv6) as directly connected host routes.</li> </ul> </li> <li>Each device has outgoing default <a href="https://study-ccna.com/ecmp-equal-cost-multi-path/">equal-cost multi-path routing</a> (ECMP) routes for load balancing and high availability (no L2 bonds).</li> <li><a href="https://datatracker.ietf.org/doc/rfc5880/">Bidirectional Forwarding Detection</a> (BFD), which is <a href="https://opendev.org/openstack/tripleo-ansible/src/commit/7da489819193352f009949f10fe988809a607ab7/tripleo_ansible/roles/tripleo_frr/defaults/main.yml#L23-L32">configurable</a>, is used for network failure detection for fast convergence times.</li> <li><a href="https://docs.hpc.cam.ac.uk/cloud/userguide/02-neutron.html">OpenStack Neutron</a> and <a href="https://www.ovn.org/en/">Open Virtual Network</a> (OVN) are agnostic and require no changes or configuration. <ul> </ul> </li> </ul> <h3>Constraints and limitations</h3> <p>Before we move on, it's worth noting the constraints and limitations of the implementation shown in this article:</p> <ul> <li>This feature will only work with the Neutron <a href="https://docs.openstack.org/neutron/latest/admin/config-ml2.html">ML2/OVN</a> mechanism driver.</li> <li>Workloads in provider networks and floating IP addresses are advertised. Routes to these workloads go directly to the compute node hosting the virtual machine (VM).</li> <li>Tenant networks can <a href="https://opendev.org/openstack/tripleo-ansible/src/commit/2381a7c3b246713744ab259ea8ac22be826344cb/tripleo_ansible/roles/tripleo_frr/defaults/main.yml#L69">optionally be advertised</a>, but: <ul> <li>Overlapping CIDRs are not supported. Tenants need to ensure uniqueness (e.g., through the use of <a href="https://docs.openstack.org/neutron/wallaby/admin/config-address-scopes.html">address scopes</a>).</li> <li>Traffic to workloads in tenant networks traverses the gateway node.</li> </ul> </li> <li>An <a href="https://opendev.org/x/ovn-bgp-agent">agent</a> is required to run on each overcloud node. This agent is responsible for steering the traffic to or from the OVN overlay, as well as triggering FRR to advertise the IPv4 or IPv6 addresses of the workloads.</li> <li>The provider bridge (typically <code>br-ex</code> or <code>br-provider</code>) is not connected to a physical NIC or bond. Instead, egress traffic from the local VMs is processed by an extra routing layer in the Linux kernel. Similarly, ingress traffic is processed by this extra routing layer and forwarded to OVN through the provider bridge.</li> <li>There is no support for datapath acceleration, because the agent relies on kernel networking to steer the traffic between the NICs and OVN. Acceleration mechanisms such as <a href="https://docs.openvswitch.org/en/latest/intro/install/dpdk/">Open vSwitch with DPDK</a> or <a href="https://docs.openstack.org/neutron/rocky/admin/config-ovs-offload.html">OVS hardware offloading</a> are not supported. 
Similarly, <a href="https://www.networkworld.com/article/3535850/what-is-sr-iov-and-why-is-it-the-gold-standard-for-gpu-sharing.html">SR-IOV</a> is not compatible with this configuration because it skips the hypervisor.</li> </ul> <h2>Control plane</h2> <p>With this configuration, the control plane no longer has to be in the same L3 network as the endpoints, because endpoints are advertised via BGP and traffic is <em>routed</em> to the nodes hosting the services.</p> <p><em>High availability</em> (HA) is provided fairly simply. Instead of announcing the VIP location upon failover by sending broadcast GARPs to the upstream switch, <a href="https://clusterlabs.org/pacemaker/doc/2.1/Pacemaker_Explained/singlehtml/">Pacemaker</a> just configures the VIP addresses in the loopback interface, which triggers FRR to advertise a directly connected host route to it.</p> <h3>Sample traffic route</h3> <p>Let's take the example of the control plane's <a href="http://www.haproxy.org">HAproxy </a>endpoint and check its Pacemaker configuration:</p> <pre> <code class="language-java">[root@ctrl-1-0 ~]# pcs constraint colocation config Colocation Constraints: ip-172.31.0.1 with haproxy-bundle (score:INFINITY) [root@ctrl-1-0 ~]# pcs resource config ip-172.31.0.1 Resource: ip-172.31.0.1 (class=ocf provider=heartbeat type=IPaddr2) Attributes: cidr_netmask=32 ip=172.31.0.1 nic=lo Meta Attrs: resource-stickiness=INFINITY Operations: monitor interval=10s timeout=20s (ip-172.31.0.1-monitor-interval-10s) start interval=0s timeout=20s (ip-172.31.0.1-start-interval-0s) stop interval=0s timeout=20s (ip-172.31.0.1-stop-interval-0s) [root@ctrl-1-0 ~]# ip addr show lo 1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000 link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 inet 127.0.0.1/8 scope host lo valid_lft forever preferred_lft forever inet 172.31.0.1/32 scope global lo valid_lft forever preferred_lft forever ...</code></pre> <p>After Pacemaker configures the VIP in one of the nodes, it configures this IP address in the <code>lo</code> interface, triggering FRR to advertise a directly connected route on that node:</p> <pre> <code class="language-java">[root@ctrl-1-0 ~]# podman exec -it frr vtysh -c "show ip bgp" | grep 172.31.0.1 *> 172.31.0.1/32 0.0.0.0 0 0 32768 ?</code></pre> <p>Now we can explore the route to this IP address, which is hosted by <code>ctrl-1-0</code>, from the <code>leaf-2-1</code> leaf node in <code>rack-2</code>:</p> <pre> <code class="language-java"># for i in leaf-2-1 spine-2 spine-1 leaf-1-1 leaf-1-2; do ssh $i ip route show 172.31.0.1; done Warning: Permanently added 'leaf-2-1' (ECDSA) to the list of known hosts. 172.31.0.1 nhid 330 proto bgp metric 20 nexthop via inet6 fe80::5054:ff:fefe:158a dev eth2 weight 1 nexthop via inet6 fe80::5054:ff:fe55:bdf dev eth1 weight 1 Warning: Permanently added 'spine-2' (ECDSA) to the list of known hosts. 172.31.0.1 nhid 161 proto bgp metric 20 nexthop via inet6 fe80::5054:ff:feb4:d2d0 dev eth3 weight 1 nexthop via inet6 fe80::5054:ff:fec5:7bad dev eth2 weight 1 Warning: Permanently added 'spine-1' (ECDSA) to the list of known hosts. 172.31.0.1 nhid 439 proto bgp metric 20 nexthop via inet6 fe80::5054:ff:fe6f:466b dev eth3 weight 1 nexthop via inet6 fe80::5054:ff:fe8d:c63b dev eth2 weight 1 Warning: Permanently added 'leaf-1-1' (ECDSA) to the list of known hosts. 172.31.0.1 nhid 142 via 100.65.1.2 dev eth3 proto bgp metric 20 Warning: Permanently added 'leaf-1-2' (ECDSA) to the list of known hosts. 
172.31.0.1 nhid 123 via 100.64.0.2 dev eth3 proto bgp metric 20</code></pre> <p>Traffic directed to the OpenStack control plane VIP (172.31.0.1) from <code>leaf-2-1</code> goes through either the <code>eth1</code> (on <code>spine-1</code>) or <code>eth2</code> (on <code>spine-2</code>) ECMP routes. The traffic continues from <code>spine-1</code> on ECMP routes again to <code>leaf-1-1</code>, or from <code>spine-2</code> to <code>leaf-1-2</code>. Finally, the traffic goes through <code>eth3</code> to the controller hosting the service, <code>ctrl-1-0</code>.</p> <h3>High availability through BFD</h3> <p>As mentioned earlier, BFD is running in the network to detect network failures. In order to illustrate its operation, following the example in the previous section, let's take down the NIC in <code>leaf-1-1</code> that connects to the controller node, and see how the routes adjust on the <code>spine-1</code> node to go through the other leaf in the same rack.</p> <p>Initially, there is an ECMP route in the <code>spine-1</code> node to the VIP that sends the traffic to both leaves in rack 1:</p> <pre> <code class="language-java">[root@spine-1 ~]# ip route show 172.31.0.1 172.31.0.1 nhid 179 proto bgp metric 20 nexthop via inet6 fe80::5054:ff:fe6f:466b dev eth3 weight 1 nexthop via inet6 fe80::5054:ff:fe8d:c63b dev eth2 weight 1</code></pre> <p>Now let's bring down the interface that connects <code>leaf-1-1</code> to <code>ctrl-1-0</code>, which is hosting the VIP:</p> <pre> <code class="language-java">[root@leaf-1-1 ~]# ip link set eth3 down</code></pre> <p>The BFD state changes to <code>down</code> for this interface, and the route is withdrawn in the spine; it now goes only through <code>leaf-1-2</code>:</p> <pre> <code class="language-java"> [root@leaf-1-1 ~]# tail -f /var/log/frr/frr.log | grep state-change 2022/09/08 12:14:47 BFD: [SEY1D-NT8EQ] state-change: [mhop:no peer:100.65.1.2 local:100.65.1.1 vrf:default ifname:eth3] up -> down reason:control-expired [root@spine-1 ~]# ip route show 172.31.0.1 172.31.0.1 nhid 67 via inet6 fe80::5054:ff:fe6f:466b dev eth3 proto bgp metric 20</code></pre> <p>Similarly, if we bring up the interface again, BFD will detect this condition and the ECMP route will be re-installed.</p> <p>The newly introduced <code>frr</code> container runs in all controller, network, and compute nodes. Its configuration can be queried through the following command:</p> <pre> <code class="language-bash">$ sudo podman exec -it frr vtysh -c 'show run'</code></pre> <h2>Data plane</h2> <p>The data plane refers here to the workloads running in the OpenStack cluster. This section describes the main pieces introduced in this configuration to allow VMs to communicate in a Layer-3 only datacenter.</p> <h3>OVN BGP Agent</h3> <p>The <a href="https://opendev.org/x/ovn-bgp-agent">OVN BGP Agent</a> is a <a href="https://developers.redhat.com/topics/python">Python</a>-based daemon that runs on every compute and network node. This agent connects to the OVN southbound database and keeps track of when a workload is spawned or shut down on a particular hypervisor. The agent then triggers FRR to advertise or withdraw its IP addresses, respectively. The agent is also responsible for configuring the extra routing layer between the provider bridge (<code>br-ex</code> or <code>br-provider</code>) and the physical NICs.</p> <h3>BGP advertisement</h3> <p>The same principle shown earlier for the control plane applies to the data plane.
The difference is that for the control plane, Pacemaker configures the IP addresses to the loopback interface, whereas for the data plane, the OVN BGP Agent adds the addresses to a local <a href="https://access.redhat.com/solutions/5855721">VRF</a>. The VRF is used for isolation, because we don't want these IP addresses to interfere with the host routing table. We just want to trigger FRR to advertise and withdraw the addresses as appropriate (Figure 2).</p> <div class="rhd-c-figure"> <article class="align-center media media--type-image media--view-mode-article-content"> <div class="field field--name-image field--type-image field--label-hidden field__items"> <a href="https://developers.redhat.com/sites/default/files/fig2_10.png" data-featherlight="image"><img src="https://developers.redhat.com/sites/default/files/styles/article_floated/public/fig2_10.png?itok=fFZZXz67" width="600" height="479" alt="Diagram showing that the OVN BGP Agent controls FRR in order to advertise/withdraw routes." loading="lazy" typeof="Image" /> </a> </div> <div class="field field--name-field-caption field--type-string field--label-hidden field__items"> <div class="rhd-c-caption field__item"> Figure 2: The OVN BGP Agent controls FRR in order to advertise/withdraw routes. </div> </div> </article> </div> <div> </div> <h3>Traffic routing</h3> <p>As mentioned earlier, OVN has not been modified in any way to support this configuration. Thus, OVN believes that the L2 broadcast domain of the provider networks spans multiple hypervisors, but this is not true anymore. Both ingress and egress traffic require an extra layer of routing. The OVN BGP Agent is responsible for configuring this layer through the following actions:</p> <ol> <li> <p>Enable an ARP/NDP proxy in the provider bridge. 
Requests don't hit the destination because there's no L2 connectivity, so they're answered locally by the kernel:</p> <pre> <code class="language-bash">$ sysctl net.ipv4.conf.br-ex.proxy_arp net.ipv4.conf.br-ex.proxy_arp = 1 $ sysctl net.ipv6.conf.br-ex.proxy_ndp net.ipv6.conf.br-ex.proxy_ndp = 1</code></pre> </li> <li> <p>For ingress traffic, add host routes in the node to forward the traffic to the provider bridge:</p> <pre> <code class="language-bash">$ sudo ip rule show | grep br-ex 32000: from all to 172.24.100.217 lookup br-ex $ sudo ip route show table br-ex default dev br-ex scope link 172.24.100.217 dev br-ex scope link</code></pre> </li> <li> <p>For egress traffic, add flows that change the destination MAC address to that of the provider bridge, so that the kernel will forward the traffic using the default outgoing ECMP routes:</p> <pre> <code class="language-bash">$ ip link show br-ex 7: br-ex: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UNKNOWN mode DEFAULT group default qlen 1000 link/ether 3e:cc:28:d7:10:4e brd ff:ff:ff:ff:ff:ff $ sudo ovs-ofctl dump-flows br-ex cookie=0x3e7, duration=48.114s, table=0, n_packets=0, n_bytes=0, priority=900,ip,in_port="patch-provnet-b" actions=mod_dl_dst:3e:cc:28:d7:10:4e,NORMAL cookie=0x3e7, duration=48.091s, table=0, n_packets=0, n_bytes=0, priority=900,ipv6,in_port="patch-provnet-b" actions=mod_dl_dst:3e:cc:28:d7:10:4e,NORMAL cookie=0x0, duration=255892.138s, table=0, n_packets=6997, n_bytes=1368211, priority=0 actions=NORMAL $ ip route show default default nhid 34 proto bgp src 172.30.2.2 metric 20 nexthop via 100.64.0.5 dev eth1 weight 1 nexthop via 100.65.2.5 dev eth2 weight 1 </code></pre> </li> </ol> <p>This example is for a VM on a provider network and applies as well to Floating IP addresses. However, for workloads in tenant networks, host routes are advertised from network and compute nodes using the Neutron gateway IP address as the next hop. From the gateway node, the traffic reaches the destination compute node through the Geneve tunnel (L3) as usual.</p> <h2>References</h2> <p>More information can be found at:</p> <ul> <li><a href="https://opendev.org/x/ovn-bgp-agent/src/commit/1fa471083c4fdbdac8d2781822c55eb7b8069fa2/doc/source/contributor/bgp_mode_design.rst">OVN BGP Agent upstream documentation</a></li> <li><a href="https://ltomasbo.wordpress.com/2021/02/04/ovn-bgp-agent-in-depth-traffic-flow-inspection/">OVN BGP Agent: In-depth traffic flow inspection blogpost</a></li> <li><a href="https://www.youtube.com/watch?v=eKH14UN856o">OpenInfra Summit Berlin '22 - Using BGP to interconnect workloads across clouds</a> (video)</li> <li><a href="https://www.youtube.com/watch?v=91daVTMt9AA">Devconf 2021 - Layer 3 Networking with BGP in hyperscale DCx</a> (video)</li> </ul> The post <a href="https://developers.redhat.com/articles/2022/09/22/learn-about-new-bgp-capabilities-red-hat-openstack-17" title="Learn about the new BGP capabilities in Red Hat OpenStack 17">Learn about the new BGP capabilities in Red Hat OpenStack 17</a> appeared first on <a href="https://developers.redhat.com/blog" title="Red Hat Developer">Red Hat Developer</a>. 
<br /><br />Daniel Alvarez Sanchez2022-09-22T07:00:00ZThis Week in JBoss - 22 September 2022Romain Pelissehttps://www.jboss.org/people/romain-pelissedo-not-reply@jboss.comhttps://www.jboss.org/posts/weekly-2022-09-22.html2022-09-22T00:00:00Z<article class="" data-tags="quarkus,resteasy,kie,keycloak,wildfly"> <h1>This Week in JBoss - 22 September 2022</h1> <p class="preamble"></p><p><em>Hi everyone and welcome to the latest installment of JBoss editorial! Today’s stars of the show: Quarkus and KIE (Kogito/Drools)</em></p><p></p> <div class="sect1"> <h2 id="_quarkus">Quarkus</h2> <div class="sectionbody"> <p>Quarkus is quite busy this month! Just yesterday, the project released <a href="https://quarkus.io/blog/quarkus-2-12-3-final-released/">Quarkus 2.12.3.Final</a>, the third round of bugfixes and performance enhancements for 2.12, which we mentioned in our previous editorial. But that’s not all: Quarkus tooling also got some love with the release of <a href="https://quarkus.io/blog/intellij-quarkus-tools-1.13.0/">Quarkus Tools for IntelliJ 1.13.0 released!</a>.</p> <p>Beyond the publication of new software and bugfixes, James Cobb also took the time to publish the 24th installment of the <a href="https://quarkus.io/newsletter/24/">Quarkus Newsletter</a>, a must-read for anyone who wants to follow or play with Quarkus! And to this point, an interesting new player has joined the project’s community: <a href="https://quarkus.io/blog/aphp-user-story/">Quarkus adoption by APHP (Assistance Publique des Hôpitaux de Paris)</a>!</p> <p>Of course, if you are already familiar with Quarkus, you may want something more technical to quench your thirst, and Clément Escoffier has just the article for you: <a href="https://quarkus.io/blog/redis-job-queue/">How to implement a job queue with Redis</a>.</p> </div> </div> <div class="sect1"> <h2 id="_kie">KIE</h2> <div class="sectionbody"> <p>The KIE community has been quite active too in the last few days and produced quite a number of interesting articles about its technology. First, we suggest you dive into this one about <a href="https://blog.kie.org/2022/09/creating-your-first-cloud-agnostic-serverless-application-with-java.html">Creating your first cloud-agnostic serverless application with Java</a>. It’s a good place to start!</p> <p>Another one, called <a href="https://blog.kie.org/2022/09/new-visualizer-for-the-serverless-workflow-editor.html">New visualizer for the Serverless Workflow Editor</a>, provides a nice overview of this new tool, and we’ll certainly learn more about it and use it. If you are more interested in technical details and implementation, you are in luck: there is a rather detailed overview of the <a href="https://blog.kie.org/2022/09/efesto-refactoring-technical-details.html">Efesto refactoring</a>.</p> <p>Wait, that’s not all!
Check out this article, and the video it links to: <a href="https://blog.kie.org/2022/09/transparent-ml-integrating-drools-with-aix360.html">Transparent ML, integrating Drools with AIX360</a>!</p> </div> </div> <div class="sect1"> <h2 id="_techbytes">Techbytes</h2> <div class="sectionbody"> <p>If KIE and Quarkus have been the most prolific of the last two weeks, there are still a few more articles, coming from other projects, that you may want to check out: * <a href="http://www.mastertheboss.com/java/how-to-spot-java-bugs-with-spotbugs/">How to spot Java bugs with SpotBugs</a> * <a href="http://www.mastertheboss.com/jboss-frameworks/resteasy/getting-started-with-jakarta-restful-services/">Getting started with Jakarta RESTful Services</a> * <a href="https://infinispan.org/blog/2022/09/12/infinispan-14-console-wizard">Creating cache with wizard - Infinispan 14</a> * <a href="https://www.wildfly.org//news/2022/09/14/Remote-dev-watch/">Remote dev-watch development with WildFly Jar Maven Plugin</a> * <a href="https://blog.kie.org/2022/09/multiple-repositories-pull-request-chaos-crawl-them-all-in-one-single-place.html">Multiple repositories Pull Request chaos, crawl them all in one single place</a></p> </div> </div> <div class="sect1"> <h2 id="_releases_releases_releases">Releases, releases, releases…</h2> <div class="sectionbody"> <p>As always, the JBoss community has been quite active, and a few projects published new versions in the last two weeks:</p> <div class="ulist"> <ul> <li> <p><a href="https://quarkus.io/blog/quarkus-2-5-3-final-released/">Quarkus 2.12.2.Final released</a> followed by <a href="https://quarkus.io/blog/quarkus-2-12-3-final-released/">Quarkus 2.12.3.Final</a></p> </li> <li> <p><a href="https://quarkus.io/blog/intellij-quarkus-tools-1.13.0/">Quarkus Tools for IntelliJ 1.13.0 released!</a></p> </li> <li> <p><a href="https://resteasy.dev/2022/09/08/resteasy-6.2.0.Beta1-release/">RESTEasy 6.2.0.Beta1 Release</a></p> </li> <li> <p><a href="https://www.keycloak.org/2022/09/keycloak-1902-released">Keycloak 19.0.2 released</a></p> </li> </ul> </div> </div> </div> <div class="sect1"> <h2 id="_decaf">Decaf'</h2> <div class="sectionbody"> <p>Feeling too jittery? Enough Java for now? Get refreshed with these next two articles about <strong>regular expressions</strong>:</p> <div class="ulist"> <ul> <li> <p><a href="https://developers.redhat.com/articles/2022/09/14/beginners-guide-regular-expressions-grep">A beginner’s guide to regular expressions with grep</a></p> </li> <li> <p><a href="https://developers.redhat.com/articles/2022/09/16/regex-how-quantifiers-pattern-collections-and-word-boundaries">Regex how-to: Quantifiers, pattern collections, and word boundaries</a></p> </li> </ul> </div> <p><em>That’s all for today! Please join us again next time for another round of our JBoss editorial!</em></p> </div> </div> <div class="author"> <pfe-avatar pfe-shape="circle" pfe-pattern="squares" pfe-src="/img/people/romain-pelisse.png"></pfe-avatar> <span>Romain Pelisse</span> </div></article>Romain PelisseEfesto refactoring &#8211; technical detailsGabriele Cardosihttps://blog.kie.org/2022/09/efesto-refactoring-technical-details.html2022-09-21T11:09:56ZThis post is meant as a description of the APIs and other technical details of the Efesto framework. It continues the introduction made in the BASE CONCEPTS post.
There are some concepts around which the APIs are implemented:

* Generated resource
* Unique identifier
* Context of execution

The framework provides and manages default implementations of the classes representing those concepts. Those classes can be extended by different engines for their specific needs (e.g. the kie-drl compilation plugin defines a context that contains a KnowledgeBuilderConfiguration), but such specific additions should never leak out of the engine itself, and the functionality of the framework should never rely on such "custom" details.

GENERATED RESOURCE

A GeneratedResource represents the result of a compilation. By itself it is just a marker interface, because there are different kinds of generated resources:

* executable resources (GeneratedExecutableResource)
* redirect resources (GeneratedRedirectResource)
* “container” resources

Executable resources represent the "entry point" for execution at runtime, and contain the information required to "instantiate" the executable unit. For some code-generation models (e.g. rules, predictions) this means storing the class to instantiate at runtime, which will be used to start the evaluation. For models that do not rely on code-generation for execution (e.g. decisions), this resource contains the name of the class to be instantiated and/or the methods/parameters to be invoked. Redirect resources contain the information needed to forward the execution request to a different engine, including the information about the engine to be invoked. Container resources are meant to store other information needed at runtime (e.g. the classes generated during compilation).

UNIQUE IDENTIFIER

The unique identifier (ModelLocalUriId) contains the information required to uniquely identify an executable or redirect generated resource. ModelLocalUriId contains information about:

* the model/engine to invoke
* the full path to the given resource

The unique identifier is represented as a "path" whose root is the model/engine to invoke, and the path describes all the elements required to get to the specific resource. Stateless engines (e.g. DMN, PMML) describe that as "/namespace/model_name" or "/filename/model_name". Stateful engines would require further path components to identify the specific "state" to be invoked (e.g. "/drl/rule_base/session_name/session_identifier"). ModelLocalUriId is a property of both GeneratedExecutableResource and GeneratedRedirectResource, since both of them have to be retrieved during runtime execution. ModelLocalUriId is a feature that was initially implemented in the Kogito Incubation API. For each module, client code should be able to invoke a method like the following to retrieve the unique identifier:

ModelLocalUriId modelLocalUriId = appRoot("") .get(PmmlIdFactory.class) .get(fileNameNoSuffix, modelName);

This is a fluent API, and each get invocation corresponds to an element in the generated path. The appRoot parameter is only used to differentiate multiple applications (e.g. in a distributed context). The first get is needed to start the path building. Each module should implement its own factory implementing ComponentRoot, which, in turn, will be used to generate the full path. Each of the following get calls should return an object that extends ModelLocalUriId, since it represents the path up to that specific segment. Each module may provide its own strategy to define such paths, so each module may implement its own subclasses, depending on its needs.
Since the ModelLocalUriId constructor requires a LocalUri instance, any of its subclasses should implement a way to call that constructor with such an instance. In the following example:

public class PmmlIdFactory implements ComponentRoot { public LocalComponentIdPmml get(String fileName, String name) { return new LocalComponentIdPmml(fileName, name); } }

the PmmlIdFactory exposes a get method (the fluent API) that requires fileName and name parameters. These, in turn, are used to invoke the LocalComponentIdPmml constructor.

public class LocalComponentIdPmml extends ModelLocalUriId { public static final String PREFIX = "pmml"; public LocalComponentIdPmml(String fileName, String name) { super(LocalUri.Root.append(PREFIX).append(fileName).append(name)); } }

This snippet: LocalUri.Root.append(PREFIX).append(fileName).append(name) will lead to the creation of the following path: /{PREFIX}/{fileName}/{name}

CONTEXT OF EXECUTION

The context contains basic information about the current execution: the generated classes and the unique identifiers generated during compilation. Engines may extend it as per their needs. For example, the EfestoCompilationContext specialization used inside the rule engine defines a KnowledgeBuilderConfiguration for its needs.

COMPILATION CONTEXT

EfestoCompilationContext is the specialization used at compile time, and it is used to store the classes generated during compilation. The framework provides a default implementation, together with a static method that retrieves it with all the classes eventually compiled by a previous compilation. Behind the scenes, that static method invokes a constructor that scans the classloader to look for efesto-related classes.

RUNTIME CONTEXT

EfestoRuntimeContext is the specialization used at runtime to retrieve the generated classes. The framework provides a default implementation, together with a static method that retrieves it with all the efesto-related compiled classes. Behind the scenes, that static method invokes a constructor that scans the classloader to look for efesto-related classes.

PUBLIC APIS

The framework basically consists of two sets of APIs, the "compilation" and the "runtime" ones, defined in the compilation-manager and runtime-manager API modules. Those are the APIs that "client code" is expected to invoke. Said differently, "client code" is expected to interact with engines only through those APIs.

COMPILATION API

void processResource(EfestoCompilationContext context, EfestoResource... toProcess);

This is the method that "external applications" (e.g. kie-maven-plugin) should invoke to create executable units out of given models. EfestoResource is the DTO wrapping a single model to be processed. Its only method, T getContent();, is invoked by the compilation manager to get the object to be processed. The most common usage is to provide an actual File to the compilation manager, in which case there already is an implementation, EfestoFileResource. A specific abstract implementation wraps a Set of models; as for the previous case, there already exists an implementation to manage Files.

RUNTIME API

Collection<EfestoOutput> evaluateInput(EfestoRuntimeContext context, EfestoInput... toEvaluate);

This is the method that "external applications" (e.g. kogito execution) should invoke to retrieve a result out of the executable units generated at compile time. EfestoInput is the DTO wrapping the data to be evaluated and the unique identifier of the executable unit.
It has two methods:

ModelLocalUriId getModelLocalUriId(); T getInputData();

The former returns the unique identifier of the executable unit; the latter returns the data to use for evaluation. Currently there are no "default" implementations of it, since the input structure is generally model-specific; so, every plugin should provide its own implementation.

INTERNAL APIS

Behind the scenes, when the CompilationManager and the RuntimeManager receive a request, they scan the classloader for engine plugins. Such plugins should implement, respectively, the KieCompilerService and the KieRuntimeService interfaces.

COMPILERSERVICE API

KieCompilerService declares three methods:

boolean canManageResource(EfestoResource toProcess); List<E> processResource(EfestoResource toProcess, U context); String getModelType();

The first one is invoked by the CompilationManager to verify whether the specific implementation is able to manage the given resource. The evaluation could be based on the actual type of the resource, on some details of the content, or on a mix of them. It is the responsibility of the implementation to provide the appropriate logic. The only requirement to keep in mind is that, during execution, there should be at most one implementation that returns true for a given EfestoResource, otherwise an exception is thrown. The following snippet is an example where a given EfestoResource is considered valid if it is a DecisionTableFileSetResource:

@Override public boolean canManageResource(EfestoResource toProcess) { return toProcess instanceof DecisionTableFileSetResource; }

The above implementation works because DecisionTableFileSetResource is a class specifically defined by the plugin itself, so there are no possible "overlaps" with other implementations. On the other side, the following snippet is an example where a given EfestoResource is considered valid if it is an EfestoFileResource and the contained model is a PMML one:

@Override public boolean canManageResource(EfestoResource toProcess) { return toProcess instanceof EfestoFileResource && ((EfestoFileResource) toProcess).getModelType().equalsIgnoreCase(PMML_STRING); }

In this case, the actual class of the EfestoResource is not enough, since EfestoFileResource is one of the default implementations provided by the framework. So, a further check is needed, about the model that is wrapped in the resource. A single plugin may manage multiple representations of the same model. For example, a plugin may manage both an EfestoFileResource and an EfestoInputStreamResource. There are different possible strategies to do that: the plugin may provide one single "compilation-module" with two classes implementing the KieCompilerService; it may define two "compilation-modules", each with one implementation; or one single class may manage both kinds of inputs. Again, this is the responsibility of the plugin itself. This also pushes toward code reuse. For a given model, there could be a common path that provides the final compilation output, with different entry points depending on the model representation. It is thus possible that a compilation module creates a compilation output that, in turn, is also an EfestoResource. Then, there could be another implementation that accepts the above intermediate resource as input and transforms it into the final compilation output. This chaining is managed by the efesto framework out of the box. An example of that is featured by the drools-related PMML models: during compilation, the PMML compiler generates an intermediate output that is both an EfestoResource and an EfestoCompilationOutput.
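Putting the three methods together, the skeleton of a plugin-side compiler service looks roughly like the following. This is a minimal sketch rather than framework code: MyModelCompilationOutput and MY_MODEL_STRING are hypothetical names, the generic parameterization of KieCompilerService is an assumption, and the efesto API imports are omitted; only the three method signatures come from the description above.

import java.io.File;
import java.util.Collections;
import java.util.List;
// efesto API imports (KieCompilerService, EfestoResource, EfestoFileResource,
// EfestoCompilationContext) omitted

public class MyModelCompilerService implements KieCompilerService<MyModelCompilationOutput, EfestoCompilationContext> {

    // Hypothetical model identifier; every plugin defines its own
    private static final String MY_MODEL_STRING = "mymodel";

    @Override
    public boolean canManageResource(EfestoResource toProcess) {
        // Accept only file resources whose declared model type matches this plugin,
        // so that no other plugin returns true for the same resource
        return toProcess instanceof EfestoFileResource
                && ((EfestoFileResource) toProcess).getModelType().equalsIgnoreCase(MY_MODEL_STRING);
    }

    @Override
    public List<MyModelCompilationOutput> processResource(EfestoResource toProcess, EfestoCompilationContext context) {
        // Invoked only after canManageResource returned true: parse the wrapped
        // model and generate the executable unit(s)
        File modelFile = ((EfestoFileResource) toProcess).getContent();
        return Collections.singletonList(new MyModelCompilationOutput(modelFile));
    }

    @Override
    public String getModelType() {
        return MY_MODEL_STRING;
    }
}

The class would then be listed in the module's META-INF discovery file described below, so that the CompilationManager can find it while scanning the classloader.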
When the CompilationManager retrieves such an intermediate compilation output, it being an EfestoResource, it scans the plugins to find one that is able to compile it. The kie-drl plugin fulfills this requirement and proceeds with drl-specific compilation. One thing to notice here is that different modules should limit direct dependencies between them as much as possible. The second method is invoked by the compilation manager if the previous one returned true. That method also receives an EfestoCompilationContext as a parameter. Code-generating implementations should rely on that context for compilation and classloading. The third method is used by the framework to discover, at execution time, which models can actually be managed. Thanks to that method, there is a complete de-coupling between the framework and the implementations themselves, since the framework can dynamically discover the available models, and every plugin may freely define its own model. The last critical bit is that every compilation module should contain an org.kie.efesto.compilationmanager.api.service.KieCompilerService file inside the src/main/resources/META-INF directory, and that file should list all the KieCompilerService implementations provided by that module.

RUNTIMESERVICE API

KieRuntimeService declares three methods:

boolean canManageInput(EfestoInput toEvaluate, K context); Optional<E> evaluateInput(T toEvaluate, K context); String getModelType();

The first one is invoked by the RuntimeManager to verify whether the specific implementation is able to manage the given input. The evaluation could be based on the actual type of the input, on some details of the content, or on a mix of them. It is the responsibility of the implementation to provide the appropriate logic. The only requirement to keep in mind is that, during execution, there should be at most one implementation that returns true for a given EfestoInput, otherwise an exception is thrown. The following snippet is an example where a given EfestoInput is considered valid if it is an EfestoInputPMML and the given identifier has already been compiled:

public static boolean canManage(EfestoInput toEvaluate, EfestoRuntimeContext runtimeContext) { return (toEvaluate instanceof EfestoInputPMML) && isPresentExecutableOrRedirect(toEvaluate.getModelLocalUriId(), runtimeContext); }

The above implementation works because EfestoInputPMML is a class specifically defined by the plugin itself, so there are no possible "overlaps" with other implementations. The difference with the compilation side is that the KieRuntimeService implementation should also check that the model related to the given unique identifier has already been compiled. A single plugin may manage different types of input for the same model. For example, the rule plugin may manage both an EfestoInputDrlKieSessionLocal and an AbstractEfestoInput that contains an EfestoMapInputDTO. There are different possible strategies to do that: the plugin may provide one single "runtime-module" with two classes implementing the KieRuntimeService; it may define two "runtime-modules", each with one implementation; or one single class may manage both kinds of inputs. Again, this is the responsibility of the plugin itself. This also pushes toward code reuse. For a given model, there could be a common code-path that provides the final runtime result, with different entry points depending on the input format. It is also possible for a runtime implementation to need a result from another implementation, as described after the following sketch.
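The runtime side follows the same skeleton. The sketch below is, again, hypothetical: MyModelInput and MyModelOutput are assumed names, the generic parameterization of KieRuntimeService is an assumption, and isPresentExecutableOrRedirect is the static helper used in the snippet above; only the three method signatures come from the description above.

import java.util.Optional;
// efesto API imports (KieRuntimeService, EfestoInput, EfestoRuntimeContext)
// omitted, as is the static import of isPresentExecutableOrRedirect

public class MyModelRuntimeService implements KieRuntimeService<MyModelInput, MyModelOutput, EfestoRuntimeContext> {

    @Override
    public boolean canManageInput(EfestoInput toEvaluate, EfestoRuntimeContext context) {
        // The input must be the plugin-specific type AND the executable unit
        // referenced by its identifier must exist from a previous compilation
        return toEvaluate instanceof MyModelInput
                && isPresentExecutableOrRedirect(toEvaluate.getModelLocalUriId(), context);
    }

    @Override
    public Optional<MyModelOutput> evaluateInput(MyModelInput toEvaluate, EfestoRuntimeContext context) {
        // Invoked only after canManageInput returned true: load the classes
        // generated at compile time through the context and evaluate the input
        return Optional.of(new MyModelOutput(toEvaluate.getModelLocalUriId(), toEvaluate.getInputData()));
    }

    @Override
    public String getModelType() {
        return "mymodel";
    }
}

As on the compilation side, the class would be listed in the module's META-INF discovery file described below.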
In that case, the calling runtime creates a specifically-crafted EfestoInput and asks the RuntimeManager for the result. This chaining is managed by the efesto framework out of the box. An example of that is featured by the drools-related PMML models. During execution, the PMML runtime generates an EfestoInput<EfestoMapInputDTO> and sends it to the RuntimeManager. The RuntimeManager scans the plugins to find one that is able to execute it. The kie-drl plugin fulfills this requirement and proceeds with drl-specific execution. One thing to note here is that modules should limit direct dependencies between them as much as possible! The second method is invoked by the runtime manager if the previous one returned true. That method also receives an EfestoRuntimeContext as a parameter. Code-generating implementations should rely on that context to retrieve/load classes generated during compilation. The third method is used by the framework to discover, at execution time, which models can actually be managed. Thanks to that method, there is a complete de-coupling between the framework and the implementations themselves, since the framework can dynamically discover the available models, and every plugin may freely define its own model. The last critical bit is that every runtime module should contain an org.kie.efesto.runtimemanager.api.service.KieRuntimeService file inside the src/main/resources/META-INF directory, and that file should list all the KieRuntimeService implementations provided by that module.

CONCLUSION

This post was meant to provide more technical details on what was introduced in the base concepts post. Following ones will provide concrete step-by-step tutorials and real use-cases, so… stay tuned!!! The post appeared first on .Gabriele CardosiBind services created with AWS Controllers for KubernetesBaiju Muthukadanea05a1b5-94fc-4f41-9f45-43a331d6a4c72022-09-21T07:00:00Z2022-09-21T07:00:00Z<p>Application developers can define Amazon Web Services (AWS) resources directly from Kubernetes using <a href="https://aws-controllers-k8s.github.io/community/docs/community/overview/">AWS Controllers for Kubernetes</a> (ACK). You can use the <a href="https://redhat-developer.github.io/service-binding-operator/userguide/intro.html">Service Binding Operator</a> to easily connect applications to any AWS service provisioned through ACK.</p> <p>This article explores the connection with an RDS database and demonstrates configuring ACK to create a service instance for the AWS Relational Database Service (RDS). You can also learn how to use Service Binding Operator annotations to bind a PostgreSQL service created using RDS and a REST API.</p> <h2>Benefits of the Service Binding Operator and AWS Controllers for Kubernetes</h2> <p>One benefit of the Service Binding Operator and ACK is that they streamline the formation of a connection. The Service Binding Operator implements the <a href="https://servicebinding.io">Service Binding specification for Kubernetes</a>.
This is a Kubernetes-wide specification for automating the process of communicating service secrets to workloads.</p> <p>Another benefit of the Service Binding Operator is that applications with many microservices (maybe hundreds of them) only need to set the correct label to receive binding data from the services specified by Service Binding Operator resources, using the <a href="https://redhat-developer.github.io/service-binding-operator/userguide/binding-workloads-using-sbo/binding-options.html#binding-workloads-using-a-label-selector">label selector</a>.</p> <p>The Service Binding Operator supports the following methods to obtain connection details from a service:</p> <ul> <li><a href="https://github.com/servicebinding/spec#provisioned-service">Provisioned Service</a></li> <li><a href="https://github.com/servicebinding/spec#direct-secret-reference">Direct Secret Reference</a></li> <li><a href="https://redhat-developer.github.io/service-binding-operator/userguide/exposing-binding-data/adding-annotation.html">Annotations</a></li> </ul> <p>Currently, ACK does not support the Provisioned Service method, and no single secret contains all the connection details. In such a scenario, you can use the annotation support provided by the Service Binding Operator and add this annotation to a Custom Resource (CR) or Custom Resource Definition (CRD).</p> <p>The following articles offer more information about ACK, including where the ACK project came from, why the Operator pattern is used, and how to configure and use ACK:</p> <ul> <li><a href="https://developers.redhat.com/articles/2022/05/16/how-use-operators-aws-controllers-kubernetes">How to use Operators with AWS Controllers for Kubernetes</a></li> <li><a href="https://developers.redhat.com/articles/2022/05/24/create-aws-resources-kubernetes-and-operators">Create AWS resources with Kubernetes and Operators</a></li> </ul> <h2>Step 1:  Prerequisites setup</h2> <p>The prerequisites for this demonstration are pretty simple. You must have an AWS account and a <a href="https://developers.redhat.com/openshift">Red Hat OpenShift</a> cluster with the Service Binding Operator installed.</p> <h3>AWS account permissions</h3> <p>Your AWS account must have the <a href="https://aws-controllers-k8s.github.io/community/docs/user-docs/authorization/#aws-iam-permissions-for-ack-controller">IAM role permissions</a> for the Amazon Relational Database Service (RDS) ACK controller. The policy required for RDS is:</p> <p><code>arn:aws:iam::aws:policy/AmazonRDSFullAccess</code></p> <h3>OpenShift cluster with the Service Binding Operator</h3> <p>You need administrator access to an OpenShift cluster. To install the Service Binding Operator, create a subscription similar to this example:</p> <pre> <code class="java">apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: name: my-service-binding-operator namespace: openshift-operators spec: channel: stable name: rh-service-binding-operator source: redhat-operators sourceNamespace: openshift-marketplace</code></pre> <p>For example, place this configuration in a file named <code>subscription.yaml</code>.
Then use the following <code>oc</code> command to create the resource:</p> <pre> <code class="language-bash">$ oc apply -f subscription.yaml</code></pre> <p>Alternatively, you can install the Service Binding Operator from <a href="https://operatorhub.io">OperatorHub</a> using the OpenShift administrator console.</p> <h2>Step 2:  Install the RDS Operator in an OpenShift cluster</h2> <p>These four steps use the ACK Operator to install the RDS database. The official documentation shows detailed information about configuring ACK in an OpenShift cluster.</p> <h3>1. Create a namespace</h3> <p>The following example uses a namespace called <code>ack-system</code>:</p> <pre> <code class="language-bash">$ oc new-project ack-system</code></pre> <p>This is the output you should see:</p> <pre> <code class="java">Now using project "ack-system" on server "https://example.org:6443". ...</code></pre> <h3>2. Create a config map</h3> <p>Create a config map with the following content in a <code>config.txt</code> file:</p> <pre> <code class="java">ACK_ENABLE_DEVELOPMENT_LOGGING=true ACK_LOG_LEVEL=debug ACK_WATCH_NAMESPACE= AWS_REGION=us-west-2 AWS_ENDPOINT_URL= ACK_RESOURCE_TAGS=hellofromocp</code></pre> <p>Use this config map in your OpenShift cluster as follows:</p> <pre> <code class="language-bash">$ oc create configmap --namespace ack-system \ --from-env-file=config.txt ack-rds-user-config </code></pre> <h3>3. Create a secret</h3> <p>Save the following authentication values in a file, such as <code>secrets.txt</code>:</p> <pre> <code class="java">AWS_ACCESS_KEY_ID=<access key id> AWS_SECRET_ACCESS_KEY=<secret access key></code></pre> <p>Use this <code>secrets.txt</code> file to create a secret in your OpenShift cluster as follows:</p> <pre> <code class="language-bash">$ oc create secret generic \ --namespace ack-system \ --from-env-file=secrets.txt ack-rds-user-secrets</code></pre> <p class="Indent1"><strong>Note</strong>: Be sure to secure access to this resource and the namespace because you will keep sensitive information in this secret—your AWS Access Key ID and AWS Secret Access Key.</p> <p>Alternatively, you can set up secure access using <a href="https://aws-controllers-k8s.github.io/community/docs/user-docs/irsa/#create-an-iam-role-for-your-ack-service-controller">IAM Roles for Service Accounts</a> (IRSA).</p> <h3>4. Install the relational database service</h3> <p>Refer to the article <a href="https://developers.redhat.com/articles/2022/05/24/create-aws-resources-kubernetes-and-operators">How to get Operators to use AWS Controllers for Kubernetes</a> for ACK RDS controller installation instructions. After successful installation, this page (Figure 1) appears in the administrator console.</p> <figure role="group"> <div class="rhd-c-figure"> <article class="media media--type-image media--view-mode-article-content-full-width"> <div class="field field--name-image field--type-image field--label-hidden field__items"> <a href="https://developers.redhat.com/sites/default/files/blog-ack.png" data-featherlight="image"><img src="https://developers.redhat.com/sites/default/files/styles/article_full_width_1440px_w/public/blog-ack.png?itok=nbjyUns8" width="1440" height="710" alt="This page appears in the OpenShift administrator console after installation." 
loading="lazy" typeof="Image" /> </a> </div> <div class="field field--name-field-caption field--type-string field--label-hidden field__items"> <div class="rhd-c-caption field__item"> Figure 1: After the ACK RDS controller is installed, this page appears in the OpenShift administrator console. </div> </div> </article> </div> <figcaption class="rhd-c-caption"></figcaption> </figure> <h2>Step 3:  The consumption of annotations and label selectors</h2> <p>To enable binding, the Service Binding Operator uses the following annotations that are part of the <code>DBInstance</code> resource in a <a href="https://helm.sh">Helm chart</a>:</p> <pre> <code class="java">apiVersion: rds.services.k8s.aws/v1alpha1 kind: DBInstance metadata: annotations: "service.binding/type": "path={.spec.engine}" "service.binding/provider": "aws" "service.binding/host": "path={.status.endpoint.address}" "service.binding/port": "path={.status.endpoint.port}" "service.binding/username": "path={.spec.masterUsername}" "service.binding/password": 'path={.spec.masterUserPassword.name},objectType=Secret,sourceKey=password' "service.binding/database": "path={.spec.engine}" ...</code></pre> <p>The <code>DBInstance</code> definition represents an AWS RDS resource.</p> <p>To define the workload, the Service Binding Operator uses the following label selector (part of the <code>ServiceBinding</code> resource in the Helm chart):</p> <pre> <code class="java">apiVersion: binding.operators.coreos.com/v1alpha1 kind: ServiceBinding metadata: name: servicebinding-rds-endpoint-demo spec: bindAsFiles: true services: - group: rds.services.k8s.aws version: v1alpha1 kind: DBInstance name: {{ .Values.dbinstance.name }} application: labelSelector: matchLabels: psql.provider: aws (*) version: v1 group: apps resource: deployments</code></pre> <p class="Indent1">(*) This line specifies the label that the Service Binding Operator uses to identify the workload.</p> <p>The Helm charts are available in the <a href="https://github.com/redhat-developer/openshift-app-services-demos">app-services-samples repository</a>.</p> <p>We have not deployed the application yet. Typically, the ServiceBinding controller waits for a workload resource with a matching <code>psql.provider: aws</code> label. As soon as a workload resource is available with the matching label, the Operator uses the ServiceBinding controller to project the binding values to the workload.</p> <p>The binding values projects into the <code>/bindings</code> directory inside the container of the workload resource. The following directory structure stores the values:</p> <pre> <code class="java">/bindings └── servicebinding-rds-endpoint-demo ├── type ├── database ├── host ├── username └── password</code></pre> <p>The REST API application uses a suitable and compliant <a href="https://servicebinding.io/application-developer/#language-specific-libraries">library</a> to consume the projected binding values.</p> <h2>Step 4:  Create a database instance</h2> <p>After you clone the <a href="https://github.com/redhat-developer/openshift-app-services-demos">app-services-samples repository</a> described in the previous section, change to the <code>openshift-app-services-demos/samples/sbo/ack-rds-blog</code> directory to perform these two steps:</p> <p>1. 
<h2>Step 4:  Create a database instance</h2> <p>After you clone the <a href="https://github.com/redhat-developer/openshift-app-services-demos">app-services-samples repository</a> described in the previous section, change to the <code>openshift-app-services-demos/samples/sbo/ack-rds-blog</code> directory to perform these two steps:</p> <p>1. Run Helm on the <code>rds-postgre-chart-demo</code> chart:</p> <pre> <code class="language-bash">$ helm install rds-postgre-chart-demo -n ack-system rds-postgre-chart-demo</code></pre> <p>This is the output you should see:</p> <pre> <code class="java">NAME: rds-postgre-chart-demo
LAST DEPLOYED: Thu Aug 4 09:29:26 2022
NAMESPACE: ack-system
STATUS: deployed
REVISION: 1
TEST SUITE: None</code></pre> <p>2. Run the following command to validate the database instance:</p> <pre> <code class="language-bash">$ kubectl get dbinstance rds-test-demo -n ack-system -o=jsonpath='{.status.dbInstanceStatus}'</code></pre> <p>Output:</p> <pre> <code class="java">available</code></pre> <p>Now the database is ready to use.</p> <h2>Step 5:  Deploy the REST API application</h2> <p>In this demo, we use the Software Security Module (SSM), a Go-based REST API application. For convenience, deploy the application using the Helm chart in the <a href="https://github.com/redhat-developer/openshift-app-services-demos">app-services-samples repository</a>. After you clone the repository, perform the following steps from the <code>openshift-app-services-demos/samples/sbo/ack-rds-blog</code> directory.</p> <p>1. Run Helm on the <code>ssm-chart</code> chart:</p> <pre> <code class="language-bash">$ helm install ssm-chart -n ack-system ssm-chart</code></pre> <p>Output:</p> <pre> <code class="java">NAME: ssm-chart
LAST DEPLOYED: Thu Aug 4 04:22:24 2022
NAMESPACE: ack-system
STATUS: deployed
REVISION: 1
TEST SUITE: None</code></pre> <p>2. Verify that the deployment of the REST API application is successful by running:</p> <pre> <code class="language-bash">$ kubectl get deployment -n ack-system</code></pre> <p>Output:</p> <pre> <code class="java">NAME                 READY   UP-TO-DATE   AVAILABLE   AGE
ack-rds-controller   1/1     1            1           28m</code></pre> <p>The deployment is defined as follows in the Helm chart:</p> <pre> <code class="java">apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ .Values.k8Name }}
  annotations:
    app.kubernetes.io/part-of: ssm
  labels:
    psql.provider: aws (*)
...</code></pre> <p>(*) This line specifies the required matching label that the ServiceBinding controller uses to identify the workload and project the bindings.</p> <p>The ServiceBinding controller watches for a deployment matching the label. After the deployment is ready, the Operator uses the ServiceBinding controller to project the binding values to the workload.</p> <h2>Step 6:  Access and validate the REST API application</h2> <p>The <code>ssm-chart</code> Helm chart also creates an <code>ssm</code> service resource for convenient access to the application. The <code>ssm</code> service resource points to the REST API application. Before connecting to this application, make sure the <code>DBInstance</code> resource is created and ready, with an RDS instance provisioned in AWS.</p> <p>Switch to another terminal to run the commands in the following steps.</p> <h3>1. Access the REST API application by forwarding the port of the service</h3> <p>An <code>oc</code> command on OpenShift is useful for port forwarding:</p> <pre> <code class="language-bash">$ oc port-forward --address 0.0.0.0 svc/ssm 8080:8080 -n ack-system</code></pre>
<h3>2. Validate the application</h3> <p>Validate that the application works as follows:</p> <h4>Generate a base64-encoded string</h4> <p>Start by creating a string from random input:</p> <pre> <code class="language-bash">$ openssl rand 32 | base64</code></pre> <p>This output contains the string you will use as input in the next step:</p> <pre> <code class="java">rgeR0ENzlxG+Erss6tw0gBkBWdLOPrQhEFQpH8O5t/Y=</code></pre> <h4>Call the wrap API</h4> <p>Call the application's <code>wrap</code> API to create a cipher from the string, using the base64-encoded string from the previous step as input:</p> <pre> <code class="language-bash">$ curl http://localhost:8080/wrap -d '{"key": "rgeR0ENzlxG+Erss6tw0gBkBWdLOPrQhEFQpH8O5t/Y="}'</code></pre> <p>This output contains the cipher string you will use as input in the next step:</p> <pre> <code class="java">{"cipher":"D/S6wDJPH ... "}</code></pre> <h4>Call the unwrap API</h4> <p>Now call the application's <code>unwrap</code> API to restore the original base64-encoded string by submitting the JSON output from the previous step:</p> <pre> <code class="language-bash">$ curl http://localhost:8080/unwrap -d '{"cipher":"D/S6wDJPH ... "}'</code></pre> <p>The output returns the original base64-encoded string:</p> <pre> <code class="java">{"key":"rgeR0ENzlxG+Erss6tw0gBkBWdLOPrQhEFQpH8O5t/Y="}</code></pre> <h2>The Service Binding Operator simplifies installation and deployment</h2> <p>With the annotation support of the Service Binding Operator, you can easily bind ACK services without making any changes to the code. You can use the same label to bind any number of workloads. The REST API application consumes the projected binding values by using one of the <a href="https://servicebinding.io/application-developer/#language-specific-libraries">libraries</a> compliant with the Service Binding specification for Kubernetes. You can use the REST API application to connect to the AWS RDS service without any specific change.</p> The post <a href="https://developers.redhat.com/articles/2022/09/21/bind-services-created-aws-controllers-kubernetes" title="Bind services created with AWS Controllers for Kubernetes">Bind services created with AWS Controllers for Kubernetes</a> appeared first on <a href="https://developers.redhat.com/blog" title="Red Hat Developer">Red Hat Developer</a>. <br /><br />Baiju Muthukadan2022-09-21T07:00:00ZNew visualizer for the Serverless Workflow EditorRoger Pallejahttps://blog.kie.org/2022/09/new-visualizer-for-the-serverless-workflow-editor.html2022-09-21T00:08:57ZWe're happy to announce that a new diagram visualizer for the Serverless Workflow domain has been released as part of the kogito tooling 0.23.0, and it is now the default for the Kogito – Serverless Workflow Editor – VSCode extension. If you are not familiar with the kogito tooling and its extensions, please refer to the guide first. Apart from the previous capabilities of the editor, this new diagram visualizer provides a bunch of additional features to help users during the authoring of their workflows, such as: * Automatic workflow reloading It dynamically reloads the workflow's visualization whenever a change is made in the JSON declaration text panel.
* Error Handling In case the workflow's JSON declaration is not valid (so the workflow cannot be automatically reloaded), the editor keeps the latest valid visualization of the workflow, and an error icon appears in the top right corner. Hovering over the error icon displays a user-friendly message describing the cause of the error. Once the diagram is valid again, the error icon disappears and the visualization is properly updated. * State navigation Once a state is selected in the diagram visualizer (by clicking on it), the editor automatically navigates to the line in the JSON declaration where the state is defined. * Mediators Users are able to work with mediators either by using the mouse or by using the available buttons in the mediators bar: * Auto-fit to diagram size: Fits the diagram to the actual viewport size * Zoom: Scales the viewport accordingly (also available by using mouse mediators, please see the keybindings page) * Panning: Translates the viewport accordingly (only available by using mouse mediators, please see the keybindings page) * Export workflow to SVG From the technical perspective, it relies on Canvas as the main rendering technology. Please stay tuned for further updates, new features, and improvements coming soon! The post appeared first on .Roger PallejaQuarkus 2.12.3.Final releasedGuillaume Smethttps://quarkus.io/blog/quarkus-2-12-3-final-released/2022-09-21T00:00:00ZToday, we released Quarkus 2.12.3.Final, with a new round of bugfixes and documentation improvements. It is a safe upgrade for anyone using 2.12. Migration Guide If you are not already using 2.12, please refer to our migration guide. Full changelog You can get the full changelog of 2.12.3.Final on GitHub....Guillaume SmetTransparent ML, integrating Drools with AIX360Matteo Mortarihttps://blog.kie.org/2022/09/transparent-ml-integrating-drools-with-aix360.html2022-09-20T12:10:33ZFollowing up from a previous post about integrating Drools with the Open Prediction Service, in this new post we want to share the current results from another exploration work: this time integrating Drools with research on Transparent Machine Learning by IBM. INTRODUCTION Transparency is a key requirement in many business sectors, from FSI (Financial Services Industry), to Healthcare, to Government institutions, and many others. In more recent years, a generalized need for increased transparency in decision making processes has gained a great deal of attention from several different stakeholders, especially when it comes to automated decisioning and AI-based decision services. Specifically in the Eurozone, this ties in with the GDPR and the requirement for explainability in the way businesses automate processes and decision making. Additionally, an "AI Act" is proposed and currently under discussion at the European Commission: under the current status of the proposal, several risk levels are identified. The integration of AI in the business process and decision model will likely require explainability, transparency, and a conformity assessment, depending on the applicable risk level. In other parts of the world, similar legislations are coming into effect or are currently being proposed. With these considerations in mind, we will explore how to leverage rule induction strategies and specific types of machine learning models, with the intent of producing predictive models that can integrate effectively into this general context.
TRANSPARENT ML WITH DROOLS AND AIX360 One way to address some of the problems and requirements highlighted in the previous section is to use Machine Learning to generate specific types of models that are inherently readable and transparent. As we will see in this blog post, a transparent predictive model can be handed over easily to the next phase as a decision model, in order to be evaluated as-is, but most importantly for the ability to be inspected and authored directly! Comparing a Transparent ML approach with broader, general Machine Learning, we can highlight some of its characteristics: * General ML evaluation: all supported model types, but black box evaluation. Transparent ML: the model can be inspected, authored, and evaluated. * General ML: accuracy focused. Transparent ML: transparency focused. * General ML: eXplainable AI complements. Transparent ML: intrinsically eXplainable. * General ML: MLOps, governed by data science. Transparent ML: business centric governance. * General ML: multiple runtimes. Transparent ML: potentially a single runtime. Naturally the transparent ML approach has its limitations; we will discuss alternative approaches in the conclusions of this blog post. For the examples in this blog post, we will use the UCI Adult dataset (predicting if income exceeds $50K/yr from census data). Let's get started! RULE SET INDUCTION In this section we will make use of AIX360, an open-source library that supports interpretability and explainability of datasets and machine learning models. Our goal in this phase is to generate a predictive model from the UCI Adult dataset, using Machine Learning techniques. To generate a transparent predictive model, we can drive the generation of a RuleSet, as explained in the accompanying Jupyter notebook. As a result of this, we have now generated a set of rules, in the form of a PMML RuleSet, which represents the transparent predictive model for the Adult dataset. If you want to delve into more details about using AIX360 and related algorithms, you can check out the AIX360 documentation. DROOLS In this section, we will transform the result from the previous steps into an executable decision model, which can also be directly authored. Please note: in a different context, where the only requirement is the execution of predictive models in general, you can simply rely on the PMML support for Drools, or on integration blueprints such as the integration of Drools with IBM Open Prediction Service. In this article instead, as premised, we're interested in the result being a transparent prediction model, which can be fully inspected, authored, and (naturally!) evaluated. Specifically, we will transform the transparent predictive model serialized as a RuleSet into a DMN model with DMN Decision Tables. To perform this transformation, we will make use of the kie-dmn-ruleset2dmn utility; this is available as a developer API and as a command line utility. You can download a published version of the command line utility (executable .jar), or you can look up a more recent version directly from the project sources. To transform the RuleSet file into a DMN model, you can issue the following command: $ java -jar kie-dmn-ruleset2dmn-cli-8.27.0.Beta.jar adult.pmml --output=adult.dmn This will result in a generated .dmn file, which you can author with the Kogito Tooling and evaluate as usual! We can upload the generated .dmn file onto the KIE Sandbox and make use of the KIE Sandbox extended services to evaluate the DMN model locally, as-is or authored as needed!
It's interesting to note that the static analysis of the DMN decision table identifies potential gaps in the table, as well as subsumptions in the rules induced during the Machine Learning phase; this is expected, and the table can be authored directly depending on the overall business requirements. From the model evaluation perspective, overlapping rules are not a problem, as they would evaluate to the same prediction; this is a quite common scenario when the ML might have identified overlapping "clusters" or groupings over a number of features, leading to the same output. From a decision table perspective, however, overlapping rules can be simplified, as a more compact representation of the same table semantics is often preferable in decision management. Here it is up to the business to decide whether to keep the table as translated from the original predictive model, or to leverage the possibilities offered by the transparent ML approach and simplify/compact the table for easier reading and maintenance by the business analyst. DEPLOY We can deploy directly from the KIE Sandbox: our transparent prediction and decision model is available as a deployment on OpenShift! As you can see, with just the click of a button in the KIE Sandbox, our transparent ML model has been easily deployed on OpenShift. If you want to leverage the serverless capabilities of Knative for auto-scaling (including auto scale to zero!) for the same predictive model, you can consider packaging it as a Kogito application. CONCLUSION We have seen how a Transparent ML approach can provide solutions to some of the business requirements and conformance needs for regulations such as GDPR or the AI Act; we have seen how to drive rule induction by generating predictive models that are inherently transparent, can be authored directly like any other decision model, and can be deployed on a cloud-native OpenShift environment. In this post, we have focused on directly using upstream AIX360 and Drools; commercial solutions by IBM and Red Hat include these projects too. If you are interested in additional capabilities for eXplainable AI solutions, check them out! The Transparent ML predictive model, now available as a decision service, can be integrated in other DMN models and other applications, as needed. For example, the transparent prediction on the Adult dataset (predicting if income exceeds $50K/yr) could become invocable as part of another decision service that decides on the applicability of requests to issue a certain type of credit card. Another possible integration could be to employ a transparent ML predictive model in the form of scorecards inside a broader DMN model for segmentation; that is, first identify the applicable category/segment based on the input data, and then apply one of several score cards for the specific segment. Don't miss checking out related Transparent ML topics! Hope you have enjoyed this blog post, showcasing the integration of several technologies to achieve a transparent ML solution! Questions? Feedback? Let us know in the comments section below! Special thanks to Greger Ottosson and Tibor Zimanyi for their help while crafting this content.
The post appeared first on .Matteo MortariHow hashing and cryptography made the internet possibleAndy Oram60312bd5-c40d-4f54-9a4e-fbce728d85182022-09-20T07:00:00Z2022-09-20T07:00:00Z<p>A lot of technologies, business choices, and public policies gave us the internet we have today—a tremendous boost to the spread of education, culture, and commerce, despite its well-documented flaws. But few people credit two deeply buried technologies for making the internet possible: hashing and cryptography.</p> <p>If more people understood the role these technologies play, more money and expertise would go toward uncovering and repairing security flaws. For instance, we probably would have fixed the <a href="http://heartbleed.com/">Heartbleed</a> programming error much earlier and avoided widespread vulnerabilities in encrypted traffic.</p> <p>This article briefly explains where hashing and cryptography come from, how they accomplish what they do, and their indelible effect on the modern internet.</p> <h2>Hashing</h2> <p>Hashing was <a href="https://www.geeksforgeeks.org/importance-of-hashing/">invented in the 1950s</a> at the world's pre-eminent computer firm of that era, IBM, by Hans Peter Luhn. What concerned him at the time was not security—how many computer scientists thought about that?—but saving disk space and memory, the most costly parts of computing back then.</p> <p>A <em>hash</em> is a way of reducing each item of data to a small, nearly unique, semi-random string of bits. For instance, if you are storing people's names, you could turn each name into the numerical value of the characters and run a set of adds, multiplies, and shift instructions to produce a 16-bit value. If the hash is good, there will be very few names that produce the same 16-bit value—very few <em>collisions</em>, as that situation is called.</p> <p>Now suppose you want to index a database for faster searching. Instead of indexing the names directly, it's much simpler and more efficient to make the index out of 16-bit values. That was one of the original uses for hashes. But they turned out to have two properties that make them valuable for security: No one can produce the original value from the hash, and no one can substitute a different value that produces the same hash. (It is theoretically possible to do either of those things, but doing so would be computationally infeasible, so they're impossible in practice.)</p> <p>Early Unix systems made use of this property to preserve password security. You created a password along with your user account and gave it to the computer, but the operating system never stored the password itself—it stored only a hash. Every time you entered your password after that, the operating system ran the hash function and let you log in if the resulting hash matched the one in the system. If the password file were snatched up by a malicious intruder, all they would get is a collection of useless hashes. (This clever use of hashes eventually turned out not to be secure enough, so it was replaced with <em>encryption,</em> which we'll discuss in more detail in the next section of this article.)</p> <p>Hashes are also good for ensuring that no one has tampered with a document or software program. Injecting malware into free software on popular repositories is not just a theoretical possibility—<a href="https://github.blog/2022-05-26-npm-security-update-oauth-tokens/">it can actually happen</a>. Therefore, every time a free software project releases code, the team runs it through a hash function. 
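</p> <p>For example, on Linux you can compute such a release hash with the standard <code>sha256sum</code> tool; this sketch is illustrative, and the file name is just a placeholder:</p> <pre> <code class="language-bash">$ sha256sum release-1.0.tar.gz</code></pre> <p>Publishing the resulting 64-character hex digest alongside the download lets anyone compare it with the hash of the file they actually received.</p> <p>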
Every user who downloads the software can run it through the same function to make sure nobody has intercepted the code and inserted malware. If someone changed even one bit and ran the hash function, the resulting hash would be totally different.</p> <p>Git is another of the myriad tools that use hashes to ensure the integrity of the repository, as well as to enable quick checks on changes to the repository. You can see a hash (a string of seemingly random characters) each time you issue a push or log command:</p> <pre> <code class="language-java">commit 2de089ad3f397e735a45dda3d52d51ca56d8f19a
Author: Andy Oram <andyo@example.com>
Date:   Sat Sep 3 16:28:41 2022 -0400

    New material related to commercialization of cryptography.

commit f39e7c87873a22e3bb81884c8b0eeeea07fdab48
Author: Andy Oram <andyo@example.com>
Date:   Fri Sep 2 07:47:42 2022 -0400

    Fixed typos.</code></pre> <p>Hash functions can be broken, so <a href="https://valerieaurora.org/hash.html">new ones are constantly being invented</a> to replace the functions that are no longer safe.</p> <h2>Cryptography</h2> <p>Mathematically speaking, the goal of cryptography has always been to produce output where each bit or character has an equal chance of being any other character. If someone intercepted a message and saw the string "xkowpvi," the "x" would have an equal chance of representing an A, a B, a C, and so on.</p> <p>In digital terms, every bit in an encrypted message has a 50% chance of representing a 0 and a 50% chance of representing a 1.</p> <p>This goal is related to hashing, and there is a lot of overlap between the fields. Security experts came up with several good ways to create encrypted messages that couldn't be broken—that is, where the decryption process would be computationally infeasible without knowing the secret key used to encrypt the message. But for a long time these methods suffered from an "initial exchange" problem: The person receiving the message needed to somehow also learn what that secret encryption key was, and learn it in a way that didn't reveal the key to anybody else. Whether you're a spy in World War II Berlin trying to communicate with your U.S. buddies, or a modern retail site trying to confirm a customer's credit card online, getting the shared secret securely is a headache.</p> <p>The solution by now is fairly familiar: create a pair of keys, one of which you keep private and the other of which you can share freely. Like a hash, the public key is opaque, and no one can determine your private key from it. (The number of bits in the key has to be doubled every decade or so as computers get more powerful.) This solution is generally <a href="https://cryptography.fandom.com/wiki/Diffie%E2%80%93Hellman_key_exchange">attributed to Whitfield Diffie, Martin Hellman, and Ralph Merkle</a>, although a British intelligence agent thought of the solution earlier and kept it secret.</p>
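<p>To get a feel for this, you can generate a key pair yourself. The following sketch uses OpenSSH's <code>ssh-keygen</code>; the key type and file name are arbitrary choices for illustration:</p> <pre> <code class="language-bash">$ ssh-keygen -t ed25519 -f demo_key -N ''</code></pre> <p>The private half lands in <code>demo_key</code>, which you keep secret, and the shareable public half in <code>demo_key.pub</code>.</p>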
<p>Diffie in particular was acutely conscious of social and political reasons for developing public key encryption. In the 1970s, I think that few people thought of doing online retail sales or services using encryption. It was considered a tool of spies and criminals—but also of political dissidents and muckraking journalists. These associations explain why the U.S. government tried to suppress it, or at least keep it from being exported, for decades.</p> <p>Diffie is still quite active in the field. The most recent article I've seen with him listed as an author was published on July 18, 2022.</p> <p>The linchpin of internet cryptography came shortly afterward with <a href="https://www.telsy.com/rsa-encryption-cryptography-history-and-uses/">RSA encryption</a>, invented by Ron Rivest, Adi Shamir, and Len Adleman. RSA encryption lets two parties communicate without previously exchanging keys, even public keys. (They were prevented from reaping much profit from this historic discovery because the U.S. government prevented the export of RSA technology during most of the life of their patent.)</p> <p>A big problem in key exchange remains: If someone contacts you and says they are Andy Oram, proffering what they claim to be Andy Oram's public key, how do you know they're really me? The two main solutions (web of trust and certificate authorities) are beyond the scope of this article, and each has vulnerabilities and a lot of overhead. Nevertheless, the internet seems to work well enough with certificate authorities.</p> <h2>The internet runs on hashes and cryptography</h2> <p>The internet essentially consists of huge computer farms in data centers, to which administrators and other users have to log in. For many years, the universal way to log into another system was Telnet, now abandoned almost completely because it's insecure. If you use Telnet, someone down the hall can watch your password cross the local network and steal the password. Anyone else who can monitor the network could do the same.</p> <p>Nowadays, all communication between users and remote computers goes over the secure shell protocol (SSH), which was invented <a href="https://www.oreilly.com/library/view/ssh-the-secure/0596008953/ch01s05.html">as recently as 1995</a>. All the cloud computing and other data center administration done nowadays depends on it.</p> <p>Interestingly, 1995 also saw the advent of the <a href="https://www.techtarget.com/searchsecurity/definition/Secure-Sockets-Layer-SSL">secure sockets layer</a> (SSL) protocol, which marks the beginning of web security. Now upgraded to Transport Layer Security (TLS), this protocol is used whenever you enter a URL beginning with HTTPS instead of HTTP. The protocol is so important that <a href="https://security.googleblog.com/2014/08/https-as-ranking-signal_6.html">Google penalizes web sites that use unencrypted HTTP</a>.</p> <p>Because most APIs now use web protocols, TLS also protects distributed applications. In addition to SSH and TLS, encryption can be found everywhere modern computer systems or devices communicate. That's because the modern internet is beset with attackers, and we use hashes and encryption to minimize their harm.</p> <p>Some observers think that quantum computing will soon have the power to break encryption as we know it. That could leave us in a scary world: Everything we send over the wire would be available to governments or large companies possessing quantum computers, which are hulking beasts that need to be refrigerated to within a few degrees of absolute zero.
We may soon need a <a href="https://nakedsecurity.sophos.com/2022/08/03/post-quantum-cryptography-new-algorithm-gone-in-60-minutes/">new army of Luhns, Diffies, and other security experts</a> to find a way to save the internet as we know it.</p> The post <a href="https://developers.redhat.com/articles/2022/09/20/how-hashing-and-cryptography-made-internet-possible" title="How hashing and cryptography made the internet possible">How hashing and cryptography made the internet possible</a> appeared first on <a href="https://developers.redhat.com/blog" title="Red Hat Developer">Red Hat Developer</a>. <br /><br />Andy Oram2022-09-20T07:00:00Z diff --git a/SOURCES/.metadata/.plugins/org.jboss.tools.central/valid_jboss_buzz.xml b/SOURCES/.metadata/.plugins/org.jboss.tools.central/valid_jboss_buzz.xml index 4d300d0..db96d04 100644 --- a/SOURCES/.metadata/.plugins/org.jboss.tools.central/valid_jboss_buzz.xml +++ b/SOURCES/.metadata/.plugins/org.jboss.tools.central/valid_jboss_buzz.xml @@ -1,2 +1,2 @@ -JBoss Tools Aggregated FeedJBoss Tools Aggregated FeedJBoss ToolsGCC's new fortification level: The gains and costsSiddhesh Poyarekaraa8105eb-3693-4988-8f01-b822ce7471ee2022-09-17T22:00:00Z2022-09-17T22:00:00Z<p>This article describes a new level of fortification supported in GCC. This new level detects more buffer overflows and bugs, which mitigates security issues in applications at run time.</p> <p>C programs routinely suffer from memory management problems. For several years, a <code>_FORTIFY_SOURCE</code> preprocessor macro inserted error detection to address these problems at compile time and run time. To add an extra level of security, <code>_FORTIFY_SOURCE=3</code> has been in the GNU C Library (glibc) since version 2.34. I described its mechanisms in my previous blog post, <a href="https://developers.redhat.com/blog/2021/04/16/broadening-compiler-checks-for-buffer-overflows-in-_fortify_source">Broadening compiler checks for buffer overflows in _FORTIFY_SOURCE</a>. There has been compiler support for this level in <a href="https://clang.llvm.org">Clang</a> for some time. Compiler support has also been available in <a href="https://gcc.gnu.org">GCC</a> since the release of version 12 in May 2022. The new mitigation should be available in GNU/Linux distributions with packaged GCC 12.</p> <p>The following sections discuss two principal gains from this enhanced level of security mitigation and the resulting impact on applications.</p> <p><strong>2 principal gains:</strong></p> <ol> <li><p>Enhanced buffer size detection</p></li> <li><p>Better fortification coverage</p></li> </ol> <h2>1. A new builtin provides enhanced buffer size detection</h2> <p>There is a new builtin underneath the new <code>_FORTIFY_SOURCE=3</code> macro in GCC 12 named <code>__builtin_dynamic_object_size</code>. This builtin is more powerful than the previous <code>__builtin_object_size</code> builtin used in <code>_FORTIFY_SOURCE=2</code>. When passed a pointer, <code>__builtin_object_size</code> returns a compile-time constant that is either the maximum or minimum estimate of the size of the object that the pointer may be pointing to at that point in the program. On the other hand, <code>__builtin_dynamic_object_size</code> is capable of returning a size expression that is evaluated at execution time. Consequently, the <code>_FORTIFY_SOURCE=3</code> builtin detects buffer overflows in many more places than <code>_FORTIFY_SOURCE=2</code>.</p> <p>The implementation of <code>__builtin_dynamic_object_size</code> in GCC is compatible with <code>__builtin_object_size</code>, and the two are therefore interchangeable, especially in the case of fortification. Whenever possible, the builtin computes a precise object size expression. When the builtin does not determine the size exactly, it returns either a maximum or minimum size estimate, depending on the size type argument.</p> <p>This code snippet demonstrates the key advantage of returning precise values:</p> <pre><code class="cpp">#include <string.h>
#include <stdbool.h>
#include <stdlib.h>

char *b;
char buf1[21];

char *__attribute__ ((noinline)) do_set (bool cond)
{
  char *buf = buf1;

  if (cond)
    buf = malloc (42);

  memset (buf, 0, 22);
  return buf;
}

int main (int argc, char **argv)
{
  b = do_set (false);
  return 0;
}</code></pre> <p>The program runs to completion when built with <code>-D_FORTIFY_SOURCE=2</code>:</p> <pre><code>gcc -O -D_FORTIFY_SOURCE=2 -o sample sample.c</code></pre>
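<p>For comparison, the build at the new level changes only the macro value:</p> <pre><code>gcc -O -D_FORTIFY_SOURCE=3 -o sample sample.c</code></pre>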
<p>But the program aborts when built with <code>-D_FORTIFY_SOURCE=3</code> and outputs the following message:</p> <pre><code>*** buffer overflow detected ***: terminated
Aborted (core dumped)</code></pre> <p>The key enhancement stems from the difference in behavior between <code>__builtin_object_size</code> and <code>__builtin_dynamic_object_size</code>. <code>_FORTIFY_SOURCE=2</code> uses <code>__builtin_object_size</code> and returns the maximum estimate for object size at pointer <code>buf</code>, which is 42. Hence, GCC assumes that the <code>memset</code> operation is safe at compile time and does not add a call to check the buffer size at run time.</p> <p>However, GCC with <code>_FORTIFY_SOURCE=3</code> invokes <code>__builtin_dynamic_object_size</code> to emit an expression that returns the precise size of the buffer that <code>buf</code> points to at that point in the program. As a result, GCC realizes that the call to <code>memset</code> might not be safe. Thus, the compiler inserts a call to <code>__memset_chk</code> into the running code with that size expression as the bound for <code>buf</code>.</p> <h2>2. Better fortification coverage</h2> <p>Building distribution packages with <code>_FORTIFY_SOURCE=3</code> revealed several issues that <code>_FORTIFY_SOURCE=2</code> missed. Surprisingly, not all of these issues were straightforward buffer overflows. The improved fortification also encountered issues in the GNU C library (glibc) and raised interesting questions about object lifetimes.</p> <p>Thus, the benefit of improved fortification coverage has implications beyond buffer overflow mitigation. I will explain the outcomes of the increased coverage of <code>_FORTIFY_SOURCE=3</code> in the following sections.</p> <h3>More trapped buffer overflows</h3> <p>Building applications with <code>_FORTIFY_SOURCE=3</code> detected many simple buffer overflows, such as the <a href="https://bugzilla.redhat.com/show_bug.cgi?id=2115476">off-by-one access in clisp</a> issue.
We expected these revelations, which strengthened our justification for building applications with <code>_FORTIFY_SOURCE=3</code>.</p> <p>To further support the use of <code>_FORTIFY_SOURCE=3</code> to improve fortification, we used the <a href="https://github.com/siddhesh/fortify-metrics">Fortify metrics</a> GCC plugin to estimate the number of times _FORTIFY_SOURCE=3 resulted in a call to a checking function (<code>__memcpy_chk</code>, <code>__memset_chk</code>, etc.). We used the Fedora test distribution and some of the <code>Server</code> package group as the sample, which consisted of 96 packages. The key metric is fortification coverage, defined as the ratio of <code>__builtin_object_size</code> calls that resulted in a successful size determination to the total number of <code>__builtin_object_size</code> calls. The plugin also shows the number of successful calls if <code>__builtin_dynamic_object_size</code> is used instead of <code>__builtin_object_size</code>, allowing us to infer the fortification coverage if all <code>__builtin_object_size</code> calls were replaced with <code>__builtin_dynamic_object_size</code>.</p> <p>In this short study, we found that <code>_FORTIFY_SOURCE=3</code> improved fortification by nearly 4 times. For example, the Bash shell went from roughly 3.4% coverage with <code>_FORTIFY_SOURCE=2</code> to nearly 47% with <code>_FORTIFY_SOURCE=3</code>. This is an improvement of nearly 14 times. Also, fortification of programs in <code>sudo</code> went from a measly 1.3% to 49.57% — a jump of almost 38 times!</p> <h3>The discovery of bugs in glibc</h3> <p>The increased coverage of <code>_FORTIFY_SOURCE=3</code> revealed programming patterns in application programs that tripped the fortification without necessarily causing a buffer overflow. Some of these were bugs in glibc; for the rest, we had to either explain why we did not support the pattern or discover ways to discourage it.</p> <p>One example is <code>wcrtomb</code>, where glibc makes stronger assumptions about the object size passed than POSIX allows. Specifically, glibc assumes that the buffer passed to <code>wcrtomb</code> is always at least <code>MB_CUR_MAX</code> bytes long. In contrast, the POSIX description makes no such assumption. Due to this discrepancy, any application that passed a smaller buffer would potentially make <code>wcrtomb</code> overflow the buffer during conversion. Then the fortified version <code>__wcrtomb_chk</code> aborts with a buffer overflow, expecting a buffer that is <code>MB_CUR_MAX</code> bytes long. We fixed this bug in glibc-2.36 by making glibc conform to POSIX.</p> <p><code>_FORTIFY_SOURCE=3</code> revealed another pattern. Applications such as systemd used <code>malloc_usable_size</code> to determine available space in objects and then used the residual space. The glibc manual discourages this type of usage, dictating that <code>malloc_usable_size</code> is for diagnostic purposes only. But applications use the function as a hack to avoid reallocating buffers when there is space in the underlying malloc chunk. The implementation of <code>malloc_usable_size</code> needs to be fixed to return the allocated object size instead of the chunk size in non-diagnostic use. Alternatively, another solution is to deprecate the function.
But that is a topic for discussion by the glibc community.</p> <h3>Strict C standards compliance</h3> <p>One interesting use case exposed by <code>_FORTIFY_SOURCE=3</code> raised the question of object lifetimes and what developers can do with freed pointers. The bug in question was in <a href="https://sourceforge.net/p/autogen/bugs/212/">AutoGen</a>, which used a pointer value after reallocation to determine whether the same chunk was extended to get the new block of memory. This practice allowed the developer to skip copying over some pointers to optimize for performance. At the same time, the program continued using the same pointer, not the <code>realloc</code> call result, since the old pointer did not change.</p> <p>Seeing that the old pointer continued without an update, the compiler assumed that the object size remained the same. How could it know otherwise? The compiler then failed to account for the reallocation, resulting in an abort due to the perceived buffer overflow.</p> <p>Strictly speaking, the C standards prohibit using a pointer to an object after its lifetime ends. It should neither be read nor dereferenced. In this context, it is a bug in the application.</p> <p>However, this idiom is commonly used by developers to prevent making redundant copies. Future updates to <a href="https://gcc.gnu.org/bugzilla/show_bug.cgi?id=105217">GCC</a> may account for this idiom wherever possible, but applications should also explicitly indicate object lifetimes to remain compliant. In the AutoGen example, a simple fix is to unconditionally refresh the pointer after reallocation, ensuring the compiler can detect the new object size.</p> <h2>The gains of improved security coverage outweigh the cost</h2> <p>Building with <code>_FORTIFY_SOURCE=3</code> may impact the size and performance of the code. Since <code>_FORTIFY_SOURCE=2</code> generated only constant sizes, its overhead was negligible. However, <code>_FORTIFY_SOURCE=3</code> may generate additional code to compute object sizes. These additions may also cause secondary effects, such as register pressure during code generation. The additional code also tends to increase the size of the resultant binaries.</p> <p>We need a proper study of performance and code size to understand the magnitude of the impact created by the additional runtime code generation of <code>_FORTIFY_SOURCE=3</code>. However, the performance and code size overhead may well be worth it, given the magnitude of the improvement in security coverage.</p> <h2>The future of buffer overflow detection</h2> <p><code>_FORTIFY_SOURCE=3</code> has led to significant gains in security mitigation. GCC 12 support brings those gains to distribution builds. But the new level of fortification also revealed interesting issues that require additional work to support correctly. For more background information, check out my previous article, <a href="https://www.redhat.com/en/blog/enhance-application-security-fortifysource">Enhance application security with FORTIFY_SOURCE</a>.</p> <p>Object size determination and fortification remain relevant areas for improvements in compiler toolchains.
The toolchain team at Red Hat continues to be involved in the GNU and LLVM communities to make these improvements.</p> The post <a href="https://developers.redhat.com/articles/2022/09/17/gccs-new-fortification-level" title="GCC's new fortification level: The gains and costs">GCC's new fortification level: The gains and costs</a> appeared first on <a href="https://developers.redhat.com/blog" title="Red Hat Developer">Red Hat Developer</a>. <br /><br />Siddhesh Poyarekar2022-09-17T22:00:00ZMy advice for updating Docker Hub's OpenJDK imageTim Ellison3bcb8704-1585-4386-8123-ee3bcc0890432022-09-16T18:00:00Z2022-09-16T18:00:00Z<p>The Java runtime environment in your containers could stop receiving updates in the coming months. It's time to take action. This article explains the decisions that led to this issue and proposes a solution.</p> <h2>OpenJDK and Java SE updates</h2> <p><a href="https://openjdk.org/">OpenJDK</a> is an open source implementation of the Java Platform, Standard Edition (Java SE), on which multiple companies and contributors collaborate.</p> <p>A project at OpenJDK represents each new feature release of the Java SE specification. Subsequent updates to those features, including functional and security fixes, are led by maintainers working in the <a href="https://openjdk.org/projects/jdk-updates/">JDK updates project</a>. Long-term supported releases such as Java SE 8 (since March 2014), Java SE 11 (since Sept 2018), and Java SE 17 (since Sept 2021) undergo a quarterly release update under the guidance of a lead maintainer.</p> <p>The <a href="https://openjdk.org/projects/jdk-updates/maintainers.html">repository maintainers' role</a> is to ensure that updates are both necessary and appropriate for deployed releases. They consider the opinions of multiple contributors when making such update decisions. Many vendors and distributors of Java SE subsequently build from the OpenJDK source code to provide new releases of their own branded Java SE offerings.</p> <p>Andrew Haley is the lead maintainer for Java 8 updates and Java 11 updates at Red Hat, and Goetz Lindenmaier (SAP) is the lead maintainer for Java 17 updates. Update maintainers affiliated with companies that provide commercially supported distributions of OpenJDK based on Java SE work as independent contributors to the project.</p> <h2>Docker Hub deprecates OpenJDK images</h2> <p>For many years, the official <a href="https://hub.docker.com/">Docker Hub</a> image builders took OpenJDK Java SE update binaries from <a href="https://adoptium.net/">Eclipse Adoptium</a> and other locations to build their own image. But in July 2022, the Docker Hub image builders <a href="https://hub.docker.com/_/openjdk">announced the deprecation</a> of this popular image.</p> <p>Now, Docker asks users to obtain their builds of OpenJDK, either from a commercial Java vendor or directly from the Adoptium project. There will be no further updates to the existing OpenJDK image, so users risk falling behind with functional and security updates to their Java SE usage unless they move to an alternate provider. I believe the official <a href="https://hub.docker.com/_/eclipse-temurin">Eclipse Temurin image</a> maintained by the Adoptium project is the obvious choice for a replacement image.</p> <h2>Eclipse Adoptium builds JDKs</h2> <p>OpenJDK does not provide binary updates directly from the update projects. 
Since July 2022, these long-term supported Java update projects have depended upon <a href="https://adoptium.net/">Eclipse Adoptium</a> to build and distribute consumable OpenJDK binaries.</p> <p>Adoptium is a project dedicated to building, testing, and distributing up-to-date and ready-to-use OpenJDK binaries under an open source license. Adoptium calls its builds of OpenJDK Temurin. They are available across a broad range of processors and operating systems. These Temurin binaries have over half a billion downloads and have earned the trust of enterprise production environments worldwide. A vendor-independent <a href="https://adoptium.net/members">working group</a> based at the Eclipse software foundation leads Adoptium.</p> <p>The Adoptium community provides binaries built directly from OpenJDK source code. These Temurin binaries are available as direct downloads, installers, or container images and are faithful representations of the OpenJDK update source built under controlled conditions.</p> <p>The <a href="https://hub.docker.com/_/eclipse-temurin">official Docker Hub Temurin images</a> contain the latest releases of the OpenJDK updates for several Java SE versions, thoroughly tested with various applications. The images work as direct drop-in replacements for the OpenJDK images. Some OpenJDK images already contain Temurin binaries.</p> <h2>How to move from OpenJDK images to Eclipse Temurin images</h2> <p>The Docker Hub's deprecation decision presents a problem. But there is a solution. We recommend moving from the <a href="https://hub.docker.com/_/openjdk">OpenJDK image</a> to <a href="https://hub.docker.com/_/eclipse-temurin">the official Docker Hub Eclipse Temurin image</a>.</p> <p>The process is simple. All you have to do is identify the <code>FROM</code> lines in Dockerfiles such as this:</p> <pre> <code class="java">FROM openjdk:17</code></pre> <p>Change the lines as follows:</p> <pre> <code class="java">FROM eclipse-temurin:17</code></pre> <p>The process for changing the use of images other than version 17 is equivalent. You can <a href="https://github.com/adoptium/adoptium-support/issues">report</a> issues to the Adoptium community.</p>
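<p>If you want to try the replacement image before editing any Dockerfiles, you can pull it and check the Java runtime it ships. This quick check uses the same tag as the example above:</p> <pre> <code class="language-bash">$ docker pull eclipse-temurin:17
$ docker run --rm eclipse-temurin:17 java -version</code></pre>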
<h2>Red Hat support</h2> <p>We encourage everyone to switch to Eclipse Temurin. Many <a href="https://github.com/jenkinsci/docker/pull/1429">application images</a> and <a href="https://github.com/javastacks/spring-boot-best-practice/blob/fc6709cf2ec2fc00b4dfae7210ce503f9c10560c/spring-boot-docker/Dockerfile">examples of best practices</a> have successfully made the change.</p> <p>Red Hat recently <a href="https://developers.redhat.com/articles/2022/08/24/red-hat-expands-support-java-eclipse-temurin">announced direct support for Temurin</a> in development and production as part of Red Hat Runtimes, Red Hat OpenShift, and Red Hat Build of OpenJDK. Red Hat support assures customers that the move to Temurin will be smooth, allowing you to continue focusing on building products that integrate and automate modern business applications and processes.</p> The post <a href="https://developers.redhat.com/articles/2022/09/16/updating-docker-hubs-openjdk-image" title="My advice for updating Docker Hub's OpenJDK image ">My advice for updating Docker Hub's OpenJDK image </a> appeared first on <a href="https://developers.redhat.com/blog" title="Red Hat Developer">Red Hat Developer</a>. <br /><br />Tim Ellison2022-09-16T18:00:00ZRegex how-to: Quantifiers, pattern collections, and word boundariesBob Reselman2182a29a-626a-444f-a313-1e4a14d6eeb72022-09-16T07:00:00Z2022-09-16T07:00:00Z<p>Filtering and searching text with regular expressions is an important skill for every developer. Regular expressions can be tricky to master. To work with them effectively, you need a detailed understanding of their symbols and syntax.</p> <p>Fortunately, learning to work with regular expressions can be incremental. You don't need to learn everything all at once to do useful work. Rather, you can start with the basics and then move into more complex topics while developing your understanding and using what you know as you go along.</p> <p>This article is the second in a series. The <a href="https://developers.redhat.com/articles/2022/08/03/beginners-guide-regular-expressions-grep">first article</a> introduced some basic elements of regular expressions: The basic metacharacters (<code>.*^$\s\d</code>) as well as the escape metacharacter <code>\</code>.</p> <p>This article introduces some more advanced syntax: quantifiers, pattern collections, groups, and word boundaries. If you haven't read the first article, you might want to review it now before continuing with this content.</p> <p>These articles demonstrate regular expressions by piping string output from an <a href="https://www.redhat.com/sysadmin/essential-linux-commands"><code>echo</code></a> command to the <a href="https://www.redhat.com/sysadmin/how-to-use-grep"><code>grep</code></a> utility. The <code>grep</code> utility uses a regular expression to filter content. The benefit of demonstrating regular expressions using <code>grep</code> is that you don't need to set up any special programming environment. You can execute an example of a regular expression immediately by copying and pasting the code directly into your terminal window running under Linux.</p> <h2>What's the difference between a regular character and a metacharacter?</h2> <p>A regular character is a letter, digit, or punctuation used in everyday text. When you declare a regular character in a regular expression, the regular expression engine searches content for that declared character. For example, were you to declare the regular character <code>h</code> in a regular expression, the engine would look for occurrences of the character <code>h</code>.</p> <p>A metacharacter is a placeholder symbol. For example, the metacharacter <code>.</code> (dot) represents "any character," and means <em>any character matches here.</em> The metacharacter <code>\d</code> represents a numerical digit, and means <em>any digit matches here.</em> Thus, when you use a metacharacter, the regex engine searches for characters that comply with the particular metacharacter or set of metacharacters.</p>
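<p>The following quick example contrasts the two, in the same style as the rest of this article (the sample string is shortened here for brevity):</p> <pre> <code class="language-bash">$ echo "Chris has 1 bird" | grep -Po 'h'
$ echo "Chris has 1 bird" | grep -Po '\d'</code></pre> <p>The first command prints <code>h</code> twice, once for each literal <code>h</code> in the string. The second prints <code>1</code>, because <code>\d</code> matches whatever digit appears.</p>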
<h2>What are quantifiers?</h2> <p>A quantifier is a syntactic structure in regular expressions that indicates the number of times a character occurs in sequence in the input text. There are two ways to declare a quantifier. One way is:</p> <pre> <code class="java">x{n}</code></pre> <p>In this syntax:</p> <ul> <li><code>x</code> is the character to match.</li> <li><code>n</code> indicates the number of times the character needs to occur.</li> </ul> <p>A related syntax declares a quantifier with a minimum and maximum range:</p> <pre> <code class="java">x{n,m}</code></pre> <p>In this syntax:</p> <ul> <li><code>x</code> is the character to match.</li> <li><code>n</code> indicates the minimum number of occurrences and <code>m</code> indicates the maximum number of occurrences.</li> </ul> <p>The following example uses a quantifier to create a matching pattern that identifies two occurrences of the regular character <code>g</code> in sequence:</p> <pre> <code class="language-bash">$ teststr="Jeff and the pet Lucky. Gregg and the dog Fido. Chris has 1 bird named Tweety."
$ echo $teststr | grep -Po 'g{2}'</code></pre> <p>The regular expression matches the characters highlighted in bold in the following text:</p> <p><code>Jeff and the pet Lucky. Gre<strong>gg</strong> and the dog Fido. Chris has 1 bird named Tweety.</code></p> <p>Thus, the regular expression returns the following result:</p> <pre> <code class="java">gg</code></pre> <p>The following example uses a quantifier to create a matching pattern that identifies a minimum and a maximum for occurrences of the character <code>g</code> in a sequence. The minimum length is 1 and the maximum is 2. The regular expression is processed in a case-insensitive manner, as indicated by the <code>-i</code> option to <code>grep</code>:</p> <pre> <code class="language-bash">$ teststr="Jeff and the pet Lucky. Gregg and the dog Fido. Chris has 1 bird named Tweety."
$ echo $teststr | grep -Poi 'g{1,2}'</code></pre> <p>The regular expression matches the characters highlighted in bold in the following text:</p> <p><code>Jeff and the pet Lucky. <strong>G</strong>re<strong>gg</strong> and the do<strong>g</strong> Fido. Chris has 1 bird named Tweety.</code></p> <p>Because each sequence is identified and returned on a one-by-one basis, the output is:</p> <pre> <code class="java">G
gg
g</code></pre> <h2>What are pattern collections?</h2> <p>A pattern collection is a syntactic structure that describes a <a href="https://www.gnu.org/software/grep/manual/html_node/Character-Classes-and-Bracket-Expressions.html">character class</a>. A character class is a set of metacharacters and regular characters that combine to create a matching pattern that, like a metacharacter, can match many different characters in text. A pattern collection is defined between square brackets (<code>[ ]</code>).</p> <p>The following example uses the <code>[A-Z]</code> character class, which denotes any uppercase character from <code>A</code> to <code>Z</code> inclusive, to create a pattern collection that matches only uppercase characters in the given text:</p> <pre> <code class="language-bash">$ teststr="Jeff and the pet Lucky. Gregg and the dog Fido. Chris has 1 bird named Tweety."
$ echo $teststr | grep -Po '[A-Z]'</code></pre> <p>The regular expression matches the characters highlighted in bold in the following text:</p> <p><code><strong>J</strong>eff and the pet <strong>L</strong>ucky. <strong>G</strong>regg and the dog <strong>F</strong>ido.
<strong>C</strong>hris has 1 bird named <strong>T</strong>weety.</code></p> <p>The output is:</p> <pre> <code class="java">J
L
G
F
C
T</code></pre> <p>The following example uses the <code>[0-9]</code> character class, which denotes any digit between <code>0</code> and <code>9</code>, to create a pattern collection that matches only numeric characters in the given text:</p> <pre> <code class="language-bash">$ teststr="Jeff and the pet Lucky. Gregg and the dog Fido. Chris has 1 bird named Tweety."
$ echo $teststr | grep -Po '[0-9]'</code></pre> <p>The regular expression matches the characters highlighted in bold in the following text:</p> <p><code>Jeff and the pet Lucky. Gregg and the dog Fido. Chris has <strong>1</strong> bird named Tweety.</code></p> <p>The output is:</p> <pre> <code class="java">1</code></pre> <p>The following example uses a pattern collection that matches certain exact regular characters within a set of regular characters. The regular expression says: <em>Match any <code>f</code>, <code>G</code>, or <code>F</code></em>:</p> <pre> <code class="language-bash">$ teststr="Jeff and the pet Lucky. Gregg and the dog Fido. Chris has 1 bird named Tweety."
$ echo $teststr | grep -Po '[fGF]'</code></pre> <p>The regular expression matches the characters highlighted in bold in the following text:</p> <p><code>Je<strong>ff</strong> and the pet Lucky. <strong>G</strong>regg and the dog <strong>F</strong>ido. Chris has 1 bird named Tweety.</code></p> <p>The output is:</p> <pre> <code class="java">f
f
G
F</code></pre> <p>The following example uses a pattern collection with both metacharacters and regular characters. The logic behind the regular expression says: <em>Match any <code>g</code>, <code>r</code>, or <code>e</code> followed by a space character and then the string <code>Fido</code></em>:</p> <pre> <code class="language-bash">$ teststr="Jeff and the pet Lucky. Gregg and the dog Fido. Chris has 1 bird named Tweety."
$ echo $teststr | grep -Po '[gre]\sFido'</code></pre> <p>The regular expression matches the characters highlighted in bold in the following text:</p> <p><code>Jeff and the pet Lucky. Gregg and the do<strong>g Fido</strong>. Chris has 1 bird named Tweety.</code></p> <p>The output is:</p> <pre> <code class="java">g Fido</code></pre> <p>The following example uses two pattern collections along with metacharacters that are outside them. The regular expression says: <em>Match a numeric character, then continue matching any character zero or many times that is followed by an uppercase character</em>. The pattern collection <code>[0-9]</code> indicates any numeral from <code>0</code> to <code>9</code>. The metacharacters <code>.*</code> indicate zero or more instances of any character, and the pattern collection <code>[A-Z]</code> indicates any uppercase character from <code>A</code> to <code>Z</code>:</p> <pre> <code class="language-bash">$ teststr="Jeff and the pet Lucky. Gregg and the dog Fido. Chris has 1 bird named Tweety."
$ echo $teststr | grep -Po '[0-9].*[A-Z]'</code></pre> <p>The regular expression matches the characters highlighted in bold in the following text:</p> <p><code>Jeff and the pet Lucky. Gregg and the dog Fido. Chris has <strong>1 bird named T</strong>weety.</code></p> <p>The output is:</p> <pre> <code class="java">1 bird named T</code></pre> <p>The following example uses the negation metacharacter <code>^</code> within a pattern collection.
The negation metacharacter indicates that the succeeding characters are <em>not</em> to be matched when the regular expression is being executed.</p> <p class="Indent1"><strong>Note</strong>: As you might remember from the first article in this series, <code>^</code> is the same metacharacter that indicates a line start—but only when used <em>outside</em> square brackets. The <code>^</code> metacharacter indicates negation <em>only</em> when it appears within the square brackets (<code>[ ]</code>) that declare a pattern collection.</p> <p>The following collection pattern says: <em>Match any character that is not <code>a</code>, <code>e</code>, <code>i</code>, <code>o</code>, or <code>u</code></em>:</p> <pre> <code class="language-bash">$ teststr="Jeff and the pet Lucky."
$ echo $teststr | grep -Po '[^aeiou]'</code></pre> <p>The regular expression matches the characters highlighted in bold in the following text. The text is underlined to make the space characters apparent:</p> <p><code><u><strong>J</strong>e<strong>ff </strong>a<strong>nd th</strong>e<strong> p</strong>e<strong>t L</strong>u<strong>cky.</strong></u></code></p> <p>Space characters in the following output are also underlined to make them apparent. Space characters are matched by this regular expression:</p> <pre> <code class="language-bash">J
f
f
_
n
d
_
t
h
_
p
t
_
L
c
k
y
.</code></pre> <h2>Groups</h2> <p>A group in a regular expression is, as the name implies, a group of characters declared according to a specific definition. A group declaration can include metacharacters and regular characters. A group is declared between open and closed parentheses like this: <code>( )</code>.</p> <p>The following example uses a <code>.</code> (dot) metacharacter, which indicates "any character." The declared group says: <em>Match any three characters as a group and return each group</em>:</p> <pre> <code class="language-bash">$ teststr="Jeff and the pet Lucky. Gregg and the dog Fido. Chris has 1 bird named Tweety."
$ echo $teststr | grep -Po '(...)'</code></pre> <p>The regular expression matches the characters highlighted in alternating bold and non-bold text as shown in the following text. Again, the text is underlined to make the space characters apparent:</p> <p><code><u><strong>Jef</strong>f a<strong>nd </strong>the<strong> pe</strong>t L<strong>uck</strong>y. <strong>Gre</strong>gg <strong>and</strong> th<strong>e d</strong>og <strong>Fid</strong>o. <strong>Chr</strong>is <strong>has</strong> 1 <strong>bir</strong>d n<strong>ame</strong>d T<strong>wee</strong>ty.</u></code></p> <p>Because the group is identified and returned on a one-by-one basis, the output is:</p> <pre> <code class="language-bash">Jef
f_a
nd_
the
_pe
t_L
uck
y._
Gre
gg_
and
_th
e_d
og_
Fid
o._
Chr
is_
has
_1_
bir
d_n
ame
d_T
wee
ty.</code></pre> <p>The following example uses the <code>.</code> (dot) metacharacter along with the regular character <code>y</code> to define a group of three characters, of which the first two characters can be anything and the third character must be <code>y</code>.</p> <pre> <code class="language-bash">$ teststr="Jeff and the pet Lucky. Gregg and the dog Fido. Chris has 1 bird named Tweety."
$ echo $teststr | grep -Po '(..y)'</code></pre> <p>The regular expression matches the characters highlighted in bold in the following text:</p> <p><code>Jeff and the pet Lu<strong>cky</strong>. Gregg and the dog Fido.
<p>The following example uses the <code>.</code> (dot) metacharacter along with the regular character <code>y</code> to define a group of three characters, of which the first two characters can be anything and the third character must be <code>y</code>:</p> <pre> <code class="language-bash">$ teststr="Jeff and the pet Lucky. Gregg and the dog Fido. Chris has 1 bird named Tweety." $ echo $teststr | grep -Po '(..y)'</code></pre> <p>The regular expression matches the characters highlighted in bold in the following text:</p> <p><code>Jeff and the pet Lu<strong>cky</strong>. Gregg and the dog Fido. Chris has 1 bird named Twe<strong>ety</strong>.</code></p> <p>The output is:</p> <pre> <code class="java">cky ety</code></pre> <p>The following example demonstrates a regular expression group that uses the <code>.</code> (dot) metacharacter along with the <code>\d</code> metacharacter to define a group of five characters, of which the first two characters can be any characters, the third character is a digit, and the last two characters can be any characters:</p> <pre> <code class="language-bash">$ teststr="Jeff and the pet Lucky. Gregg and the dog Fido. Chris has 1 bird named Tweety." $ echo $teststr | grep -Po '(..\d..)'</code></pre> <p>The regular expression matches the characters highlighted in bold in the following text. The text is underlined to make the space characters apparent.</p> <p><code><u>Jeff and the pet Lucky. Gregg and the dog Fido. Chris ha<strong>s 1 b</strong>ird</u></code><code><u> named Tweety.</u></code></p> <p>The output is:</p> <pre> <code class="java">s<u> </u>1<u> </u>b</code></pre> <h2>Word boundaries</h2> <p>A word character is declared using the metacharacters <code>\w</code>. A word character is any uppercase character, lowercase character, numeric character, or the connector character, which is the underscore (<code>_</code>).</p> <p>A word boundary is defined as a transition between a word character and a beginning space, an ending space, or a punctuation mark ( <code>.!?</code> ). A word boundary is declared using the metacharacters <code>\b</code>.</p> <p>The following example demonstrates a regular expression that uses the metacharacters <code>\w+</code> to find occurrences of words within text. The metacharacter <code>+</code> indicates one or more occurrences of a character. The logic in play is: <em>Match one or more word characters</em>:</p> <pre> <code class="language-bash">$ teststr="Jeff and the pet Lucky." $ echo $teststr | grep -Po '\w+'</code></pre> <p>The regular expression matches the characters highlighted in bold in the following text:</p> <p><code><strong>Jeff</strong> <strong>and</strong> <strong>the</strong> <strong>pet</strong> <strong>Lucky</strong></code></p> <p>Because each word is identified and returned on a one-by-one basis, the output is:</p> <pre> <code class="java">Jeff and the pet Lucky</code></pre> <p>The following example uses a word boundary to find occurrences of the regular character <code>a</code> that appears at the beginning of a word:</p> <pre> <code class="language-bash">$ teststr="Jeff and the pet Lucky. Gregg and the dog Fido. Chris has 1 bird named Tweety." $ echo $teststr | grep -Po '\ba'</code></pre> <p>The regular expression matches the characters highlighted in bold in the following text:</p> <p><code>Jeff <strong>a</strong>nd the pet Lucky. Gregg <strong>a</strong>nd the dog Fido. Chris has 1 bird named Tweety.</code></p> <p>The output is:</p> <pre> <code class="java">a a</code></pre>
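<p>One practical aside of our own, not from the original article: because the <code>-o</code> option prints each match on its own line, you can pipe the matches to <code>wc -l</code> to count them. For example, to count the words found by <code>\w+</code>:</p> <pre> <code class="language-bash">$ teststr="Jeff and the pet Lucky." $ echo $teststr | grep -Po '\w+' | wc -l</code></pre> <p>The output is:</p> <pre> <code class="java">5</code></pre>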
<p>The following example uses a word boundary to find occurrences of the regular character <code>y</code> that appear at the end of a word:</p> <pre> <code class="language-bash">$ teststr="Jeff and the pet Lucky. Gregg and the dog Fido. Chris has 1 bird named Tweety." $ echo $teststr | grep -Po 'y\b'</code></pre> <p>The regular expression matches the characters highlighted in bold in the following text. Note that punctuation marks at the end of a word are not considered word characters and are excluded from the match:</p> <p><code><u>Jeff and the pet Luck<strong>y</strong>. Gregg and the dog Fido. Chris has 1 bird named Tweet<strong>y</strong>.</u></code></p> <p>The output is:</p> <pre> <code class="java">y y</code></pre> <p>The following example uses a word boundary to find occurrences of the regular characters <code>Tweety</code> that appear at the end of a word:</p> <pre> <code class="language-bash">$ teststr="Jeff and the pet Lucky. Gregg and the dog Fido. Chris has 1 bird named Tweety." $ echo $teststr | grep -Po 'Tweety\b'</code></pre> <p>The regular expression matches the characters highlighted in bold in the following text. Again, notice that punctuation marks at the end of a word are excluded:</p> <p><code>Jeff and the pet Lucky. Gregg and the dog Fido. Chris has 1 bird named <strong>Tweety</strong>.</code></p> <p>The output is:</p> <pre> <code class="java">Tweety</code></pre> <p>The following example contains a regular expression group that uses word boundaries to find occurrences of words that start with the regular character <code>a</code> and end with the regular character <code>d</code>. The regular expression uses the metacharacters <code>\w*</code> to declare all occurrences of word characters:</p> <pre> <code class="language-bash">$ teststr="Jeff and the pet Lucky. Gregg and the dog Fido. Chris has 1 bird named Tweety." $ echo $teststr | grep -Po '\ba\w*d\b'</code></pre> <p>The regular expression matches the characters highlighted in bold in the following text.</p> <p><code>Jeff <strong>and</strong> the pet Lucky. Gregg <strong>and</strong> the dog Fido. Chris has 1 bird named Tweety.</code></p> <p>The output is:</p> <pre> <code class="java">and and</code></pre> <h2>Grouping and specifying multiple characters simultaneously extend regular expressions</h2> <p>This article gave you an introduction to working with quantifiers, pattern collections, groups, and word boundaries. You learned to use quantifiers to declare a range of character occurrences to match. Also, you learned that pattern collections enable you to declare character classes that match characters in a generic manner. Groups execute matches that declare a particular set of characters. Word boundaries allow you to make matches by working within the boundaries of space characters and punctuation marks.</p> <p>These intermediate concepts will bring additional power and versatility to your work with regular expressions. But there's a lot more to learn. Fortunately, as mentioned at the beginning of this article, you can put the concepts and techniques discussed here to use immediately.</p> <p>The key is to start practicing what you've learned now. Mastery is the result of small, incremental accomplishments. As with any skill, the more you practice, the better you'll get.</p> The post <a href="https://developers.redhat.com/articles/2022/09/16/regex-how-quantifiers-pattern-collections-and-word-boundaries" title="Regex how-to: Quantifiers, pattern collections, and word boundaries">Regex how-to: Quantifiers, pattern collections, and word boundaries</a> appeared first on <a href="https://developers.redhat.com/blog" title="Red Hat Developer">Red Hat Developer</a>. <br /><br />Bob Reselman2022-09-16T07:00:00ZNew Keycloak maintainer: Michal HajasStian Thorgersenhttps://www.keycloak.org/2022/09/mhajas2022-09-16T00:00:00ZWe are pleased to welcome Michal Hajas as an official maintainer of Keycloak.
Michal has been with the Keycloak project since September 2015, and since then he has contributed to almost every component of Keycloak - core server, authorization services, adapters, JavaScript, code auto-generation, legacy operator - either by review or by code contribution. Since his first involvement, he has steadily contributed code. Lately, he has designed and co-developed Hot Rod storage and has been instrumental in establishing the new map storage overall. He reviews community contributions and offers help to finalize PRs, participates in community discussions and issue triaging, and he understands and respects the code of conduct and helps maintain it in his reviews.Stian ThorgersenMultiple repositories Pull Request chaos, crawl them all in one single placeEnrique Mingorance Canohttps://blog.kie.org/2022/09/multiple-repositories-pull-request-chaos-crawl-them-all-in-one-single-place.html2022-09-15T17:00:00ZFlickr chaos – https://bit.ly/3Q2zfYS Software engineering projects very frequently involve multiple repositories for the same or for different, somehow related projects. A lot of people push their pull requests to any of them, so it is easy to lose track of the situation, or you have to constantly browse all of the repositories to get a clear picture of what is going on. That's the situation we had here on the Red Hat Business Automation team, and we solved it by creating a helpful tool that you can use for your own set of projects: easy, quick, and free. THE CROSS-REPO PRS PROBLEM This is already covered by an earlier blog entry, so feel free to read it in case you are not familiar with this kind of situation or these concepts. THE CHAIN-STATUS SOLUTION So we said to ourselves: what if we had a centralized place, a web page for instance, where we could see at a quick glance the situation of all the pull requests for all of our repositories? Chain-status was the solution. Prerequisites: * It has to solve not only our particular problem, so anyone can use it. * It has to be public, no authentication required. * It has to be fast; we can't wait for the whole pull request set to be crawled every time anyone gets into the application. * Multiple streams or different project sets can be handled in different views, like different products or product versions, from the same place. * The content can be filtered out. So the conclusion was to create, on one hand, a React web page to consume the pull request information from a static report and, on the other hand, a tool to generate that report based on GitHub information. This way: * The information will be produced asynchronously, the frequency will be up to the user/developer, and GitHub API rate limit problems will be avoided. * The information can be taken even from private repositories and be exposed publicly, and no authentication will be required. * There is no waiting time while information is requested from the GitHub service. * The web page (HTML+JS files) can be stored on any web service, even on free services. * No backend server is required. RUNNING EXAMPLE You can check the KIE RHBA status web page online. [Image: Chain Status web tool screenshot] HOW CAN I ADD IT TO MY ORGANIZATION? The best way to integrate this tool in your organization or set of repositories is by using the provided configurable GitHub actions. In particular, this tool comes with two main easy-to-use actions: * Generate App: this action builds the React web application, copies it inside your repository, and publishes it using the NPM tool.
* Generate Data: given a project structure and some project information as input, this action is focused on generating the data report, gathering the information using the GitHub API. This report is then used by the web application as a content source. Thus, in order to use these actions in your organization, you only have to add two workflows (one per action) to your main repository as follows: 1. Prerequisites: have a GitHub token properly configured in your organization (see the GitHub documentation on how to configure it). 2. Generate app workflow (generate_status_page.yaml): add the GitHub workflow for the web page generation; this should generally be run only once (or whenever there are changes to the web app look and feel).

name: Generate status page
on: workflow_dispatch
jobs:
  generate-status-page:
    if: github.repository_owner == '<OWNER>'
    concurrency:
      group: generate-status-page
      cancel-in-progress: true
    strategy:
      matrix:
        os: [ubuntu-latest]
      fail-fast: true
    runs-on: ubuntu-latest
    name: Generate status page
    steps:
      - name: Generate status page
        uses: kiegroup/chain-status/.ci/actions/generate-app@main
        with:
          info-md-url: "<PATH-TO-INFO>"
          github-token: "${{ secrets.GITHUB_TOKEN }}"
          gh-pages-branch: "gh-pages"

3. Generate data workflow (generate_status_page_data.yaml): add the periodic workflow that will continuously generate the data fetched by the web application.

name: Generate status page data
on:
  workflow_dispatch:
  schedule:
    - cron: '0 * * * *'
jobs:
  generate-status-page-data:
    if: github.repository_owner == '<OWNER>'
    concurrency:
      group: generate-status-page-data
      cancel-in-progress: true
    strategy:
      matrix:
        os: [ubuntu-latest]
      fail-fast: true
    runs-on: ubuntu-latest
    name: Generate status page data
    steps:
      - name: Generate status page data
        uses: kiegroup/chain-status/.ci/actions/generate-data@main
        with:
          definition-file: <PATH-TO-DEFINITION-FILE>
          # projects: <PROJECTS-LIST>
          title: <TITLE>
          subtitle: <SUBTITLE>
          base-branch-filter: <BRANCH-LIST>
          created-by: Github Action
          created-url: https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}
          logger-level: debug
          github-token: "${{ secrets.GITHUB_TOKEN }}"
          gh-pages-branch: "gh-pages"

As already introduced, the generate data flow relies on a project structure definition, which can be provided either using a build-chain definition file or a projects list: * Build-chain definition file (using the 'definition-file' field): a YAML definition file for cross-related, inter-dependent projects, introduced for the build-chain tool. Build-chain is already covered by an earlier blog entry, so feel free to read it if you want more details on it and on its definition files. * Projects list (using the 'projects' field): a comma-separated list of projects for which you would like to provide pull request statuses. [Still a Work in Progress] This was a brief explanation of how you can integrate this tool in your organization; if you need more details, feel free to reach the homepage, where you can find a step-by-step guide on how to integrate it, with some links to running examples.
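A small aside that is not part of the original post: both workflows above declare the workflow_dispatch trigger, so besides the hourly schedule and the web UI, you can also start them manually from a terminal with the GitHub CLI (the workflow file names are the ones used above):

gh workflow run generate_status_page.yaml
gh workflow run generate_status_page_data.yaml
gh run list --workflow=generate_status_page_data.yaml

The last command lists recent runs of the data workflow, which is a quick way to confirm that the scheduled generation is actually happening.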
ADDITIONAL FUNCTIONALITIES In addition to the pull request summary functionality, it is also possible to add multiple Jenkins status reports. The main advantage of this feature is that you can check the status of all your Jenkins jobs in a single place, making it easier to check which runs succeeded or failed, as well as the time and average time jobs are consuming. As an example, you can check the KIE RHBA daily builds page. To configure the Jenkins status reports feature, you can create a Jenkins pipeline that will generate and update the data periodically. You can schedule the Jenkins pipeline to run and keep the status updated based on your required demand. You can add the following steps as part of your Jenkins pipeline to generate and update the status report: 1. Clone the GitHub pages repository:

stage('Clone gh-pages repository') {
  steps {
    script {
      println "Checking out https://github.com/${ghPagesRepository}:${ghPagesBranch} into ${ghPagesRepoFolder} folder"
      sh "git clone -b ${ghPagesBranch} --single-branch https://github.com/${ghPagesRepository} ${ghPagesRepoFolder}"
    }
  }
}

2. Install the chain-status tool:

stage('Install chain-status tool') {
  steps {
    script {
      try {
        sh "npm install -g @kie/chain-status-action"
      } catch(e) {
        println '[WARNING] Error installing @kie/chain-status-action.'
      }
    }
  }
}

3. Generate the updated data:

stage('Generate data') {
  steps {
    script {
      dir(ghPagesRepoFolder) {
        sh "build-chain-status-report --jenkinsUrl ${jenkinsURL} --jobUrl ${jenkinsJobPath} -t ${projectTitle} -st ${projectSubtitle} --certFilePath ${jenkinsCertFile} --outputFolderPath ./data/ --skipZero -cb \"Jenkins Job\" -cu \"${env.BUILD_URL}\" --order 1001"
      }
    }
  }
}

4. Push changes to update the status report:

stage('Push changes to repository') {
  steps {
    script {
      println "Pushing changes to ${ghPagesRepository}:${ghPagesBranch}"
      dir(ghPagesRepoFolder) {
        withCredentials([usernamePassword(credentialsId: "${githubCredentialsId}", usernameVariable: 'GITHUB_USER', passwordVariable: 'GITHUB_TOKEN')]) {
          githubscm.setUserConfig("${GITHUB_USER}")
          sh("git config --local credential.helper \"!f() { echo username=\\$GITHUB_USER; echo password=\\$GITHUB_TOKEN; }; f\"")
          sh 'git add data/*'
          sh 'git commit -m "Generate Jenkins Data"'
          sh "git push origin ${ghPagesBranch}"
        }
      }
    }
  }
}

NEXT STEPS AND LIMITATIONS HISTORIC FUNCTIONALITY Since the generator tool records the status every day, we expect to offer a historic view functionality to be able to compare statuses between dates. TO COVER NOT ONLY GITHUB BUT OTHER REPOSITORY SERVICES Right now the generator tool only takes information from GitHub, but we expect to cover other kinds of services, like GitLab or Bitbucket. CONCLUSION We have been using this tool for our repositories for a year, and we can say it is a very useful tool that solves the cross-repo pull request summary problem. After a year of experience with the tool, we can say it offers: * The ability to constantly see the status of the different contributions from the different people. * Visibility into who is working on what, like which are my own open pull requests. * A quick check for obsolete contributions, so we can keep our repositories very clean. * A publicly available Jenkins job summary, no matter whether the Jenkins instance itself is accessible or not. * A quick check of how healthy our CI/CD setup is, thanks to the error index information from the tool.
* The ability to see related pull requests for every pull request, thanks to the cross-repo pull request functionality. USEFUL LINKS: [Chain status] [Build chain tool] [Build chain npm package] [Configuration reader] [RHBA definition and project tree files] [RHBA flows] The post appeared first on the KIE blog.Enrique Mingorance CanoHow to implement a job queue with RedisClement Escoffier (https://twitter.com/clementplop)https://quarkus.io/blog/redis-job-queue/2022-09-15T00:00:00Z2022-09-15T00:00:00ZIn a previous post on how to cache with Redis, we implemented a simple cache backed by Redis. That's just one use case of Redis. Redis is also used as a messaging server to implement the processing of background jobs or other kinds of messaging tasks. This post explores implementing this pattern with Quarkus...Clement Escoffier (https://twitter.com/clementplop)2022-09-15T00:00:00ZGetting started with Jakarta RESTful ServicesF.Marchionihttp://www.mastertheboss.com/jboss-frameworks/resteasy/getting-started-with-jakarta-restful-services/2022-09-14T16:44:00ZThe latest release of RESTEasy (6.1.0) provides an implementation of Jakarta RESTful Web Services 3.1, which is a core component of Jakarta EE 10. Let's review the core features of Jakarta REST Services in this article. What's new in Jakarta RESTful Web Services 3.1: The Jakarta RESTful Web Services 3.1 specification (part of ... The post appeared first on mastertheboss.com.F.MarchioniA beginner's guide to regular expressions with grepBob Reselmand2745cb9-0e7f-4c18-88b2-a5ce98fb99ac2022-09-14T07:00:00Z2022-09-14T07:00:00Z<p>A <em>regular expression</em> (also called a <em>regex</em> or <em>regexp</em>) is a rule that a computer can use to match characters or groups of characters within a larger body of text. For instance, using regular expressions, you could find all the instances of the word <em>cat</em> in a document, or all instances of a word that begins with <em>c</em> and ends with <em>t</em>.</p> <p>Use of regular expressions in the real world can get much more complex—and powerful—than that. For example, imagine you need to write code verifying that all content in the body of an HTTP POST request is free of script injection attacks. Malicious code can appear in any number of ways, but you know that injected script code will always appear between <code><script></script></code> HTML tags. You can apply the regular expression <code><script>.*<\/script></code>, which matches any block of code text bracketed by <code><script></code> tags, to the HTTP request body as part of your search for script injection code.</p>
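<p>As a quick preview of our own (not part of the original text; the <code>grep</code> command-line details are covered later in this article), you can try that exact pattern against a sample string:</p> <pre> <code class="language-bash">$ echo "<body><script>steal()</script></body>" | grep -Po '<script>.*<\/script>'</code></pre> <p>The output is the bracketed block:</p> <pre> <code class="language-java"><script>steal()</script></code></pre>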
<p>This example is but one of many uses for regular expressions. In this series, you'll learn more about how the syntax for this and other regular expressions works.</p> <p>As just demonstrated, a regex can be a powerful tool for finding text according to a particular pattern in a variety of situations. Once mastered, regular expressions provide developers with the ability to locate patterns of text in source code and documentation at design time. You can also apply regular expressions to text that is subject to algorithmic processing at runtime such as content in HTTP requests or event messages.</p> <p>Regular expressions are supported by many programming languages, as well as classic command-line applications such as <a href="https://www.redhat.com/sysadmin/linux-text-manipulation-tools">awk, sed, and grep</a>, which were developed for Unix many decades ago and are now offered on GNU/Linux.</p> <p>This article examines the basics of using regular expressions under <code>grep</code>. The article shows how you can use a regular expression to declare a pattern that you want to match, and outlines the essential building blocks of regular expressions, with many examples. This article assumes no prior knowledge of regular expressions, but you should understand how to work with the <a href="https://developers.redhat.com/topics/linux">Linux</a> operating system at the command line.</p> <h2>What are regular expressions, and what is grep?</h2> <p>As we've noted, a regular expression is a rule used for matching characters in text. These rules are <em>declarative,</em> which means they are immutable: once declared, they do not change. But a single rule can be applied to a variety of situations.</p> <p>Regular expressions are written in a special language. Although this language has been standardized, dialects vary from one regular expression engine to another. For example, <a href="https://developers.redhat.com/topics/javascript">JavaScript</a> has a regex dialect, as do <a href="https://developers.redhat.com/topics/c">C++</a>, <a href="https://developers.redhat.com/java">Java</a>, and <a href="https://developers.redhat.com/topics/python">Python</a>.</p> <p>This article uses the regular expression dialect that goes with the Linux <a href="https://www.redhat.com/sysadmin/how-to-use-grep">grep</a> command, with an extension to support more powerful features. <code>grep</code> is a binary executable that filters content in a file or output from other commands (stdout). Regular expressions are central to <code>grep</code>: The <em>re</em> in the middle of the name stands for "regular expression."</p> <p>This article uses <code>grep</code> because it doesn't require that you set up a particular coding environment or write any code to work with the examples of regular expressions demonstrated in this article. All you need to do is copy and paste an example onto the command line of a Linux terminal and you'll see results immediately. The <code>grep</code> command can be used in any shell.</p> <p>Because this article focuses on regular expressions as a language, and not on manipulating files, the examples use samples of text piped to <code>grep</code> instead of input files.</p> <h3>How to use grep against content in a file</h3> <p>To print lines in a file that match a regular expression, use the following syntax:</p> <pre> <code class="language-bash">$ grep -options <regular_expression> /paths/to/files</code></pre> <p>In this command syntax:</p> <ul> <li><code>-options</code>, if specified, control the behavior of the command.</li> <li><code><regular_expression></code> indicates the regular expression to execute against the files.</li> <li><code>/paths/to/files</code> indicate one or more files against which the regular expression will be executed.</li> </ul>
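<p>For example, here is a minimal sketch of our own (the file name <code>pets.txt</code> is hypothetical); the <code>-P</code> and <code>-o</code> options used here are explained just below:</p> <pre> <code class="language-bash">$ printf 'Jeff and the pet Lucky.\nGregg and the dog Fido.\n' > pets.txt $ grep -Po 'Fido' pets.txt</code></pre> <p>The output is:</p> <pre> <code class="language-java">Fido</code></pre>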
<p>The options used in this article are:</p> <ul> <li><code>-P</code>: Apply regular expressions in the style of the Perl programming language. This option, which is specific to GNU/Linux, is used in the article to unlock powerful features that aren't recognized by <code>grep</code> by default. There is nothing specific to Perl in the regular expressions used in this article; the same features can be found in many programming languages.</li> <li><code>-i</code>: Match in a case-insensitive manner.</li> <li><code>-o</code>: Print only the characters matching the regular expression. By default, the whole line containing the matching string is printed.</li> </ul> <h3>How to pipe content to a regular expression</h3> <p>As mentioned earlier, you can also use a regular expression to filter output from stdout. The following example uses the pipe symbol (<code>|</code>) to feed the result of an <code>echo</code> command to <code>grep</code>.</p> <pre> <code class="language-bash">$ echo "I like using regular expressions." | grep -Po 'r.*ar'</code></pre> <p>The command produces the following output:</p> <pre> <code class="language-java">regular</code></pre> <p>Why does <code>grep</code> return the characters <code>regular</code> to match the regular expression specified here? We'll explore the reasons in subsequent sections of this article.</p> <h2>Regular characters, metacharacters, and patterns: The building blocks of regular expressions</h2> <p>You'll use three basic building blocks when working with regular expressions: <em>regular characters, metacharacters,</em> and <em>patterns.</em> Regular characters and metacharacters are used to create a regular expression, and that regular expression represents a matching pattern that the regex engine applies to some content.</p> <p>You can think of a metacharacter as a placeholder symbol. For example, the <code>.</code> metacharacter (a dot or period) represents "any character." The <code>\d</code> metacharacter represents any single numeral, 0 through 9.</p> <p>The <code>*</code> metacharacter is a shorthand that represents the instruction "match zero or more occurrences of the preceding character." (You'll see how to work with the <code>*</code> metacharacter in sections to come.)</p> <p>Regular expressions support many metacharacters, each worthy of a page or two of description. For now, the important thing to understand is that a metacharacter is a reserved symbol used by the regex engine to describe a character in a generic manner. Also, certain metacharacters are a shorthand for a search instruction.</p> <p>You can combine regular characters with metacharacters to declare rules that define search patterns. For example, consider the following short regular expression:</p> <pre> <code class="language-java">.t</code></pre> <p>This matches a pattern consisting of two characters. The first character can be any character, as declared by the <code>.</code> (dot) metacharacter, but the second character must be <code>t</code>. Thus, applying the regular expression <code>.t</code> to the string <code>I like cats but not rats</code> matches the strings highlighted in bold font here:</p> <p><code>I like c<strong>at</strong>s b<strong>ut</strong> n<strong>ot</strong> r<strong>at</strong>s</code></p> <p>You can do a lot using just the basic metacharacters to create regular expressions with <code>grep</code>. The following sections provide a number of useful examples.</p> <h2>Running basic regular expressions</h2> <p>The following subsections demonstrate various examples of regular expressions. The examples are presented as two commands to enter in a Linux terminal.
The first command creates a variable named <code>teststr</code> that contains a sample string. The second executes the <code>echo</code> command against <code>teststr</code> and pipes the result of the <code>echo</code> command to <code>grep</code>. The <code>grep</code> command then filters the input according to the associated regular expression.</p> <h3>How to declare an exact pattern match using regular characters</h3> <p>The following example demonstrates how to search a string according to the pattern of regular characters, <code>Fido</code>. The search declaration is case-sensitive:</p> <pre> <code class="language-bash">$ teststr="Jeff and the pet Lucky. Gregg and the dog Fido. Chris has 1 bird named Tweety." $ echo $teststr | grep -Po 'Fido'</code></pre> <p>The result is:</p> <pre> <code class="language-java">Fido</code></pre> <h3>How to declare a case-insensitive exact pattern match</h3> <p>The following example demonstrates how to search a string according to a pattern of regular characters, <code>fido</code>. The search declaration is case-insensitive, as indicated by the <code>-i</code> option in the <code>grep</code> command. Thus, the regex engine will find occurrences such as <code>FIDO</code> as well as <code>fido</code> or <code>fiDo</code>.</p> <pre> <code class="language-bash">$ teststr="Jeff and the pet Lucky. Gregg and the dog Fido. Chris has 1 bird named Tweety." $ echo $teststr | grep -Poi 'fido'</code></pre> <p>The result is:</p> <pre> <code class="language-java">Fido</code></pre> <h3>How to declare a logical pattern match</h3> <p>The following example uses the <code>|</code> metacharacter symbol to search according to a <em>this or that</em> condition—that is, a condition that can be satisfied by either of the regular expressions on either side of <code>|</code>. In this case, the regular expression matches occurrences of the regular character <code>f</code> or <code>g</code>:</p> <pre> <code class="language-bash">$ teststr="Jeff and the pet Lucky. Gregg and the dog Fido. Chris has 1 bird named Tweety." $ echo $teststr | grep -Po 'f|g'</code></pre> <p>The <code>grep</code> command identifies each occurrence that satisfies the rule declared in the regular expression. Conceptually, the regular expression is saying, <em>Return any character that is either an f or a g</em>. We are leaving the search case-sensitive, as is the default. Thus, the identified characters are highlighted in bold text here:</p> <p><code>Je<strong>ff</strong> and the pet Lucky. Gre<strong>gg</strong> and the do<strong>g</strong> Fido. Chris has 1 bird named Tweety.</code></p> <p>Because each character is identified and returned on a one-by-one basis, the output sent to the terminal window is:</p> <pre> <code class="language-java">f f g g g</code></pre> <h3>How to find a character at the beginning of a line</h3> <p>The following example uses the <code>^</code> metacharacter to search for the beginning of a line of text. Conceptually, the <code>^</code> metacharacter matches the beginning of a line.</p> <p>The example executes the regular expression <code>^J</code>. This regular expression searches for a match that satisfies two conditions. The first condition is to find the beginning of the line; the next is to find the regular character <code>J</code> at that position.</p> <pre> <code class="language-bash">$ teststr="Jeff and the pet Lucky. Gregg and the dog Fido. Chris has 1 bird named Tweety." 
$ echo $teststr | grep -Po '^J'</code></pre> <p>The regular expression matches the character highlighted in bold text as shown here:</p> <p><code><strong>J</strong>eff and the pet Lucky. Gregg and the dog Fido. Chris has 1 bird named Tweety.</code></p> <p>The result returned to the terminal is:</p> <pre> <code class="language-java">J</code></pre> <h3>How to find a character at the end of a line</h3> <p>The following example uses the <code>$</code> metacharacter to search for the end of a line of text.</p> <p>The example executes the regular expression <code>\.$</code>. The regular expression declares a matching rule that has two conditions. First, the regular expression searches for an occurrence of the regular character <code>.</code> (dot). Then the regular expression looks to see whether the end of the line is next. Thus, if the <code>.</code> character comes at the end of the line, it's deemed a match.</p> <p>The regular expression includes a backslash (<code>\</code>) as an "escape" metacharacter before the dot. The escape metacharacter is needed to override the normal meaning of the dot as a metacharacter. Remember that the <code>.</code> (dot) metacharacter means <em>any character</em>. With the escape character, the dot is treated as a regular character, and so matches just itself:</p> <pre> <code class="language-bash">$ teststr="Jeff and the pet Lucky. Gregg and the dog Fido. Chris has 1 bird named Tweety." $ echo $teststr | grep -Po '\.$'</code></pre> <p>The regular expression matches the final dot in the text, highlighted in bold as shown here:</p> <p><code>Jeff and the pet Lucky. Gregg and the dog Fido. Chris has 1 bird named Tweety<strong>.</strong></code></p> <p>The result is just the final dot:</p> <pre> <code class="language-java">.</code></pre> <p>Suppose you were to use an unescaped dot in the regular expression:</p> <pre> <code class="language-bash">$ echo $teststr | grep -Po '.$'</code></pre> <p>You would get the same result as using the escaped dot, but a different logic is being executed. That logic is: <em>Match any character that is the last character before the end of the string</em>. Thus, the regular expression would match the last character of any line, no matter what that character is. Using the escape character to identify a character as a regular character is a subtle distinction in this case, but an important one nonetheless.</p> <h3>How to find multiple characters at the end of a line</h3> <p>The following example searches the string assigned to the variable <code>teststr</code> to match the characters <code>ty.</code> when they appear at the end of a line.</p> <pre> <code class="language-bash">$ teststr="Jeff and the pet Lucky. Gregg and the dog Fido. Chris has 1 bird named Tweety." $ echo $teststr | grep -Po 'ty\.$'</code></pre> <p>The result is:</p> <pre> <code class="language-java">ty.</code></pre> <p>Again, note the use of the escape metacharacter (<code>\</code>) to declare the <code>.</code> (dot) character as a regular character.</p>
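<p>To make the distinction concrete, here is a quick check of our own, run against a line that does not end in a dot. The escaped pattern finds no match at all, while the unescaped dot still matches the final character:</p> <pre> <code class="language-bash">$ echo "no trailing dot" | grep -Po '\.$' $ echo "no trailing dot" | grep -Po '.$'</code></pre> <p>The first command prints nothing; the second prints:</p> <pre> <code class="language-java">t</code></pre>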
<h3>How to find occurrences of a character using the metacharacters for matching numerals</h3> <p>The following example uses the <code>\d</code> metacharacter to create a regular expression that looks for matches of any numeral in a given piece of text.</p> <pre> <code class="language-bash">$ teststr="There are 9 cats and 2 dogs in a box." $ echo $teststr | grep -Po '\d'</code></pre> <p>Because each numeral is matched and returned on a one-by-one basis, the output sent to the terminal is:</p> <pre> <code class="language-java">9 2</code></pre> <h3>How to find a string using metacharacters for a numeral and a space</h3> <p>The following example uses the <code>\d</code> and <code>\s</code> metacharacters along with regular characters to create a regular expression that matches text according to the following logic: <em>Match any numeral that is followed by a space and then the regular characters </em><strong><em>cats</em></strong>.</p> <p>The <code>\d</code> metacharacter matches a numeral and the <code>\s</code> metacharacter matches a whitespace character (a space, a tab, or a few other rare characters):</p> <pre> <code class="language-bash">$ teststr="There are 9 cats and 2 dogs in a box." $ echo $teststr | grep -Po '\d\scats'</code></pre> <p>The result is:</p> <pre> <code class="language-java">9 cats</code></pre> <h3>How to combine metacharacters to create a complex regular expression</h3> <p>The following example uses the <code>\d</code> metacharacter to match a numeral, <code>\s</code> to match a space, and <code>.</code> (dot) to match any character. The regular expression uses the <code>*</code> metacharacter to say, <em>Match zero or more successive occurrences of the preceding character.</em></p> <p>The logic expressed in the regular expression is this: <em>Find a string of text that starts with a numeral followed by a space character and the regular characters <strong>cats.</strong> Then keep going, matching any characters until you come to another numeral followed by a space character and the regular characters <strong>dogs</strong></em>:</p> <pre> <code class="language-bash">$ teststr="There are 9 cats and 2 dogs in a box." $ echo $teststr | grep -Po '\d\scats.*\d\sdogs'</code></pre> <p>The result is:</p> <pre> <code class="language-java">9 cats and 2 dogs</code></pre> <h3>How to traverse a line of text to a stop point</h3> <p>The following example uses the <code>.</code> (dot) metacharacter and <code>*</code> along with the regular characters <code>cats</code> to create a regular expression with the following logic: <em>Match any character zero or more times until you come to the characters <strong>cats</strong></em>:</p> <pre> <code class="language-bash">$ teststr="There are 9 cats and 2 dogs in a box." $ echo $teststr | grep -Po '.*cats'</code></pre> <p>The result is:</p> <pre> <code class="language-java">There are 9 cats</code></pre> <p>The interesting thing about this regular expression is that starting from the beginning of the line is implicit. The <code>^</code> metacharacter could be used to indicate the start of a line, but because the regular expression matches any characters until you come to <code>cats</code>, it isn't necessary to explicitly declare the start of the line using <code>^</code>. The regular expression starts processing from the beginning of the line by default.</p>
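<p>The mirror case also holds; this variation is our own addition rather than one of the article's examples. A match can run from a starting pattern to the end of the line without an explicit <code>$</code> metacharacter, because processing continues to the end of the line by default:</p> <pre> <code class="language-bash">$ teststr="There are 9 cats and 2 dogs in a box." $ echo $teststr | grep -Po 'cats.*'</code></pre> <p>The result is:</p> <pre> <code class="language-java">cats and 2 dogs in a box.</code></pre>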
<h2>Regular expressions uncover patterns in text</h2> <p>Regular expressions offer a powerful yet concise way to do complex text filtering. You can use them in programming languages such as JavaScript, Python, Perl, and C++, and directly in a Linux terminal to process files and text using the <code>grep</code> command, as demonstrated in this article.</p> <p>Getting the hang of regular expressions takes time. Mastering the intricacies of working with the metacharacters alone can be daunting. Fortunately, the learning curve is gradual. You don't have to master the entirety of regular expressions to work with them usefully as a beginner. You can start with the basics, and as you learn more you can do more. Just being able to do pattern matching using the basic examples shown in this article can provide immediate benefit.</p> <p>An upcoming article in this series will explain regular expression features that are even more powerful.</p> The post <a href="https://developers.redhat.com/articles/2022/09/14/beginners-guide-regular-expressions-grep" title="A beginner’s guide to regular expressions with grep">A beginner’s guide to regular expressions with grep</a> appeared first on <a href="https://developers.redhat.com/blog" title="Red Hat Developer">Red Hat Developer</a>. <br /><br />Bob Reselman2022-09-14T07:00:00ZRemote dev-watch development with WildFly Jar Maven PluginEmmanuel Hugonnethttps://wildfly.org//news/2022/09/14/Remote-dev-watch/2022-09-14T00:00:00ZThe 8.0.0.Alpha2 version of the WildFly JAR Maven plugin has been released. This is not yet Final, as it is only there to gather feedback on a new feature that simplifies development on the "cloud" using the dev-watch goal. For people who are not familiar with the WildFly bootable JAR and its dev-watch goal, I strongly recommend that you read the blog post that covers it in detail. DEV-WATCH GOAL The current dev-watch goal, although offering an efficient workflow to develop WildFly applications, requires the bootable application or server to run locally, in the same place as the project. The improvement made in this release is to allow the bootable application or server to run remotely, so that it can be in an environment that is closer to the target runtime environment. We are going to use the remote-microprofile-config example application to see how we can work remotely. Important: This application applies the script anonymous-management.cli, which disables security on the Management API of WildFly; please make sure not to include it when going to production. DEVELOPING WITH A DOCKER CONTAINER. BUILD AND RUN THE APPLICATION WITH DOCKER The first step is to create the container image where the application is running. For this we are going to use a very simple Dockerfile:

FROM registry.access.redhat.com/ubi8/openjdk-11:latest
COPY --chown=jboss:root target/*.jar /deployments/.
RUN chmod -R ug+rwX /deployments/.

To build that container image we are executing:

$ mvn clean install
$ podman build -f Dockerfile -t remote-microprofile-config:latest

And then we are going to run the container and expose the ports 8080 and 9990:

$ podman run -p 8080:8080 -p 9990:9990 -it remote-microprofile-config:latest

DEVELOP AND UPDATE THIS APPLICATION Now we need to run the dev-watch goal and remotely attach to the WildFly Management API. For this we need to execute the following command line:

$ mvn org.wildfly.plugins:wildfly-jar-maven-plugin:8.0.0.Alpha2:dev-watch \
 -Dwildfly.bootable.remote=true \
 -Dwildfly.bootable.remote.username=admin \
 -Dwildfly.bootable.remote.password=passW0rd! \
 -Dwildfly.hostname=${container.ip.address}

Check that the application is running properly:

$ curl http://${container.ip.address}:8080
config1 = Value from Config1 comes from an env var in the DeploymentConfig
config2 = Value for config2 comes from a properties file inside the application
config3 = Default value for config3 comes from my code

Once this is done you can edit the code and your changes will be automatically pushed to the remote container.
For example: * Change the config2 property value to be "Hello from dev-watch remote" in the file src/main/resources/META-INF/microprofile-config.properties. * Save your changes. * The application is redeployed and the new configuration will be taken into account:

$ curl http://${container.ip.address}:8080
config1 = Value from Config1 comes from an env var in the DeploymentConfig
config2 = Hello from dev-watch remote
config3 = Default value for config3 comes from my code

DEVELOPING ON OPENSHIFT. BUILD AND RUN THE APPLICATION WITH OPENSHIFT We first need to build the application:

$ mvn clean install

Then to deploy it you need to drag and drop the produced remote-microprofile-config-bootable.jar on the Topology page on OpenShift. Now we need to expose the management API of WildFly by first editing the service to add a TCP port for 9990, and then adding a route to that port:

$ oc create route edge management-remote-microprofile-config-bootable --service=remote-microprofile-config-bootable --port=9990 --insecure-policy='Redirect'

DEVELOP AND UPDATE THIS APPLICATION Now we need to run the dev-watch goal and remotely attach to the WildFly Management API. For this we need to execute the following command line:

$ mvn -P bootable-jar-remote -Dwildfly.hostname=$(oc get route management-remote-microprofile-config-bootable --template='{{ .spec.host }}') install

You may also use a command like this one:

$ mvn org.wildfly.plugins:wildfly-jar-maven-plugin:8.0.0.Alpha2:dev-watch \
 -Dwildfly.bootable.remote=true \
 -Dwildfly.port=443 \
 -Dwildfly.bootable.remote.protocol=remote+https \
 -Dwildfly.hostname=$(oc get route management-remote-microprofile-config-bootable --template='{{ .spec.host }}')

Check that the application is running properly:

$ curl https://$(oc get route remote-microprofile-config-bootable --template='{{ .spec.host }}')
config1 = Value from Config1 comes from an env var in the DeploymentConfig
config2 = Value for config2 comes from a properties file inside the application
config3 = Default value for config3 comes from my code

Once this is done you can edit the code and your changes will be automatically pushed to the OpenShift instance. For example: * Change the config2 property value to be "Hello from dev-watch remote" in the file src/main/resources/META-INF/microprofile-config.properties. * Save your changes. * The application is redeployed and the new configuration will be taken into account:

$ curl https://$(oc get route remote-microprofile-config-bootable --template='{{ .spec.host }}')
config1 = Value from Config1 comes from an env var in the DeploymentConfig
config2 = Hello from dev-watch remote
config3 = Default value for config3 comes from my code
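As an extra sanity check of our own (not from the original post; the deployment name below is the one OpenShift derives from the jar in this example, so adjust it if yours differs), you can tail the pod logs while dev-watch pushes a change and watch the redeployment happen:

$ oc logs -f deployment/remote-microprofile-config-bootable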
CONCLUSION We hope that you are seeing the benefits of the new features that this release brings. We would really appreciate your feedback on the dev-watch goal. We aim toward a smooth and efficient first-class WildFly developer experience, and we need you there! Thank you.Emmanuel HugonnetKafka Monthly Digest: August 2022Mickael Maison195c9344-8250-48a9-9554-85515b7d00a92022-09-13T07:00:00Z2022-09-13T07:00:00Z<p>This 55th edition of the <a href="https://developers.redhat.com/topics/kafka-kubernetes">Kafka</a> Monthly Digest covers what happened in the <a href="https://kafka.apache.org/">Apache Kafka</a> community in August 2022.</p> <p>For last month’s digest, see <a href="https://developers.redhat.com/articles/2022/08/04/kafka-monthly-digest-july-2022">Kafka Monthly Digest: July 2022</a>.</p> <h2>Releases</h2> <p>There is currently one release in progress, 3.3.0.</p> <h3>3.3.0</h3> <p>The release process for 3.3.0 continued. José Armando García Sancio published the first release candidate on August 29. A few issues, including <a href="https://issues.apache.org/jira/browse/KAFKA-14187">KAFKA-14187</a> and <a href="https://issues.apache.org/jira/browse/KAFKA-14156">KAFKA-14156</a>, were found during testing, so José built RC1 on September 1. The vote is currently ongoing. You can find the <a href="https://cwiki.apache.org/confluence/display/KAFKA/Release+Plan+3.3.0">release plan</a> in the wiki.</p> <h2>Kafka Improvement Proposals</h2> <p>Last month, the community submitted three <a href="https://cwiki.apache.org/confluence/display/KAFKA/Kafka+Improvement+Proposals">Kafka Improvement Proposals (KIPs)</a> (KIP-863 to KIP-865). I'll highlight a couple of them:</p> <ul> <li><p><a href="https://cwiki.apache.org/confluence/display/KAFKA/KIP-864%3A+Add+End-To-End+Latency+Metrics+to+Connectors">KIP-864: Add End-To-End Latency Metrics to Connectors</a>. This KIP proposes adding a few new metrics to track end-to-end latency for records flowing through Connect. This would also include metrics tracking the time spent in converters.</p></li> <li><p><a href="https://cwiki.apache.org/confluence/display/KAFKA/KIP-865%3A+Support+--bootstrap-server+in+kafka-streams-application-reset">KIP-865: Support --bootstrap-server in kafka-streams-application-reset</a>. This very small KIP aims at addressing a discrepancy with the <code>kafka-streams-application-reset.sh</code> tool. This tool currently uses the <code>--bootstrap-servers</code> flag, while all other tools use <code>--bootstrap-server</code>, so it will be updated for consistency.</p></li> </ul> <h2>Community releases</h2> <ul> <li><a href="https://github.com/tchiotludo/akhq/releases/tag/0.22.0">akhq 0.22</a>: AKHQ is a GUI for Apache Kafka. This new version adds a few new features, including support for listing ACLs on Cluster and TransactionalIds and sending Protobuf records via the UI.</li> <li><a href="https://github.com/tulios/kafkajs/releases/tag/v2.2.0">kafkajs 2.2.0</a>: Kafkajs is a pure JavaScript Kafka client for Node.js.
This release adds support for triggering and listing partition reassignments in its Admin API and contains a few fixes.</li> </ul> <h2>Blogs</h2> <p>I selected some interesting blog articles that were published last month:</p> <ul> <li><a href="https://towardsdatascience.com/machine-learning-streaming-with-kafka-debezium-and-bentoml-c5f3996afe8f">Machine Learning Streaming with Kafka, Debezium, and BentoML</a></li> <li><a href="https://medium.com/event-driven-utopia/building-cqrs-views-with-debezium-kafka-materialize-and-apache-pinot-part-1-4f697735b2e4">Building CQRS Views with Debezium, Kafka, Materialize, and Apache Pinot — Part 1</a></li> <li><a href="https://medium.com/event-driven-utopia/building-cqrs-views-with-debezium-kafka-materialize-and-apache-pinot-part-2-6899e9efc74e">Building CQRS Views with Debezium, Kafka, Materialize, and Apache Pinot — Part 2</a></li> </ul> <p>To learn more about Kafka, visit <a href="https://developers.redhat.com/topics/kafka-kubernetes">Red Hat Developer's Apache Kafka topic page</a>.</p> The post <a href="https://developers.redhat.com/articles/2022/09/13/kafka-monthly-digest-august-2022" title="Kafka Monthly Digest: August 2022">Kafka Monthly Digest: August 2022</a> appeared first on <a href="https://developers.redhat.com/blog" title="Red Hat Developer">Red Hat Developer</a>. <br /><br />Mickael Maison2022-09-13T07:00:00ZJoin the Red Hat team at NodeConf EU 2022Lucas Holmquistaf2d04c4-c425-4b66-b2db-7b7fc5ba23c02022-09-23T07:00:00Z2022-09-23T07:00:00Z<p>It's that time of the year again, and NodeConf EU is almost upon us. This annual event is one of the leading <a data-entity-substitution="canonical" data-entity-type="node" data-entity-uuid="43652567-d1ab-4765-a588-4e905032ad7f" href="https://developers.redhat.com/topics/nodejs" title="Node.js: Develop server-side JavaScript applications">Node.js</a> events in Europe. It brings together contributors and innovators from the Node.js community to deliver a wide range of talks and workshops.</p> <p>The conference will be back in person this year, on October 3rd–5th in Kilkenny, Ireland, after being virtual for the past two years.</p> <p>The Node.js team here at Red Hat will be talking about lesser-known Node.js Core modules as well as guiding attendees through a workshop that will get you familiar with cloud-native development with Node.js.</p> <h2>Talk: Journey into mystery: Lesser-known Node Core modules and APIs</h2> <p>Wednesday, October 4th, 2022, 9:30 UTC</p> <p>Presenter: Luke Holmquist (<a href="https://twitter.com/sienaluke">@sienaluke</a>), Senior Software Engineer, Red Hat</p> <p>One of the key concepts of Node.js is its modular architecture, and Node makes it very easy to use a wide variety of modules and <a href="https://developers.redhat.com/topics/api-management">APIs</a> from the community. Some of the modules and APIs that are part of Node.js Core are very familiar, like HTTP and Events. But what about those lesser-known core modules just waiting to be used?
This talk will journey into mystery as we explore some of the lesser-known Core modules and APIs that Node.js offers.</p> <h2>Workshop: Elevating Node.js applications to the cloud</h2> <p>Wednesday, October 4th, 2022, 3:00 UTC</p> <p>Presenters:</p> <ul> <li>Bethany Griggs, Senior Software Engineer, Red Hat</li> <li>Michael Dawson (<a href="https://twitter.com/mhdawson1">@mhdawson1</a>), Node.js Lead, Red Hat</li> <li>Luke Holmquist (<a href="https://twitter.com/sienaluke">@sienaluke</a>), Senior Software Engineer, Red Hat</li> </ul> <p>This workshop provides an introduction to cloud-native development with Node.js. We will walk you through building cloud-native Node.js applications, incorporating typical components, including observability components for <a href="https://developers.redhat.com/articles/2021/05/10/introduction-nodejs-reference-architecture-part-2-logging-nodejs">logging</a>, metrics, and more. Next, we'll show you how to deploy your application to cloud environments. The workshop will cover cloud-native concepts and technologies, including health checks, metrics, building <a href="https://developers.redhat.com/topics/containers">containers</a>, and deployments to <a href="https://developers.redhat.com/topics/kubernetes">Kubernetes</a>.</p> <p>For a full list of the various talks and workshops, check out the <a href="https://www.nodeconf.eu/agenda">NodeConf EU 2022 agenda</a>.</p> <h2>Collaborator Summit</h2> <p>There will also be an OpenJS Collaborator Summit in Dublin, Ireland on October 1-2, 2022, two days before NodeConf EU. We hope to see you there to discuss all things <a href="https://developers.redhat.com/topics/javascript">JavaScript</a> and Node.js. Our team members will be leading or actively participating in many sessions.</p> <p>The Collab Summit is for maintainers or core contributors of an OpenJS project, plus any open source enthusiast interested in participating. This is the time for deep dives on important topics and to meet with people working across your favorite JavaScript projects. Get more details on the <a href="https://openjsf.org/blog/2022/09/01/openjs-collaborator-summit-join-us-in-dublin-virtual-october-1-2%EF%BF%BC/">OpenJS website</a>.</p> <h2>More Node.js resources</h2> <p>Don't miss the latest installments of our series on the <a href="https://developers.redhat.com/blog/2021/03/08/introduction-to-the-node-js-reference-architecture-part-1-overview">Node.js reference architecture</a>.</p> <p>If you want to learn more about Red Hat and IBM’s involvement in the Node.js community and what we are working on, check out our topic pages at <a href="https://developers.redhat.com/topics/nodejs">Red Hat Developer</a> and <a href="https://developer.ibm.com/languages/node-js/">IBM Developer</a>.</p> The post <a href="https://developers.redhat.com/articles/2022/09/23/join-red-hat-team-nodeconf-eu-2022" title="Join the Red Hat team at NodeConf EU 2022">Join the Red Hat team at NodeConf EU 2022</a> appeared first on <a href="https://developers.redhat.com/blog" title="Red Hat Developer">Red Hat Developer</a>. <br /><br />Lucas Holmquist2022-09-23T07:00:00ZCreating your first cloud-agnostic serverless application with JavaHelber Belmirohttps://blog.kie.org/2022/09/creating-your-first-cloud-agnostic-serverless-application-with-java.html2022-09-22T10:35:00ZIf you are new to Serverless Workflow or serverless in general, creating a simple application for a serverless infrastructure is a good place to start.
In this article, you will run through the steps to create your first serverless Java application that runs on any cloud. WHAT IS SERVERLESS? Contrary to what the name says, there are still servers in serverless, but you don't need to worry about managing them. You just need to deploy your containers, and the serverless infrastructure is responsible for providing resources so your application can scale up or down. The best part is that it automatically scales up when there is high demand and scales to zero when there is no demand. This will reduce the amount of money you spend on the cloud. WHAT WILL YOU CREATE? You will use Quarkus to create a simple Java application that returns a greeting message to an HTTP request and deploy it to Knative. WHY KNATIVE? In the beginning, serverless applications used to consist of small pieces of code that were run by a cloud vendor, like AWS Lambda. In this first phase, the applications had some limitations and were closely coupled to the vendor libraries. Knative enables developers to run serverless applications on a Kubernetes cluster. This gives you the flexibility to run your applications on any cloud, on-premises, or even to mix all of them. WHY QUARKUS? Because serverless applications need to start fast. Since the biggest advantage of serverless is scaling up and down (even to zero) according to demand, serverless applications need to start fast when scaling up; otherwise, requests would be denied. One of the greatest characteristics of Quarkus applications is their super fast start-up. Also, Quarkus is Kubernetes-native, which means that it's easy to deploy Quarkus applications to Kubernetes without having to understand the intricacies of the underlying Kubernetes framework. REQUIREMENTS * A local Knative installation. * This article uses minikube as the local Kubernetes cluster. * The kn CLI installed. * JDK 11+ installed with JAVA_HOME configured appropriately. * Apache Maven 3.8.1+. * GraalVM (optional, to deploy a native image). CREATE A QUARKUS APPLICATION > NOTE: If you don't want to create the application, you can just clone the > sample project and skip to the deployment steps.

mvn io.quarkus.platform:quarkus-maven-plugin:2.11.2.Final:create \
 -DprojectGroupId=org.acme \
 -DprojectArtifactId=knative-serving-quarkus-demo

cd knative-serving-quarkus-demo

RUN YOUR APPLICATION LOCALLY To verify that you created the project correctly, run the project locally by running the following command:

mvn quarkus:dev

After downloading the dependencies and building the project, you should see an output similar to:

__  ____  __  _____   ___  __ ____  ______
 --/ __ \/ / / / _ | / _ \/ //_/ / / / __/
 -/ /_/ / /_/ / __ |/ , _/ ,< / /_/ /\ \
--\___\_\____/_/ |_/_/|_/_/|_|\____/___/
2022-08-15 16:50:25,135 INFO [io.quarkus] (Quarkus Main Thread) knative-serving-quarkus-demo 1.0.0-SNAPSHOT on JVM (powered by Quarkus 2.11.2.Final) started in 1.339s. Listening on: http://localhost:8080
2022-08-15 16:50:25,150 INFO [io.quarkus] (Quarkus Main Thread) Profile dev activated. Live Coding activated.
2022-08-15 16:50:25,150 INFO [io.quarkus] (Quarkus Main Thread) Installed features: [cdi, resteasy-reactive, smallrye-context-propagation, vertx]

On a different terminal window or in the browser, you can access the application by sending a request to the /hello endpoint:

curl -X 'GET' 'http://localhost:8080/hello' -H 'accept: text/plain'

If you see the following output, then you have successfully created your application:

Hello from RESTEasy Reactive

Hit Ctrl + C to stop the application.
PREPARE YOUR APPLICATION FOR DEPLOYMENT TO KNATIVE ADD THE REQUIRED DEPENDENCIES Add the following dependencies to the pom.xml file: <dependency> <groupId>io.quarkus</groupId> <artifactId>quarkus-kubernetes</artifactId> </dependency> <dependency> <groupId>io.quarkus</groupId> <artifactId>quarkus-container-image-jib</artifactId> </dependency> CONFIGURE THE APPLICATION FOR DEPLOYMENT TO KNATIVE Add the following configuration to the src/main/resources/application.properties file: quarkus.kubernetes.deployment-target=knative quarkus.container-image.group=dev.local/hbelmiro > NOTE: In the quarkus.container-image.group property, replace hbelmiro with > your container registry username. DEPLOY YOUR APPLICATION TO KNATIVE START THE MINIKUBE TUNNEL > NOTE: This step is only necessary if you are using minikube as the local > Kubernetes cluster. On a different terminal window, run the following command to start the minikube tunnel: minikube tunnel --profile knative You should see an output similar to the following: Status: machine: knative pid: 223762 route: 10.96.0.0/12 -> 192.168.49.2 minikube: Running services: [kourier] errors: minikube: no errors router: no errors loadbalancer emulator: no errors Leave the terminal window open with the above command running. CONFIGURE THE CONTAINER CLI TO USE THE CONTAINER ENGINE INSIDE MINIKUBE eval $(minikube -p knative docker-env) DEPLOY THE APPLICATION Run the following command to deploy the application to Knative: mvn clean package -Dquarkus.kubernetes.deploy=true You should see an output similar to the following: [INFO] [io.quarkus.kubernetes.deployment.KubernetesDeployer] Deploying to knative server: https://192.168.49.2:8443/ in namespace: default. [INFO] [io.quarkus.kubernetes.deployment.KubernetesDeployer] Applied: Service knative-serving-quarkus-demo. [INFO] [io.quarkus.deployment.QuarkusAugmentor] Quarkus augmentation completed in 8952ms [INFO] ------------------------------------------------------------------------ [INFO] BUILD SUCCESS [INFO] ------------------------------------------------------------------------ CHECK THE KNATIVE DEPLOYED SERVICES Run the following command to check the Knative deployed services: kn service list You should see your application listed among the deployed services, like the following: NAME URL LATEST AGE CONDITIONS READY REASON knative-serving-quarkus-demo http://knative-serving-quarkus-demo.default.10.106.207.219.sslip.io knative-serving-quarkus-demo-00001 23s 3 OK / 3 True > IMPORTANT: In the above output, check the READY status of the application. If > the status is not True, then you need to wait for the application to be ready, > or there is a problem with the deployment. SEND A REQUEST TO THE DEPLOYED APPLICATION Use the URL returned by the above command to send a request to the deployed application. curl -X 'GET' 'http://knative-serving-quarkus-demo.default.10.106.207.219.sslip.io/hello' -H 'accept: text/plain' You should see the following output: Hello from RESTEasy Reactive GOING NATIVE You can create a native image of your application to make it start even faster. To do that, deploy your application by using the following command: mvn clean package -Pnative -Dquarkus.native.native-image-xmx=4096m -Dquarkus.native.remote-container-build=true -Dquarkus.kubernetes.deploy=true > IMPORTANT: -Dquarkus.native.native-image-xmx=4096m is the amount of memory > Quarkus can use to generate the native image. You should adjust it or > completely remove it depending on your local machine’s specifications.
NOW YOU ARE READY TO RUN SERVERLESS APPLICATIONS USING JAVA Easy, isn’t it? Quarkus and Knative give you the freedom to run serverless applications using Java on-premises or in the cloud, no matter the vendor. You can even mix more than one cloud vendor with your on-premises infrastructure. This flexibility brings you agility and reduces your infrastructure costs. NEXT STEP If you want to go further on serverless with more exciting stuff, check out  The post appeared first on .Helber BelmiroLearn about the new BGP capabilities in Red Hat OpenStack 17Daniel Alvarez Sanchezd3766211-f376-45f2-b86d-2b3cbe44900a2022-09-22T07:00:00Z2022-09-22T07:00:00Z<p>The <a href="https://www.redhat.com/en/technologies/linux-platforms/openstack-platform">Red Hat OpenStack Platform</a> is an Infrastructure-as-a-Service (IaaS) offering from Red Hat. Version 17.0 of the platform includes dynamic routing for both the control and data planes. This lets you deploy a cluster in a pure layer-3 (L3) data center, overcoming the scaling issues of traditional layer-2 (L2) infrastructures such as large failure domains, large broadcast traffic, or long convergence times in the event of failures.</p> <p>This article will illustrate this new feature by outlining a simple three-rack spine and leaf topology, where the layer-2 boundaries are within each rack on the Red Hat OpenStack Platform. The control plane spans the three racks, and each rack also hosts a compute node. Figure 1 illustrates our topology.</p> <div class="rhd-c-figure"> <article class="align-center media media--type-image media--view-mode-article-content"> <div class="field field--name-image field--type-image field--label-hidden field__items"> <a href="https://developers.redhat.com/sites/default/files/fig1_12.png" data-featherlight="image"><img src="https://developers.redhat.com/sites/default/files/styles/article_floated/public/fig1_12.png?itok=5G4F2AYN" width="600" height="285" alt="Diagram showing two leaf nodes connecting each control node to the spines." loading="lazy" typeof="Image" /> </a> </div> <div class="field field--name-field-caption field--type-string field--label-hidden field__items"> <div class="rhd-c-caption field__item"> Figure 1: Two leaf nodes connect each control node to the spines. </div> </div> </article> </div> <p>The main characteristics of this deployment are:</p> <ul> <li>Border Gateway Protocol (BGP) is running on every element in the network: controllers, computes, leaves, and spine. The Red Hat OpenStack Platform uses <a href="https://frrouting.org/">FRRouting</a> (FRR) to enable BGP in the overcloud nodes, and it operates here as follows: <ul> <li>Leaves are configured as route reflectors, re-advertising learned routes to the spine.</li> <li><em>BGP Unnumbered</em> uses the IPv6 link-local address of each interface to establish BGP sessions.
There is no need to assign and configure unique IP addresses on these interfaces, simplifying the deployment.</li> <li>FRR advertises all local IP addresses (that is, /32 on IPv4 or /128 on IPv6) as directly connected host routes.</li> </ul> </li> <li>Each device has outgoing default <a href="https://study-ccna.com/ecmp-equal-cost-multi-path/">equal-cost multi-path routing</a> (ECMP) routes for load balancing and high availability (no L2 bonds).</li> <li><a href="https://datatracker.ietf.org/doc/rfc5880/">Bidirectional Forwarding Detection</a> (BFD), which is <a href="https://opendev.org/openstack/tripleo-ansible/src/commit/7da489819193352f009949f10fe988809a607ab7/tripleo_ansible/roles/tripleo_frr/defaults/main.yml#L23-L32">configurable</a>, is used for network failure detection for fast convergence times.</li> <li><a href="https://docs.hpc.cam.ac.uk/cloud/userguide/02-neutron.html">OpenStack Neutron</a> and <a href="https://www.ovn.org/en/">Open Virtual Network</a> (OVN) are agnostic and require no changes or configuration. <ul> </ul> </li> </ul> <h3>Constraints and limitations</h3> <p>Before we move on, it's worth noting the constraints and limitations of the implementation shown in this article:</p> <ul> <li>This feature will only work with the Neutron <a href="https://docs.openstack.org/neutron/latest/admin/config-ml2.html">ML2/OVN</a> mechanism driver.</li> <li>Workloads in provider networks and floating IP addresses are advertised. Routes to these workloads go directly to the compute node hosting the virtual machine (VM).</li> <li>Tenant networks can <a href="https://opendev.org/openstack/tripleo-ansible/src/commit/2381a7c3b246713744ab259ea8ac22be826344cb/tripleo_ansible/roles/tripleo_frr/defaults/main.yml#L69">optionally be advertised</a>, but: <ul> <li>Overlapping CIDRs are not supported. Tenants need to ensure uniqueness (e.g., through the use of <a href="https://docs.openstack.org/neutron/wallaby/admin/config-address-scopes.html">address scopes</a>).</li> <li>Traffic to workloads in tenant networks traverses the gateway node.</li> </ul> </li> <li>An <a href="https://opendev.org/x/ovn-bgp-agent">agent</a> is required to run on each overcloud node. This agent is responsible for steering the traffic to or from the OVN overlay, as well as triggering FRR to advertise the IPv4 or IPv6 addresses of the workloads.</li> <li>The provider bridge (typically <code>br-ex</code> or <code>br-provider</code>) is not connected to a physical NIC or bond. Instead, egress traffic from the local VMs is processed by an extra routing layer in the Linux kernel. Similarly, ingress traffic is processed by this extra routing layer and forwarded to OVN through the provider bridge.</li> <li>There is no support for datapath acceleration, because the agent relies on kernel networking to steer the traffic between the NICs and OVN. Acceleration mechanisms such as <a href="https://docs.openvswitch.org/en/latest/intro/install/dpdk/">Open vSwitch with DPDK</a> or <a href="https://docs.openstack.org/neutron/rocky/admin/config-ovs-offload.html">OVS hardware offloading</a> are not supported. 
Similarly, <a href="https://www.networkworld.com/article/3535850/what-is-sr-iov-and-why-is-it-the-gold-standard-for-gpu-sharing.html">SR-IOV</a> is not compatible with this configuration because it skips the hypervisor.</li> </ul> <h2>Control plane</h2> <p>With this configuration, the control plane no longer has to be in the same L3 network as the endpoints, because endpoints are advertised via BGP and traffic is <em>routed</em> to the nodes hosting the services.</p> <p><em>High availability</em> (HA) is provided fairly simply. Instead of announcing the VIP location upon failover by sending broadcast GARPs to the upstream switch, <a href="https://clusterlabs.org/pacemaker/doc/2.1/Pacemaker_Explained/singlehtml/">Pacemaker</a> just configures the VIP addresses in the loopback interface, which triggers FRR to advertise a directly connected host route to it.</p> <h3>Sample traffic route</h3> <p>Let's take the example of the control plane's <a href="http://www.haproxy.org">HAproxy </a>endpoint and check its Pacemaker configuration:</p> <pre> <code class="language-java">[root@ctrl-1-0 ~]# pcs constraint colocation config Colocation Constraints: ip-172.31.0.1 with haproxy-bundle (score:INFINITY) [root@ctrl-1-0 ~]# pcs resource config ip-172.31.0.1 Resource: ip-172.31.0.1 (class=ocf provider=heartbeat type=IPaddr2) Attributes: cidr_netmask=32 ip=172.31.0.1 nic=lo Meta Attrs: resource-stickiness=INFINITY Operations: monitor interval=10s timeout=20s (ip-172.31.0.1-monitor-interval-10s) start interval=0s timeout=20s (ip-172.31.0.1-start-interval-0s) stop interval=0s timeout=20s (ip-172.31.0.1-stop-interval-0s) [root@ctrl-1-0 ~]# ip addr show lo 1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000 link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 inet 127.0.0.1/8 scope host lo valid_lft forever preferred_lft forever inet 172.31.0.1/32 scope global lo valid_lft forever preferred_lft forever ...</code></pre> <p>After Pacemaker configures the VIP in one of the nodes, it configures this IP address in the <code>lo</code> interface, triggering FRR to advertise a directly connected route on that node:</p> <pre> <code class="language-java">[root@ctrl-1-0 ~]# podman exec -it frr vtysh -c "show ip bgp" | grep 172.31.0.1 *> 172.31.0.1/32 0.0.0.0 0 0 32768 ?</code></pre> <p>Now we can explore the route to this IP address, which is hosted by <code>ctrl-1-0</code>, from the <code>leaf-2-1</code> leaf node in <code>rack-2</code>:</p> <pre> <code class="language-java"># for i in leaf-2-1 spine-2 spine-1 leaf-1-1 leaf-1-2; do ssh $i ip route show 172.31.0.1; done Warning: Permanently added 'leaf-2-1' (ECDSA) to the list of known hosts. 172.31.0.1 nhid 330 proto bgp metric 20 nexthop via inet6 fe80::5054:ff:fefe:158a dev eth2 weight 1 nexthop via inet6 fe80::5054:ff:fe55:bdf dev eth1 weight 1 Warning: Permanently added 'spine-2' (ECDSA) to the list of known hosts. 172.31.0.1 nhid 161 proto bgp metric 20 nexthop via inet6 fe80::5054:ff:feb4:d2d0 dev eth3 weight 1 nexthop via inet6 fe80::5054:ff:fec5:7bad dev eth2 weight 1 Warning: Permanently added 'spine-1' (ECDSA) to the list of known hosts. 172.31.0.1 nhid 439 proto bgp metric 20 nexthop via inet6 fe80::5054:ff:fe6f:466b dev eth3 weight 1 nexthop via inet6 fe80::5054:ff:fe8d:c63b dev eth2 weight 1 Warning: Permanently added 'leaf-1-1' (ECDSA) to the list of known hosts. 172.31.0.1 nhid 142 via 100.65.1.2 dev eth3 proto bgp metric 20 Warning: Permanently added 'leaf-1-2' (ECDSA) to the list of known hosts. 
172.31.0.1 nhid 123 via 100.64.0.2 dev eth3 proto bgp metric 20</code></pre> <p>Traffic directed to the OpenStack control plane VIP (172.31.0.1) from <code>leaf-2-1</code> goes through either the <code>eth1</code> (on <code>spine-1</code>) or <code>eth2</code> (on <code>spine-2</code>) ECMP routes. The traffic continues from <code>spine-1</code> on ECMP routes again to <code>leaf-1-1</code>, or from <code>spine-2</code> to <code>leaf-1-2</code>. Finally, the traffic goes through <code>eth3</code> to the controller hosting the service, <code>ctrl-1-0</code>.</p> <h3>High availability through BFD</h3> <p>As mentioned earlier, BFD is running in the network to detect network failures. In order to illustrate its operation, following the example in the previous section, let's take down the NIC in <code>leaf-1-1</code> that connects to the controller node, and see how the routes adjust on the <code>spine-1</code> node to go through the other leaf in the same rack.</p> <p>Initially, there is an ECMP route in the <code>spine-1</code> node to the VIP that sends the traffic to both leaves in rack 1:</p> <pre> <code class="language-java">[root@spine-1 ~]# ip route show 172.31.0.1 172.31.0.1 nhid 179 proto bgp metric 20 nexthop via inet6 fe80::5054:ff:fe6f:466b dev eth3 weight 1 nexthop via inet6 fe80::5054:ff:fe8d:c63b dev eth2 weight 1</code></pre> <p>Now let's bring down the interface that connects <code>leaf-1-1</code> to <code>ctrl-1-0</code>, which is hosting the VIP:</p> <pre> <code class="language-java">[root@leaf-1-1 ~]# ip link set eth3 down</code></pre> <p>The BFD state changes to <code>down</code> for this interface, and the route is withdrawn in the spine, now going only through <code>leaf-1-2</code>:</p> <pre> <code class="language-java"> [root@leaf-1-1 ~]# tail -f /var/log/frr/frr.log | grep state-change 2022/09/08 12:14:47 BFD: [SEY1D-NT8EQ] state-change: [mhop:no peer:100.65.1.2 local:100.65.1.1 vrf:default ifname:eth3] up -> down reason:control-expired [root@spine-1 ~]# ip route show 172.31.0.1 172.31.0.1 nhid 67 via inet6 fe80::5054:ff:fe6f:466b dev eth3 proto bgp metric 20</code></pre> <p>Similarly, if we bring up the interface again, BFD will detect this condition and the ECMP route will be re-installed.</p> <p>The newly introduced <code>frr</code> container runs in all controller, network, and compute nodes. Its configuration can be queried through the following command:</p> <pre> <code class="language-bash">$ sudo podman exec -it frr vtysh -c 'show run'</code></pre> <h2>Data plane</h2> <p>The data plane refers here to the workloads running in the OpenStack cluster. This section describes the main pieces introduced in this configuration to allow VMs to communicate in a Layer-3 only datacenter.</p> <h3>OVN BGP Agent</h3> <p>The <a href="https://opendev.org/x/ovn-bgp-agent">OVN BGP Agent</a> is a <a href="https://developers.redhat.com/topics/python">Python</a>-based daemon that runs on every compute and network node. This agent connects to the OVN southbound database and keeps track of when a workload is spawned or shut down on a particular hypervisor. The agent then triggers FRR to advertise or withdraw its IP addresses, respectively. The agent is also responsible for configuring the extra routing layer between the provider bridge (<code>br-ex</code> or <code>br-provider</code>) and the physical NICs.</p> <h3>BGP advertisement</h3> <p>The same principle shown earlier for the control plane applies to the data plane.
The difference is that for the control plane, Pacemaker configures the IP addresses to the loopback interface, whereas for the data plane, the OVN BGP Agent adds the addresses to a local <a href="https://access.redhat.com/solutions/5855721">VRF</a>. The VRF is used for isolation, because we don't want these IP addresses to interfere with the host routing table. We just want to trigger FRR to advertise and withdraw the addresses as appropriate (Figure 2).</p> <div class="rhd-c-figure"> <article class="align-center media media--type-image media--view-mode-article-content"> <div class="field field--name-image field--type-image field--label-hidden field__items"> <a href="https://developers.redhat.com/sites/default/files/fig2_10.png" data-featherlight="image"><img src="https://developers.redhat.com/sites/default/files/styles/article_floated/public/fig2_10.png?itok=fFZZXz67" width="600" height="479" alt="Diagram showing that the OVN BGP Agent controls FRR in order to advertise/withdraw routes." loading="lazy" typeof="Image" /> </a> </div> <div class="field field--name-field-caption field--type-string field--label-hidden field__items"> <div class="rhd-c-caption field__item"> Figure 2: The OVN BGP Agent controls FRR in order to advertise/withdraw routes. </div> </div> </article> </div> <div> </div> <h3>Traffic routing</h3> <p>As mentioned earlier, OVN has not been modified in any way to support this configuration. Thus, OVN believes that the L2 broadcast domain of the provider networks spans multiple hypervisors, but this is not true anymore. Both ingress and egress traffic require an extra layer of routing. The OVN BGP Agent is responsible for configuring this layer through the following actions:</p> <ol> <li> <p>Enable an ARP/NDP proxy in the provider bridge. 
Requests don't hit the destination because there's no L2 connectivity, so they're answered locally by the kernel:</p> <pre> <code class="language-bash">$ sysctl net.ipv4.conf.br-ex.proxy_arp net.ipv4.conf.br-ex.proxy_arp = 1 $ sysctl net.ipv6.conf.br-ex.proxy_ndp net.ipv6.conf.br-ex.proxy_ndp = 1</code></pre> </li> <li> <p>For ingress traffic, add host routes in the node to forward the traffic to the provider bridge:</p> <pre> <code class="language-bash">$ sudo ip rule show | grep br-ex 32000: from all to 172.24.100.217 lookup br-ex $ sudo ip route show table br-ex default dev br-ex scope link 172.24.100.217 dev br-ex scope link</code></pre> </li> <li> <p>For egress traffic, add flows that change the destination MAC address to that of the provider bridge, so that the kernel will forward the traffic using the default outgoing ECMP routes:</p> <pre> <code class="language-bash">$ ip link show br-ex 7: br-ex: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UNKNOWN mode DEFAULT group default qlen 1000 link/ether 3e:cc:28:d7:10:4e brd ff:ff:ff:ff:ff:ff $ sudo ovs-ofctl dump-flows br-ex cookie=0x3e7, duration=48.114s, table=0, n_packets=0, n_bytes=0, priority=900,ip,in_port="patch-provnet-b" actions=mod_dl_dst:3e:cc:28:d7:10:4e,NORMAL cookie=0x3e7, duration=48.091s, table=0, n_packets=0, n_bytes=0, priority=900,ipv6,in_port="patch-provnet-b" actions=mod_dl_dst:3e:cc:28:d7:10:4e,NORMAL cookie=0x0, duration=255892.138s, table=0, n_packets=6997, n_bytes=1368211, priority=0 actions=NORMAL $ ip route show default default nhid 34 proto bgp src 172.30.2.2 metric 20 nexthop via 100.64.0.5 dev eth1 weight 1 nexthop via 100.65.2.5 dev eth2 weight 1 </code></pre> </li> </ol> <p>This example is for a VM on a provider network and applies as well to Floating IP addresses. However, for workloads in tenant networks, host routes are advertised from network and compute nodes using the Neutron gateway IP address as the next hop. From the gateway node, the traffic reaches the destination compute node through the Geneve tunnel (L3) as usual.</p> <h2>References</h2> <p>More information can be found at:</p> <ul> <li><a href="https://opendev.org/x/ovn-bgp-agent/src/commit/1fa471083c4fdbdac8d2781822c55eb7b8069fa2/doc/source/contributor/bgp_mode_design.rst">OVN BGP Agent upstream documentation</a></li> <li><a href="https://ltomasbo.wordpress.com/2021/02/04/ovn-bgp-agent-in-depth-traffic-flow-inspection/">OVN BGP Agent: In-depth traffic flow inspection blogpost</a></li> <li><a href="https://www.youtube.com/watch?v=eKH14UN856o">OpenInfra Summit Berlin '22 - Using BGP to interconnect workloads across clouds</a> (video)</li> <li><a href="https://www.youtube.com/watch?v=91daVTMt9AA">Devconf 2021 - Layer 3 Networking with BGP in hyperscale DCx</a> (video)</li> </ul> The post <a href="https://developers.redhat.com/articles/2022/09/22/learn-about-new-bgp-capabilities-red-hat-openstack-17" title="Learn about the new BGP capabilities in Red Hat OpenStack 17">Learn about the new BGP capabilities in Red Hat OpenStack 17</a> appeared first on <a href="https://developers.redhat.com/blog" title="Red Hat Developer">Red Hat Developer</a>. 
<br /><br />Daniel Alvarez Sanchez2022-09-22T07:00:00ZThis Week in JBoss - 22 September 2022Romain Pelissehttps://www.jboss.org/people/romain-pelissedo-not-reply@jboss.comhttps://www.jboss.org/posts/weekly-2022-09-22.html2022-09-22T00:00:00Z<article class="" data-tags="quarkus,resteasy,kie,keycloak,wildfly"> <h1>This Week in JBoss - 22 September 2022</h1> <p class="preamble"></p><p><em>Hi everyone and welcome to the latest installment of JBoss editorial! Today’s stars of the show: Quarkus and KIE (Kogito/Drools)</em></p><p></p> <div class="sect1"> <h2 id="_quarkus">Quarkus</h2> <div class="sectionbody"> <p>Quarkus is quite busy this month! Just yesterday, the project released <a href="https://quarkus.io/blog/quarkus-2-12-3-final-released/">Quarkus 2.12.3.Final</a>, the third round of bugfixes and performance enhancements for 2.12, which we mentioned in our previous editorial. But that’s not all, Quarkus tooling also got some love with the release of <a href="https://quarkus.io/blog/intellij-quarkus-tools-1.13.0/">Quarkus Tools for IntelliJ 1.13.0 released!</a>.</p> <p>Beyond the publication of new software and bugfixes, James Cobb also took the time to publish the 24th installment of the <a href="https://quarkus.io/newsletter/24/">Quarkus Newsletter</a>, a must-read for anyone who wants to follow or play with Quarkus! And to this point, an interesting new player has joined the project’s community: <a href="https://quarkus.io/blog/aphp-user-story/">Quarkus adoption by APHP (Assistance Publique des Hôpitaux de Paris)</a>!</p> <p>Of course, if you are already familiar with Quarkus, you may want something more technical to quench your thirst, and Clément Escoffier has just the article for you: <a href="https://quarkus.io/blog/redis-job-queue/">How to implement a job queue with Redis</a>.</p> </div> </div> <div class="sect1"> <h2 id="_kie">KIE</h2> <div class="sectionbody"> <p>The KIE community has been quite active too in the last few days and produced quite a number of interesting articles about their technology. First, we suggest you dive into this one about <a href="https://blog.kie.org/2022/09/creating-your-first-cloud-agnostic-serverless-application-with-java.html">Creating your first cloud-agnostic serverless application with Java</a>. It’s a good place to start!</p> <p>Another one, called <a href="https://blog.kie.org/2022/09/new-visualizer-for-the-serverless-workflow-editor.html">New visualizer for the Serverless Workflow Editor</a>, provides a nice overview of this new tool, and we’ll certainly learn more about it and use it. If you are more interested in technical details and implementation, you are in luck: there is a rather detailed overview of the <a href="https://blog.kie.org/2022/09/efesto-refactoring-technical-details.html">Efesto refactoring</a>.</p> <p>Wait, that’s not all!
Check out this article, and the video it links to: <a href="https://blog.kie.org/2022/09/transparent-ml-integrating-drools-with-aix360.html">Transparent ML, integrating Drools with AIX360</a>!</p> </div> </div> <div class="sect1"> <h2 id="_techbytes">Techbytes</h2> <div class="sectionbody"> <p>If KIE and Quarkus have been the most prolific of the last two weeks, there are still a few more articles, coming from other projects, that you may want to check out: * <a href="http://www.mastertheboss.com/java/how-to-spot-java-bugs-with-spotbugs/">How to spot Java bugs with SpotBugs</a> * <a href="http://www.mastertheboss.com/jboss-frameworks/resteasy/getting-started-with-jakarta-restful-services/">Getting started with Jakarta RESTful Services</a> * <a href="https://infinispan.org/blog/2022/09/12/infinispan-14-console-wizard">Creating cache with wizard - Infinispan 14</a> * <a href="https://www.wildfly.org//news/2022/09/14/Remote-dev-watch/">Remote dev-watch development with WildFly Jar Maven Plugin</a> * <a href="https://blog.kie.org/2022/09/multiple-repositories-pull-request-chaos-crawl-them-all-in-one-single-place.html">Multiple repositories Pull Request chaos, crawl them all in one single place</a></p> </div> </div> <div class="sect1"> <h2 id="_releases_releases_releases">Releases, releases, releases…​</h2> <div class="sectionbody"> <p>As always, the JBoss community has been quite active and a few projects published new versions in the last two weeks:</p> <div class="ulist"> <ul> <li> <p><a href="https://quarkus.io/blog/quarkus-2-5-3-final-released/">Quarkus 2.12.2.Final released</a> followed by <a href="https://quarkus.io/blog/quarkus-2-12-3-final-released/">Quarkus 2.12.3.Final</a></p> </li> <li> <p><a href="https://quarkus.io/blog/intellij-quarkus-tools-1.13.0/">Quarkus Tools for IntelliJ 1.13.0 released!</a></p> </li> <li> <p><a href="https://resteasy.dev/2022/09/08/resteasy-6.2.0.Beta1-release/">RESTEasy 6.2.0.Beta1 Release</a></p> </li> <li> <p><a href="https://www.keycloak.org/2022/09/keycloak-1902-released">Keycloak 19.0.2 released</a></p> </li> </ul> </div> </div> </div> <div class="sect1"> <h2 id="_decaf">Decaf'</h2> <div class="sectionbody"> <p>Feeling too jittery? Enough Java for now? Get refreshed with these two next articles about <strong>regular expressions</strong>:</p> <div class="ulist"> <ul> <li> <p><a href="https://developers.redhat.com/articles/2022/09/14/beginners-guide-regular-expressions-grep">A beginner’s guide to regular expressions with grep</a></p> </li> <li> <p><a href="https://developers.redhat.com/articles/2022/09/16/regex-how-quantifiers-pattern-collections-and-word-boundaries">Regex how-to: Quantifiers, pattern collections, and word boundaries</a></p> </li> </ul> </div> <p><em>That’s all for today! Please join us again next time for another round of our JBoss editorial!</em></p> </div> </div> <div class="author"> <pfe-avatar pfe-shape="circle" pfe-pattern="squares" pfe-src="/img/people/romain-pelisse.png"></pfe-avatar> <span>Romain Pelisse</span> </div></article>Romain PelisseEfesto refactoring &#8211; technical detailsGabriele Cardosihttps://blog.kie.org/2022/09/efesto-refactoring-technical-details.html2022-09-21T11:09:56ZThis post is meant as a description of the APIs and other technical details of the Efesto framework. It continues the introduction made in the BASE CONCEPTS.
There are some concepts around which the APIs are implemented: * Generated resource * Unique identifier * Context of execution The framework provides and manages default implementations of the classes representing those concepts. Those classes could be extended by different engines for their specific needs (e.g. the Kie-drl compilation plugin defines a context that contains a KnowledgeBuilderConfiguration), but this specific addition should never leak out of the engine itself, and the functionality of the framework itself should never rely on such "custom" details. GENERATED RESOURCE A generated resource represents the result of a compilation. By itself it is just a marker interface, because there are different kinds of generated resources: * executable resources () * redirect resources () * “container” resources (like ). Executable resources represent the "entry point" for execution at runtime, and contain the information required to "instantiate" the executable unit. For some code-generation models (e.g. rules, predictions) this means storing the class to instantiate at runtime, which will be used to start the evaluation. For models that do not rely on code-generation for execution (e.g. decisions), this resource contains the name of the class to be instantiated and/or the methods/parameters to be invoked. Redirect resources contain the information needed to forward the execution request to a different engine, including information about the engine to be invoked. Container resources are meant to store other information needed at runtime (e.g. the classes generated during compilation). UNIQUE IDENTIFIER The unique identifier () contains the information required to uniquely identify an executable or redirect generated resource. ModelLocalUriId contains information about: * the model/engine to invoke * the full path to the given resource The unique identifier is represented as a "path" whose root is the model/engine to invoke, and the path describes all the elements required to get to the specific resource. Stateless engines (e.g. DMN, PMML) describe that as "/namespace/model_name" or "/filename/model_name". Stateful engines would require further path components to identify the specific "state" to be invoked (e.g. "/drl/rule_base/session_name/session_identifier"). ModelLocalUriId is a property of both GeneratedExecutableResource and GeneratedRedirectResource, since both of them have to be retrieved during runtime execution. ModelLocalUriId is a feature that was initially implemented in the Kogito Incubation API, for which an explanation is available . For each module, client code should be able to invoke a method like this to retrieve the unique identifier: ModelLocalUriId modelLocalUriId = appRoot("") .get(PmmlIdFactory.class) .get(fileNameNoSuffix, modelName); This is a fluent API, and each get invocation corresponds to an element in the generated path. The appRoot parameter is only used to differentiate multiple applications (e.g. in a distributed context). The first get is needed to start the path building. Each module should implement its own factory extending ComponentRoot, which, in turn, will be used to generate the full path. Each of the following get calls should return an object that extends ModelLocalUriId, since each represents the path up to that specific segment. Each module may provide its own strategy to define such paths, so each module may implement its own subclasses, depending on its needs.
Since the ModelLocalUriId constructor requires a LocalUri instance, any of its subclasses should implement a way to call that constructor with such an instance. In the following example: public class PmmlIdFactory implements ComponentRoot { public LocalComponentIdPmml get(String fileName, String name) { return new LocalComponentIdPmml(fileName, name); } } the PmmlIdFactory exposes a get method (the fluent API) that requires fileName and name parameters. These, in turn, are used to invoke the LocalComponentIdPmml constructor. public class LocalComponentIdPmml extends ModelLocalUriId { public static final String PREFIX = "pmml"; public LocalComponentIdPmml(String fileName, String name) { super(LocalUri.Root.append(PREFIX).append(fileName).append(name)); } } This snippet: LocalUri.Root.append(PREFIX).append(fileName).append(name) will lead to the creation of the following path: /{PREFIX}/{fileName}/{name} CONTEXT OF EXECUTION The context contains basic information about the current execution: the generated classes and the unique identifiers generated during compilation. There are specializations used at compile time and at runtime, each with a default implementation. Engines may extend the above as per their needs. For example, (the EfestoCompilationContext used inside the rule engine) defines a KnowledgeBuilderConfiguration for its needs. COMPILATION CONTEXT This is the specialization used at compile time, and it is used to store the classes generated during compilation. A default implementation is provided, together with a static method () to retrieve it with all the classes eventually compiled from a previous compilation. That static method, behind the scenes, invokes the constructor that scans the classloader to look for efesto-related classes. RUNTIME CONTEXT This is the specialization used at runtime to retrieve the generated classes. A default implementation is provided, together with a static method () to retrieve it with all the efesto-related compiled classes. That static method, behind the scenes, invokes the constructor that scans the classloader to look for efesto-related classes. PUBLIC APIS The framework consists basically of two sets of APIs, the "compilation" and the "runtime" ones. Those APIs are defined inside and . Those are the APIs that "client code" is expected to invoke. Said differently, "client code" is expected to interact with engines only through those APIs. COMPILATION API void processResource(EfestoCompilationContext context, EfestoResource... toProcess); This is the method that "External applications" (e.g. kie-maven-plugin) should invoke to create executable units out of given models. EfestoResource is the DTO wrapping a single model to be processed. Its only method T getContent(); is invoked by the compilation manager to get the object to be processed. The most common usage is to provide an actual File to the compilation manager, in which case there already is an implementation, . There is also a specific abstract implementation that wraps a Set of models; as for the previous one, there already exists an implementation to manage Files, . RUNTIME API Collection<EfestoOutput> evaluateInput(EfestoRuntimeContext context, EfestoInput... toEvaluate); This is the method that "External applications" (e.g. kogito execution) should invoke to retrieve a result out of executable units generated at compile-time. EfestoInput is the DTO wrapping the data to be evaluated and the unique identifier of the executable unit.
It has two methods: ModelLocalUriId getModelLocalUriId(); T getInputData(); The former returns the unique identifier of the executable unit; the latter returns the data to use for evaluation. Currently there are no "default" implementations of it, since the input structure is generally model-specific; so, every plugin should provide its own implementation. INTERNAL APIS Behind the scenes, when the CompilationManager and the RuntimeManager receive a request, they scan the classloader for engine plugins. Such plugins should implement, respectively, the KieCompilerService and the KieRuntimeService interfaces. COMPILERSERVICE API The KieCompilerService interface declares three methods: boolean canManageResource(EfestoResource toProcess); List<E> processResource(EfestoResource toProcess, U context); String getModelType(); The first one is invoked by the CompilationManager to verify whether the specific implementation is able to manage the given resource. The evaluation could be based on the actual type of the resource, on some details of the content, or on a mix of them. It is the responsibility of the implementation to provide the appropriate logic. The only requirement to keep in mind is that, during execution, there should be at most one implementation that returns true for a given EfestoResource, otherwise an exception is thrown. The following snippet is an example where a given EfestoResource is considered valid if it is a DecisionTableFileSetResource: @Override public boolean canManageResource(EfestoResource toProcess) { return toProcess instanceof DecisionTableFileSetResource; } The above implementation works because DecisionTableFileSetResource is a class specifically defined by the plugin itself, so there are no possible "overlaps" with other implementations. On the other side, the following snippet is an example where a given EfestoResource is considered valid if it is an EfestoFileResource and the contained model is a PMML one: @Override public boolean canManageResource(EfestoResource toProcess) { return toProcess instanceof EfestoFileResource && ((EfestoFileResource) toProcess).getModelType().equalsIgnoreCase(PMML_STRING); } In this case, the actual class of the EfestoResource is not enough, since EfestoFileResource is one of the default implementations provided by the framework. So, a further check is needed on the model wrapped in the resource. A single plugin may manage multiple representations of the same model. For example, a plugin may manage both an EfestoFileResource and an EfestoInputStreamResource. There are different possible strategies to do that: the plugin may provide one single "compilation-module" with two classes implementing the KieCompilerService; it may define two "compilation-modules", each with one implementation; or one single class may manage both kinds of input. Again, this is the responsibility of the plugin itself. This also pushes toward code reuse. For a given model, there could be a common path that provides the final compilation output, and different entry points depending on the model representation. It is thus possible that multiple compilation modules create a compilation output that, in turn, is also an EfestoResource. Then, there could be another implementation that accepts the above intermediate resource as input, and transforms it into the final compilation output. This chaining is managed by the efesto framework out of the box. An example of that is featured by the drools-related pmml models. During compilation, the PMML compiler generates an intermediate output that is both an EfestoResource and an EfestoCompilationOutput.
When the CompilationManager retrieves that compilation output, since it is an EfestoResource, it scans the plugins to find one that is able to compile it. The kie-drl compilation plugin fulfills this requirement and proceeds with drl-specific compilation. One thing to notice here is that different modules should limit direct dependencies between them as much as possible. The second method is invoked by the compilation manager if the previous one returned true. That method also receives an EfestoCompilationContext as parameter. Code-generating implementations should rely on that context for compilation and classloading. The third method is used by the framework to discover, at execution time, which models can actually be managed. Thanks to that method, there is a complete de-coupling between the framework and the implementations themselves, since the framework can dynamically discover the available models, and every plugin may freely define its own model. A last critical bit is that every compilation module should contain an org.kie.efesto.compilationmanager.api.service.KieCompilerService file inside the src/main/resources/META-INF directory, and that file should list all the KieCompilerService implementations provided by that module.
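To make the compilation-side contract more concrete, the following is a minimal, hypothetical plugin sketch. All the "MyModel" types, the package, and the generic signature are illustrative assumptions, not actual Efesto classes:

import java.util.Collections;
import java.util.List;

public class MyModelCompilerService implements KieCompilerService<MyModelCompilationOutput, MyModelCompilationContext> {

    @Override
    public boolean canManageResource(EfestoResource toProcess) {
        // MyModelFileResource is defined by this plugin only, so there is
        // no possible overlap with other implementations
        return toProcess instanceof MyModelFileResource;
    }

    @Override
    public List<MyModelCompilationOutput> processResource(EfestoResource toProcess, MyModelCompilationContext context) {
        MyModelFileResource resource = (MyModelFileResource) toProcess;
        // generate the executable unit; code-generating plugins would also
        // rely on the given context for compilation and classloading
        return Collections.singletonList(new MyModelCompilationOutput(resource.getContent()));
    }

    @Override
    public String getModelType() {
        // lets the framework discover which model this plugin manages
        return "mymodel";
    }
}

The src/main/resources/META-INF/org.kie.efesto.compilationmanager.api.service.KieCompilerService file of such a module would then list this implementation by its fully qualified name (package name assumed here).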
RUNTIMESERVICE API The KieRuntimeService interface declares three methods: boolean canManageInput(EfestoInput toEvaluate, K context); Optional<E> evaluateInput(T toEvaluate, K context); String getModelType(); The first one is invoked by the RuntimeManager to verify whether the specific implementation is able to manage the given input. The evaluation could be based on the actual type of the resource, on some details of the content, or on a mix of them. It is the responsibility of the implementation to provide the appropriate logic. The only requirement to keep in mind is that, during execution, there should be at most one implementation that returns true for a given EfestoInput, otherwise an exception is thrown. The following snippet is an example where a given EfestoInput is considered valid if it is an EfestoInputPMML and the given identifier has already been compiled: public static boolean canManage(EfestoInput toEvaluate, EfestoRuntimeContext runtimeContext) { return (toEvaluate instanceof EfestoInputPMML) && isPresentExecutableOrRedirect(toEvaluate.getModelLocalUriId(), runtimeContext); } The above implementation works because EfestoInputPMML is a class specifically defined by the plugin itself, so there are no possible "overlaps" with other implementations. The difference from the compilation side is that the KieRuntimeService implementation should also check that the model related to the given unique identifier has already been compiled. A single plugin may manage different types of input for the same model. For example, the rule plugin may manage both an EfestoInputDrlKieSessionLocal and an AbstractEfestoInput that contains an EfestoMapInputDTO. There are different possible strategies to do that: the plugin may provide one single "runtime-module" with two classes implementing the KieRuntimeService; it may define two "runtime-modules", each with one implementation; or one single class may manage both kinds of input. Again, this is the responsibility of the plugin itself. This also pushes toward code reuse. For a given model, there could be a common code-path that provides the final runtime result, and different entry points depending on the input format. It is thus possible that a runtime implementation needs a result from another implementation. In that case, the calling runtime will create a specifically-crafted EfestoInput and will ask the RuntimeManager for its result. This chaining is managed by the efesto framework out of the box. An example of that is featured by the drools-related pmml models. During execution, the PMML runtime generates an EfestoInput<EfestoMapInputDTO> and sends it to the RuntimeManager. The RuntimeManager scans the plugins to find one that is able to execute it. The rule plugin fulfills this requirement and proceeds with drl-specific execution. One thing to note here is that modules should limit direct dependencies between them as much as possible! The second method is invoked by the runtime manager if the previous one returned true. That method also receives an EfestoRuntimeContext as parameter. Code-generating implementations should rely on that context to retrieve/load classes generated during compilation. The third method is used by the framework to discover, at execution time, which models can actually be managed. Thanks to that method, there is a complete de-coupling between the framework and the implementations themselves, since the framework can dynamically discover the available models, and every plugin may freely define its own model. A last critical bit is that every runtime module should contain an org.kie.efesto.runtimemanager.api.service.KieRuntimeService file inside the src/main/resources/META-INF directory, and that file should list all the KieRuntimeService implementations provided by that module.
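Mirroring the compilation-side sketch above, a minimal runtime-side plugin could look like the following. Again, the "MyModel" types and the generic signature are illustrative assumptions, and isPresentExecutableOrRedirect is the same framework helper used in the snippet above (assumed statically imported):

import java.util.Optional;

public class MyModelRuntimeService implements KieRuntimeService<MyModelInput, MyModelOutput, EfestoRuntimeContext> {

    @Override
    public boolean canManageInput(EfestoInput toEvaluate, EfestoRuntimeContext context) {
        // besides checking the input type, also verify that the model behind
        // the identifier has actually been compiled
        return toEvaluate instanceof MyModelInput
                && isPresentExecutableOrRedirect(toEvaluate.getModelLocalUriId(), context);
    }

    @Override
    public Optional<MyModelOutput> evaluateInput(MyModelInput toEvaluate, EfestoRuntimeContext context) {
        // retrieve/load the classes generated at compile time through the
        // context, then evaluate the wrapped input data
        return Optional.of(new MyModelOutput(toEvaluate.getInputData()));
    }

    @Override
    public String getModelType() {
        return "mymodel";
    }
}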
CONCLUSION This post was meant to provide more technical details on what has been introduced in the . Following posts will provide concrete step-by-step tutorials and real use-cases so… stay tuned!!! The post appeared first on .Gabriele CardosiBind services created with AWS Controllers for KubernetesBaiju Muthukadanea05a1b5-94fc-4f41-9f45-43a331d6a4c72022-09-21T07:00:00Z2022-09-21T07:00:00Z<p>Application developers can define Amazon Web Services (AWS) resources directly from Kubernetes using <a href="https://aws-controllers-k8s.github.io/community/docs/community/overview/">AWS Controllers for Kubernetes</a> (ACK). You can use the <a href="https://redhat-developer.github.io/service-binding-operator/userguide/intro.html">Service Binding Operator</a> to easily connect applications to any AWS service provisioned through ACK.</p> <p>This article explores the connection with an RDS database and demonstrates configuring ACK to create a service instance for the AWS Relational Database Service (RDS). You can also learn how to use Service Binding Operator annotations to bind a PostgreSQL service created using RDS and a REST API.</p> <h2>Benefits of the Service Binding Operator and AWS Controllers for Kubernetes </h2> <p>One benefit of the Service Binding Operator and ACK is that they streamline the formation of a connection. The Service Binding Operator implements the <a href="https://servicebinding.io">Service Binding specification for Kubernetes</a>. This is a Kubernetes-wide specification for automating the process of communicating service secrets to workloads.</p> <p>Another benefit of using the Service Binding Operator is that applications with many microservices (maybe hundreds of them) only need to set the correct label to receive binding data from the services specified by Service Binding Operator resources, using the <a href="https://redhat-developer.github.io/service-binding-operator/userguide/binding-workloads-using-sbo/binding-options.html#binding-workloads-using-a-label-selector">label selector</a>.</p> <p>The Service Binding Operator supports the following methods to obtain connection details from a service:</p> <ul> <li><a href="https://github.com/servicebinding/spec#provisioned-service">Provisioned Service</a></li> <li><a href="https://github.com/servicebinding/spec#direct-secret-reference">Direct Secret Reference</a></li> <li><a href="https://redhat-developer.github.io/service-binding-operator/userguide/exposing-binding-data/adding-annotation.html">Annotations</a></li> </ul> <p>Currently, ACK does not support the Provisioned Service method, and no single secret contains all the connection details. In such a scenario, you can use the annotation support provided by the Service Binding Operator and add this annotation to a Custom Resource (CR) or Custom Resource Definition (CRD).</p> <p>The following articles offer more information about ACK, including where the ACK project came from, why the Operator pattern is used, and how to configure and use ACK:</p> <ul> <li><a href="https://developers.redhat.com/articles/2022/05/16/how-use-operators-aws-controllers-kubernetes">How to use Operators with AWS Controllers for Kubernetes</a></li> <li><a href="https://developers.redhat.com/articles/2022/05/24/create-aws-resources-kubernetes-and-operators">Create AWS resources with Kubernetes and Operators</a></li> </ul> <h2>Step 1:  Prerequisites setup</h2> <p>The prerequisites for this demonstration are pretty simple. You must have an AWS account and a <a href="https://developers.redhat.com/openshift">Red Hat OpenShift</a> cluster with the Service Binding Operator installed.</p> <h3>AWS account permissions</h3> <p>Your AWS account must have the <a href="https://aws-controllers-k8s.github.io/community/docs/user-docs/authorization/#aws-iam-permissions-for-ack-controller">IAM role permissions</a> for the Amazon Relational Database Service (RDS) ACK controller. The policy required for RDS is:</p> <p><code>arn:aws:iam::aws:policy/AmazonRDSFullAccess</code></p> <h3>OpenShift cluster with the Service Binding Operator</h3> <p>You need administrator access to an OpenShift cluster. To install the Service Binding Operator, create a subscription similar to this example:</p> <pre> <code class="java">apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: name: my-service-binding-operator namespace: openshift-operators spec: channel: stable name: rh-service-binding-operator source: redhat-operators sourceNamespace: openshift-marketplace</code></pre> <p>For example, place this configuration in a file named <code>subscription.yaml</code>.
Then use the following <code>oc</code> command to create the resource:</p> <pre> <code class="language-bash">$ oc apply -f subscription.yaml</code></pre> <p>Alternatively, you can install the Service Binding Operator from <a href="https://operatorhub.io">OperatorHub</a> using the OpenShift administrator console.</p> <h2>Step 2:  Install the RDS Operator in an OpenShift cluster</h2> <p>These four steps use the ACK Operator to install the RDS database. The official documentation shows detailed information about configuring ACK in an OpenShift cluster.</p> <h3>1. Create a namespace</h3> <p>The following example uses a namespace called <code>ack-system</code>:</p> <pre> <code class="language-bash">$ oc new-project ack-system</code></pre> <p>This is the output you should see:</p> <pre> <code class="java">Now using project "ack-system" on server "https://example.org:6443". ...</code></pre> <h3>2. Create a config map</h3> <p>Create a config map with the following content in a <code>config.txt</code> file:</p> <pre> <code class="java">ACK_ENABLE_DEVELOPMENT_LOGGING=true ACK_LOG_LEVEL=debug ACK_WATCH_NAMESPACE= AWS_REGION=us-west-2 AWS_ENDPOINT_URL= ACK_RESOURCE_TAGS=hellofromocp</code></pre> <p>Use this config map in your OpenShift cluster as follows:</p> <pre> <code class="language-bash">$ oc create configmap --namespace ack-system \ --from-env-file=config.txt ack-rds-user-config </code></pre> <h3>3. Create a secret</h3> <p>Save the following authentication values in a file, such as <code>secrets.txt</code>:</p> <pre> <code class="java">AWS_ACCESS_KEY_ID=<access key id> AWS_SECRET_ACCESS_KEY=<secret access key></code></pre> <p>Use this <code>secrets.txt</code> file to create a secret in your OpenShift cluster as follows:</p> <pre> <code class="language-bash">$ oc create secret generic \ --namespace ack-system \ --from-env-file=secrets.txt ack-rds-user-secrets</code></pre> <p class="Indent1"><strong>Note</strong>: Be sure to secure access to this resource and the namespace because you will keep sensitive information in this secret—your AWS Access Key ID and AWS Secret Access Key.</p> <p>Alternatively, you can set up secure access using <a href="https://aws-controllers-k8s.github.io/community/docs/user-docs/irsa/#create-an-iam-role-for-your-ack-service-controller">IAM Roles for Service Accounts</a> (IRSA).</p> <h3>4. Install the relational database service</h3> <p>Refer to the article <a href="https://developers.redhat.com/articles/2022/05/24/create-aws-resources-kubernetes-and-operators">How to get Operators to use AWS Controllers for Kubernetes</a> for ACK RDS controller installation instructions. After successful installation, this page (Figure 1) appears in the administrator console.</p> <figure role="group"> <div class="rhd-c-figure"> <article class="media media--type-image media--view-mode-article-content-full-width"> <div class="field field--name-image field--type-image field--label-hidden field__items"> <a href="https://developers.redhat.com/sites/default/files/blog-ack.png" data-featherlight="image"><img src="https://developers.redhat.com/sites/default/files/styles/article_full_width_1440px_w/public/blog-ack.png?itok=nbjyUns8" width="1440" height="710" alt="This page appears in the OpenShift administrator console after installation." 
loading="lazy" typeof="Image" /> </a> </div> <div class="field field--name-field-caption field--type-string field--label-hidden field__items"> <div class="rhd-c-caption field__item"> Figure 1: After the ACK RDS controller is installed, this page appears in the OpenShift administrator console. </div> </div> </article> </div> <figcaption class="rhd-c-caption"></figcaption> </figure> <h2>Step 3:  The consumption of annotations and label selectors</h2> <p>To enable binding, the Service Binding Operator uses the following annotations that are part of the <code>DBInstance</code> resource in a <a href="https://helm.sh">Helm chart</a>:</p> <pre> <code class="java">apiVersion: rds.services.k8s.aws/v1alpha1 kind: DBInstance metadata: annotations: "service.binding/type": "path={.spec.engine}" "service.binding/provider": "aws" "service.binding/host": "path={.status.endpoint.address}" "service.binding/port": "path={.status.endpoint.port}" "service.binding/username": "path={.spec.masterUsername}" "service.binding/password": 'path={.spec.masterUserPassword.name},objectType=Secret,sourceKey=password' "service.binding/database": "path={.spec.engine}" ...</code></pre> <p>The <code>DBInstance</code> definition represents an AWS RDS resource.</p> <p>To define the workload, the Service Binding Operator uses the following label selector (part of the <code>ServiceBinding</code> resource in the Helm chart):</p> <pre> <code class="java">apiVersion: binding.operators.coreos.com/v1alpha1 kind: ServiceBinding metadata: name: servicebinding-rds-endpoint-demo spec: bindAsFiles: true services: - group: rds.services.k8s.aws version: v1alpha1 kind: DBInstance name: {{ .Values.dbinstance.name }} application: labelSelector: matchLabels: psql.provider: aws (*) version: v1 group: apps resource: deployments</code></pre> <p class="Indent1">(*) This line specifies the label that the Service Binding Operator uses to identify the workload.</p> <p>The Helm charts are available in the <a href="https://github.com/redhat-developer/openshift-app-services-demos">app-services-samples repository</a>.</p> <p>We have not deployed the application yet. Typically, the ServiceBinding controller waits for a workload resource with a matching <code>psql.provider: aws</code> label. As soon as a workload resource is available with the matching label, the Operator uses the ServiceBinding controller to project the binding values to the workload.</p> <p>The binding values projects into the <code>/bindings</code> directory inside the container of the workload resource. The following directory structure stores the values:</p> <pre> <code class="java">/bindings └── servicebinding-rds-endpoint-demo ├── type ├── database ├── host ├── username └── password</code></pre> <p>The REST API application uses a suitable and compliant <a href="https://servicebinding.io/application-developer/#language-specific-libraries">library</a> to consume the projected binding values.</p> <h2>Step 4:  Create a database instance</h2> <p>After you clone the <a href="https://github.com/redhat-developer/openshift-app-services-demos">app-services-samples repository</a> described in the previous section, change to the <code>openshift-app-services-demos/samples/sbo/ack-rds-blog</code> directory to perform these two steps:</p> <p>1. 
<h2>Step 4:  Create a database instance</h2> <p>After you clone the <a href="https://github.com/redhat-developer/openshift-app-services-demos">app-services-samples repository</a> described in the previous section, change to the <code>openshift-app-services-demos/samples/sbo/ack-rds-blog</code> directory to perform these two steps:</p> <p>1. Run Helm on the <code>rds-postgre-chart-demo</code> chart:</p> <pre> <code class="language-bash">$ helm install rds-postgre-chart-demo -n ack-system rds-postgre-chart-demo</code></pre> <p>This is the output you should see:</p> <pre> <code class="java">NAME: rds-postgre-chart-demo LAST DEPLOYED: Thu Aug 4 09:29:26 2022 NAMESPACE: ack-system STATUS: deployed REVISION: 1 TEST SUITE: None</code></pre> <p>2. Run the following command to validate the database instance:</p> <pre> <code class="language-bash">$ kubectl get dbinstance rds-test-demo -n ack-system -o=jsonpath='{.status.dbInstanceStatus}'</code></pre> <p>Output:</p> <pre> <code class="java">available</code></pre> <p>Now the database is ready to use.</p> <h2>Step 5:  Deploy the REST API application</h2> <p>In this demo, we use the Software Security Module (SSM), a Go-based REST API application. For convenience, deploy the application using the Helm chart in the <a href="https://github.com/redhat-developer/openshift-app-services-demos">app-services-samples repository</a>. After you clone the repository, perform the following steps from the <code>openshift-app-services-demos/samples/sbo/ack-rds-blog</code> directory.</p> <p>1. Run Helm on the <code>ssm-chart</code> chart:</p> <pre> <code class="language-bash">$ helm install ssm-chart -n ack-system ssm-chart</code></pre> <p>Output:</p> <pre> <code class="java">NAME: ssm-chart LAST DEPLOYED: Thu Aug 4 04:22:24 2022 NAMESPACE: ack-system STATUS: deployed REVISION: 1 TEST SUITE: None</code></pre> <p>2. Verify that the deployment of the REST API application is successful by running:</p> <pre> <code class="language-bash">$ kubectl get deployment -n ack-system</code></pre> <p>Output:</p> <pre> <code class="java">NAME READY UP-TO-DATE AVAILABLE AGE ack-rds-controller 1/1 1 1 28m</code></pre> <p>The deployment is defined as follows in the Helm chart:</p> <pre> <code class="java">apiVersion: apps/v1 kind: Deployment metadata: name: {{ .Values.k8Name }} annotations: app.kubernetes.io/part-of: ssm labels: psql.provider: aws (*) ...</code></pre> <p>(*) This line specifies the required matching label that the ServiceBinding controller uses to identify the workload and project the bindings.</p> <p>The ServiceBinding controller watches for a deployment matching the label. After the deployment is ready, the Operator uses the ServiceBinding controller to project the binding values to the workload.</p> <h2>Step 6:  Access and validate the REST API application</h2> <p>The <code>ssm-chart</code> Helm chart also creates an <code>ssm</code> service resource for convenient access to the application. The <code>ssm</code> service resource points to the REST API application. Before connecting to this application, make sure you have the <code>DBInstance</code> resource created and ready with an RDS instance provisioned in AWS.</p> <p>Switch to another terminal to run the commands in the following steps.</p> <h3>1. Access the REST API application by forwarding the port of the service</h3> <p>An <code>oc</code> command on OpenShift is useful for port forwarding:</p> <pre> <code class="language-bash">$ oc port-forward --address 0.0.0.0 svc/ssm 8080:8080 -n ack-system</code></pre> <h3>2.
Validate the application</h3> <p>Validate that the application works as follows:</p> <h4>Generate a base64-encoded string</h4> <p>Start by creating a string from random input:</p> <pre> <code class="language-bash">$ openssl rand 32 | base64</code></pre> <p>The output contains the string you will use as input in the next step:</p> <pre> <code class="java">rgeR0ENzlxG+Erss6tw0gBkBWdLOPrQhEFQpH8O5t/Y=</code></pre> <h4>Call the wrap API</h4> <p>Call the application's <code>wrap</code> API to create a cipher from the string, using the base64-encoded string from the previous step as input:</p> <pre> <code class="language-bash">$ curl http://localhost:8080/wrap -d '{"key": "rgeR0ENzlxG+Erss6tw0gBkBWdLOPrQhEFQpH8O5t/Y="}'</code></pre> <p>The output contains the cipher string you will use as input in the next step:</p> <pre> <code class="java">{"cipher":"D/S6wDJPH ... "}</code></pre> <h4>Call the unwrap API</h4> <p>Now call the application's <code>unwrap</code> API to restore the original base64-encoded string, submitting the JSON output from the previous step:</p> <pre> <code class="language-bash">$ curl http://localhost:8080/unwrap -d '{"cipher":"D/S6wDJPH ... "}'</code></pre> <p>The output returns the original base64-encoded string:</p> <pre> <code class="java">{"key":"rgeR0ENzlxG+Erss6tw0gBkBWdLOPrQhEFQpH8O5t/Y="} </code></pre> <h2>The Service Binding Operator simplifies installation and deployment</h2> <p>With the annotation support of the Service Binding Operator, you can easily bind ACK services without making any changes to the code. You can use the same label to bind any number of workloads. The REST API application consumes the projected binding values by using one of the <a href="https://servicebinding.io/application-developer/#language-specific-libraries">libraries</a> compliant with the Service Binding specification for Kubernetes. You can use the REST API application to connect to the AWS RDS service without any application-specific changes.</p> The post <a href="https://developers.redhat.com/articles/2022/09/21/bind-services-created-aws-controllers-kubernetes" title="Bind services created with AWS Controllers for Kubernetes">Bind services created with AWS Controllers for Kubernetes</a> appeared first on <a href="https://developers.redhat.com/blog" title="Red Hat Developer">Red Hat Developer</a>. <br /><br />Baiju Muthukadan · 2022-09-21T07:00:00Z

New visualizer for the Serverless Workflow Editor · Roger Palleja · https://blog.kie.org/2022/09/new-visualizer-for-the-serverless-workflow-editor.html · 2022-09-21T00:08:57Z

We’re happy to announce that a new diagram visualizer for the Serverless Workflow domain has been released as part of Kogito Tooling 0.23.0, and it is now the default for the Kogito Serverless Workflow Editor VSCode extension. If you are not familiar with the Kogito Tooling and its extensions, please refer to the guide first. Apart from the previous capabilities of the editor, this new diagram visualizer provides a bunch of additional features to help users during the authoring of their workflows, such as:
* Automatic workflow reloading: It dynamically reloads the workflow’s visualization once any change is made in the JSON declaration text panel.
* Error Handling: In case the workflow’s JSON declaration is not valid (and thus the workflow cannot be automatically reloaded), the editor keeps the latest valid visualization of the workflow, and an error icon appears in the top right corner. Hovering over the error icon displays the cause of the error as a user-friendly message. Once the diagram is valid again, the error icon disappears and the visualization is properly updated.
* State navigation: Once a state is selected in the diagram visualizer (by clicking on it), the editor automatically navigates to the line in the JSON declaration where the state is defined.
* Mediators: Users can operate the mediators either with the mouse or with the buttons available in the mediators bar:
  * Auto-fit to diagram size: Fits the diagram to the actual viewport size.
  * Zoom: Scales the viewport accordingly (also available via mouse mediators; please see the keybindings page).
  * Panning: Translates the viewport accordingly (only available via mouse mediators; please see the keybindings page).
  * Export workflow to SVG.
From the technical perspective, the new visualizer relies on Canvas as the main rendering technology. Please stay tuned for further updates, new features, and improvements coming soon! · Roger Palleja

Quarkus 2.12.3.Final released · Guillaume Smet · https://quarkus.io/blog/quarkus-2-12-3-final-released/ · 2022-09-21T00:00:00Z

Today, we released Quarkus 2.12.3.Final, with a new round of bug fixes and documentation improvements. It is a safe upgrade for anyone using 2.12. Migration Guide: If you are not already using 2.12, please refer to our migration guide. Full changelog: You can get the full changelog of 2.12.3.Final on GitHub. · Guillaume Smet

Transparent ML, integrating Drools with AIX360 · Matteo Mortari · https://blog.kie.org/2022/09/transparent-ml-integrating-drools-with-aix360.html · 2022-09-20T12:10:33Z

Following up from a previous post about integrating Drools with the Open Prediction Service, in this new post we want to share the current results from another exploration: this time, integrating Drools with research on Transparent Machine Learning by IBM. INTRODUCTION Transparency is a key requirement in many business sectors, from FSI (Financial Services Industry), to Healthcare, to Government institutions, and many others. In recent years, a generalized need for increased transparency in decision-making processes has gained a great deal of attention from several different stakeholders, especially when it comes to automated decisioning and AI-based decision services. Specifically in the Eurozone, this ties with the GDPR and its requirement for explainability in the way businesses automate processes and decision making. Additionally, an "AI Act" has been proposed and is currently under discussion at the European Commission; under the current status of the proposal, several risk levels are identified, and the integration of AI in the business process and decision model will likely require explainability, transparency, and a conformity assessment, depending on the applicable risk level. In other parts of the world, similar legislation is coming into effect or is currently being proposed. With these considerations in mind, we will explore how to leverage rule induction strategies and specific types of machine learning models, with the intent of producing predictive models that can be integrated effectively in this general context.
TRANSPARENT ML WITH DROOLS AND AIX360 One way to address some of the problems and requirements highlighted in the previous section is to use Machine Learning to generate specific types of models that are inherently readable and transparent. As we will see in this blog post, a transparent predictive model can be handed over easily to the next phase as a decision model, to be evaluated as-is, but most importantly to be inspected and authored directly! Comparing a Transparent ML approach with general Machine Learning, we can highlight some of its characteristics:

General Machine Learning evaluation                 | Transparent ML approach
----------------------------------------------------|---------------------------------------------
All supported model types, but black-box evaluation | Model can be inspected, authored, evaluated
Accuracy focused                                    | Transparency focused
eXplainable AI complements                          | Intrinsically eXplainable
MLOps, governed by data science                     | Business-centric governance
Multiple runtimes                                   | Potentially a single runtime

Naturally, the transparent ML approach has its limitations; we will discuss alternative approaches in the conclusions of this blog post. An example pipeline can be summarized as follows: induce a rule set from the dataset, translate it into a decision model, then author and deploy it. For the examples in this blog post, we will use the UCI Adult dataset (predicting whether income exceeds $50K/yr from census data). Let's get started! RULE SET INDUCTION In this section we make use of AIX360 (AI Explainability 360), an open-source library that supports interpretability and explainability of datasets and machine learning models. Our goal in this phase is to generate a predictive model from the UCI Adult dataset using Machine Learning techniques. To generate a transparent predictive model, we can drive the generation of a RuleSet, as explained in the accompanying Jupyter notebook. As a result of this, we have now generated a set of rules, in the form of a PMML RuleSet, which represents the transparent predictive model for the Adult dataset. If you are interested in delving into more details about AIX360 and the related algorithms, check out the AIX360 project documentation. DROOLS In this section, we transform the result of the previous steps into an executable decision model, which can also be directly authored. Please note: in a different context, where the only requirement is the execution of predictive models in general, you can simply use the PMML support in Drools, or integration blueprints such as the integration of Drools with the IBM Open Prediction Service from a previous post. In this article instead, as premised, we are interested in the result being a transparent prediction model, which can be fully inspected, authored, and (naturally!) evaluated. Specifically, we will transform the transparent predictive model serialized as a RuleSet into a DMN model with DMN decision tables. To perform this transformation, we make use of the kie-dmn-ruleset2dmn utility; this is available as a developer API and as a command-line utility too. You can download a published version of the command-line utility (an executable .jar) from the project's released artifacts, or look up a more recent version directly from the source repository. To transform the RuleSet file into a DMN model, you can issue the following command: $ java -jar kie-dmn-ruleset2dmn-cli-8.27.0.Beta.jar adult.pmml --output=adult.dmn This results in a generated .dmn file, which you can author with the Kogito Tooling and evaluate as usual! We can upload the generated .dmn file onto the KIE Sandbox and make use of the KIE Sandbox extended services to evaluate the DMN model locally, as-is or authored as needed!
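As a side note, the generated DMN model can also be evaluated programmatically through the public kie-dmn API. The following Java sketch is only illustrative: the KieHelper loading route is one of several options, and the input names used below are placeholders rather than the actual column names induced from the Adult dataset.

import org.kie.api.KieBase;
import org.kie.api.io.ResourceType;
import org.kie.api.runtime.KieRuntimeFactory;
import org.kie.dmn.api.core.DMNContext;
import org.kie.dmn.api.core.DMNModel;
import org.kie.dmn.api.core.DMNResult;
import org.kie.dmn.api.core.DMNRuntime;
import org.kie.internal.io.ResourceFactory;
import org.kie.internal.utils.KieHelper;

public class AdultDmnDemo {
    public static void main(String[] args) {
        // Load the generated DMN file into a KieBase (the path is illustrative).
        KieBase kieBase = new KieHelper()
                .addResource(ResourceFactory.newFileResource("adult.dmn"), ResourceType.DMN)
                .build();
        DMNRuntime dmnRuntime = KieRuntimeFactory.of(kieBase).get(DMNRuntime.class);
        DMNModel model = dmnRuntime.getModels().get(0); // the single generated model

        DMNContext ctx = dmnRuntime.newContext();
        ctx.set("age", 39);           // placeholder input name
        ctx.set("education-num", 13); // placeholder input name

        DMNResult result = dmnRuntime.evaluateAll(model, ctx);
        result.getDecisionResults().forEach(dr ->
                System.out.println(dr.getDecisionName() + " = " + dr.getResult()));
    }
}

Running it prints each decision name with its computed result, the same outcome you would observe when evaluating the model interactively.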
It’s interesting to note that the static analysis of the DMN decision table identifies potential gaps in the table, as well as subsumptions in the rules induced during the Machine Learning phase; this is expected, and it can be authored directly depending on the overall business requirements. From the model evaluation perspective, overlapping rules are not a problem, as they would evaluate to the same prediction; this is a quite common scenario where the ML might have identified overlapping "clusters" or groupings over a number of features, leading to the same output. From a decision table perspective, however, overlapping rules can be simplified, as a more compact representation of the same table semantics is often preferable in decision management. Here it is up to the business to decide whether to keep the table as translated from the original predictive model, or to leverage the possibilities offered by the transparent ML approach and simplify/compact the table for easier reading and maintenance by the business analyst. DEPLOY We can deploy directly from the KIE Sandbox: our transparent prediction and decision model is available as a deployment on OpenShift! As you can see, with just the click of a button in the KIE Sandbox, our transparent ML model has been easily deployed on OpenShift. If you want to leverage the serverless capabilities of Knative for auto-scaling (including auto-scale to zero!) for the same predictive model, you can consider packaging it as a Kogito application; you can find more information in the Kogito documentation. CONCLUSION We have seen how a Transparent ML approach can provide solutions to some of the business requirements and conformance needs arising from regulations such as GDPR or the AI Act; we have seen how to drive rule induction to generate predictive models which are inherently transparent, can be authored directly like any other decision model, and can be deployed on a cloud-native OpenShift environment. In this post, we have focused on using the upstream AIX360 and Drools projects directly; commercial solutions by IBM and Red Hat build on these projects as well. If you are interested in additional capabilities for eXplainable AI solutions, check out the related projects! The Transparent ML predictive model, now available as a decision service, can be integrated in other DMN models and other applications as needed. For example, the transparent prediction on the Adult dataset (predicting if income exceeds $50K/yr) could become invocable as part of another decision service that decides on the applicability of requests for issuing a certain type of credit card. Another possible integration could be to employ a transparent ML predictive model in the form of scorecards inside a broader DMN model for segmentation; that is, first identify the applicable category/segment based on the input data, and then apply one of several scorecards for the specific segment. Don't miss checking out further material on related Transparent ML topics! Hope you have enjoyed this blog post, showcasing the integration of several technologies to achieve a transparent ML solution! Questions? Feedback? Let us know in the comment section below! Special thanks to Greger Ottosson and Tibor Zimanyi for their help while crafting this content. 
· Matteo Mortari

How hashing and cryptography made the internet possible · Andy Oram · 2022-09-20T07:00:00Z

<p>A lot of technologies, business choices, and public policies gave us the internet we have today—a tremendous boost to the spread of education, culture, and commerce, despite its well-documented flaws. But few people credit two deeply buried technologies for making the internet possible: hashing and cryptography.</p> <p>If more people understood the role these technologies play, more money and expertise would go toward uncovering and repairing security flaws. For instance, we probably would have fixed the <a href="http://heartbleed.com/">Heartbleed</a> programming error much earlier and avoided widespread vulnerabilities in encrypted traffic.</p> <p>This article briefly explains where hashing and cryptography come from, how they accomplish what they do, and their indelible effect on the modern internet.</p> <h2>Hashing</h2> <p>Hashing was <a href="https://www.geeksforgeeks.org/importance-of-hashing/">invented in the 1950s</a> at the world's pre-eminent computer firm of that era, IBM, by Hans Peter Luhn. What concerned him at the time was not security—how many computer scientists thought about that?—but saving disk space and memory, the most costly parts of computing back then.</p> <p>A <em>hash</em> is a way of reducing each item of data to a small, nearly unique, semi-random string of bits. For instance, if you are storing people's names, you could turn each name into the numerical value of the characters and run a set of adds, multiplies, and shift instructions to produce a 16-bit value. If the hash is good, there will be very few names that produce the same 16-bit value—very few <em>collisions</em>, as that situation is called.</p> <p>Now suppose you want to index a database for faster searching. Instead of indexing the names directly, it's much simpler and more efficient to make the index out of 16-bit values. That was one of the original uses for hashes. But they turned out to have two properties that make them valuable for security: No one can produce the original value from the hash, and no one can substitute a different value that produces the same hash. (It is theoretically possible to do either of those things, but doing so would be computationally infeasible, so they're impossible in practice.)</p> <p>Early Unix systems made use of this property to preserve password security. You created a password along with your user account and gave it to the computer, but the operating system never stored the password itself—it stored only a hash. Every time you entered your password after that, the operating system ran the hash function and let you log in if the resulting hash matched the one in the system. If the password file were snatched up by a malicious intruder, all they would get is a collection of useless hashes. (This clever use of hashes eventually turned out not to be secure enough, so it was replaced with <em>encryption,</em> which we'll discuss in more detail in the next section of this article.)</p> <p>Hashes are also good for ensuring that no one has tampered with a document or software program. Injecting malware into free software on popular repositories is not just a theoretical possibility—<a href="https://github.blog/2022-05-26-npm-security-update-oauth-tokens/">it can actually happen</a>. Therefore, every time a free software project releases code, the team runs it through a hash function. Every user who downloads the software can run it through the same function to make sure nobody has intercepted the code and inserted malware. If someone changed even one bit and ran the hash function, the resulting hash would be totally different.</p>
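<p>As a concrete illustration, here is how a user might recompute a release's SHA-256 checksum in Java and compare it with the value the project published (the file name is hypothetical):</p> <pre> <code class="language-java">import java.nio.file.Files;
import java.nio.file.Paths;
import java.security.MessageDigest;
import java.util.HexFormat;

public class Checksum {
    public static void main(String[] args) throws Exception {
        // Read the downloaded release and hash every byte of it.
        byte[] data = Files.readAllBytes(Paths.get("release-1.0.tar.gz"));
        byte[] digest = MessageDigest.getInstance("SHA-256").digest(data);
        // Print the hex digest; it should match the checksum the project published.
        System.out.println(HexFormat.of().formatHex(digest));
    }
}</code></pre> <p>Flipping even a single bit of the input file produces a completely different digest, which is exactly the property this verification relies on.</p>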
<p>Git is another of the myriad tools that use hashes to ensure the integrity of the repository, as well as to enable quick checks on changes to the repository. You can see a hash (a string of random characters) each time you issue a push or log command:</p> <pre> <code class="language-java">commit 2de089ad3f397e735a45dda3d52d51ca56d8f19a
Author: Andy Oram <andyo@example.com>
Date:   Sat Sep 3 16:28:41 2022 -0400

    New material related to commercialization of cryptography.

commit f39e7c87873a22e3bb81884c8b0eeeea07fdab48
Author: Andy Oram <andyo@example.com>
Date:   Fri Sep 2 07:47:42 2022 -0400

    Fixed typos.
</code></pre> <p>Hash functions can be broken, so <a href="https://valerieaurora.org/hash.html">new ones are constantly being invented</a> to replace the functions that are no longer safe.</p> <h2>Cryptography</h2> <p>Mathematically speaking, the goal of cryptography has always been to produce output where each bit or character is equally likely to represent any original character. If someone intercepted a message and saw the string "xkowpvi," the "x" would have an equal chance of representing an A, a B, a C, and so on.</p> <p>In digital terms, every bit in an encrypted message has a 50% chance of representing a 0 and a 50% chance of representing a 1.</p> <p>This goal is related to hashing, and there is a lot of overlap between the fields. Security experts came up with several good ways to create encrypted messages that couldn't be broken—that is, where the decryption process would be computationally infeasible without knowing the secret key used to encrypt the message. But for a long time these methods suffered from an "initial exchange" problem: The person receiving the message needed to somehow also learn what that secret encryption key was, and learn it in a way that didn't reveal the key to anybody else. Whether you're a spy in World War II Berlin trying to communicate with your U.S. buddies, or a modern retail site trying to confirm a customer's credit card online, getting the shared secret securely is a headache.</p> <p>The solution, by now fairly familiar, is to create a pair of keys, one of which you keep private and the other of which you can share freely. Like a hash, the public key is opaque, and no one can determine your private key from it. (The number of bits in the key has to be doubled every decade or so as computers get more powerful.) This solution is generally <a href="https://cryptography.fandom.com/wiki/Diffie%E2%80%93Hellman_key_exchange">attributed to Whitfield Diffie, Martin Hellman, and Ralph Merkle</a>, although a British intelligence agent thought of the solution earlier and kept it secret.</p>
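<p>The key-pair idea is easy to see with the standard <code>java.security</code> API. This sketch generates an RSA key pair and prints only the shareable half; the 3072-bit size is an arbitrary present-day choice for illustration, not a recommendation from the article:</p> <pre> <code class="language-java">import java.security.KeyPair;
import java.security.KeyPairGenerator;
import java.util.Base64;

public class KeyPairDemo {
    public static void main(String[] args) throws Exception {
        KeyPairGenerator generator = KeyPairGenerator.getInstance("RSA");
        generator.initialize(3072); // key sizes grow as computers get more powerful
        KeyPair pair = generator.generateKeyPair();
        // The public key is safe to publish; it does not reveal the private key.
        System.out.println(Base64.getEncoder().encodeToString(pair.getPublic().getEncoded()));
        // pair.getPrivate() stays on this machine and is never shared.
    }
}</code></pre>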
<p>Diffie in particular was acutely conscious of social and political reasons for developing public key encryption. In the 1970s, I think that few people thought of doing online retail sales or services using encryption. It was considered a tool of spies and criminals—but also of political dissidents and muckraking journalists. These associations explain why the U.S. government tried to suppress it, or at least keep it from being exported, for decades.</p> <p>Diffie is still quite active in the field. The most recent article I've seen with him listed as an author was published on July 18, 2022.</p> <p>The linchpin of internet cryptography came shortly afterward with <a href="https://www.telsy.com/rsa-encryption-cryptography-history-and-uses/">RSA encryption</a>, invented by Ron Rivest, Adi Shamir, and Len Adleman. RSA encryption lets two parties communicate without previously exchanging keys, even public keys. (They were prevented from reaping much profit from this historic discovery because the U.S. government prevented the export of RSA technology during most of the life of their patent.)</p> <p>A big problem in key exchange remains: If someone contacts you and says they are Andy Oram, proffering what they claim to be Andy Oram's public key, how do you know they're really me? The two main solutions (web of trust and certificate authorities) are beyond the scope of this article, and each has vulnerabilities and a lot of overhead. Nevertheless, the internet seems to work well enough with certificate authorities.</p> <h2>The internet runs on hashes and cryptography</h2> <p>The internet essentially consists of huge computer farms in data centers, to which administrators and other users have to log in. For many years, the universal way to log into another system was Telnet, now abandoned almost completely because it's insecure. If you use Telnet, someone down the hall can watch your password cross the local network and steal the password. Anyone else who can monitor the network could do the same.</p> <p>Nowadays, all communication between users and remote computers goes over the secure shell protocol (SSH), which was invented <a href="https://www.oreilly.com/library/view/ssh-the-secure/0596008953/ch01s05.html">as recently as 1995</a>. All the cloud computing and other data center administration done nowadays depend on it.</p> <p>Interestingly, 1995 also saw the advent of the <a href="https://www.techtarget.com/searchsecurity/definition/Secure-Sockets-Layer-SSL">secure sockets layer</a> (SSL) protocol, which marks the beginning of web security. Now upgraded to Transport Layer Security (TLS), this protocol is used whenever you enter a URL beginning with HTTPS instead of HTTP. The protocol is so important that <a href="https://security.googleblog.com/2014/08/https-as-ranking-signal_6.html">Google penalizes web sites that use unencrypted HTTP</a>.</p> <p>Because most APIs now use web protocols, TLS also protects distributed applications. In addition to SSH and TLS, encryption can be found everywhere modern computer systems or devices communicate. That's because the modern internet is beset with attackers, and we use hashes and encryption to minimize their harm.</p> <p>Some observers think that quantum computing will soon have the power to break encryption as we know it. That could leave us in a scary world: Everything we send over the wire would be available to governments or large companies possessing quantum computers, which are hulking beasts that need to be refrigerated to within a few degrees of absolute zero. 
We may soon need a <a href="https://nakedsecurity.sophos.com/2022/08/03/post-quantum-cryptography-new-algorithm-gone-in-60-minutes/">new army of Luhns, Diffies, and other security experts</a> to find a way to save the internet as we know it.</p> The post <a href="https://developers.redhat.com/articles/2022/09/20/how-hashing-and-cryptography-made-internet-possible" title="How hashing and cryptography made the internet possible">How hashing and cryptography made the internet possible</a> appeared first on <a href="https://developers.redhat.com/blog" title="Red Hat Developer">Red Hat Developer</a>. <br /><br />Andy Oram · 2022-09-20T07:00:00Z

diff --git a/SOURCES/.metadata/.plugins/org.jboss.tools.foundation.core/ECF_REMOTE_CACHE/URLTransportCache.cacheIndex.properties b/SOURCES/.metadata/.plugins/org.jboss.tools.foundation.core/ECF_REMOTE_CACHE/URLTransportCache.cacheIndex.properties index cdd3a64..1b531c5 100644 --- a/SOURCES/.metadata/.plugins/org.jboss.tools.foundation.core/ECF_REMOTE_CACHE/URLTransportCache.cacheIndex.properties +++ b/SOURCES/.metadata/.plugins/org.jboss.tools.foundation.core/ECF_REMOTE_CACHE/URLTransportCache.cacheIndex.properties @@ -1,3 +1,2 @@ -https%3A%2F%2Fraw.githubusercontent.com%2Fjboss-developer%2Fjboss-stacks%2F1.0.0.Final%2Fstacks.yaml=/home/rhel/Documents/fpmbuild/SOURCES/.metadata/.plugins/org.jboss.tools.foundation.core/ECF_REMOTE_CACHE/669c3a45d3e84b322cb6962ab5b619550e53d665-8983980095858836061.tmp -http%3A%2F%2Fdownload.jboss.org%2Fjbosstools%2Fconfiguration%2Fide-config.properties=/home/rhel/Documents/fpmbuild/SOURCES/.metadata/.plugins/org.jboss.tools.foundation.core/ECF_REMOTE_CACHE/ad3e417d20abdea71e86f550c1be41ac94c87225-8985864248996239453.tmp -https%3A%2F%2Fdownload.jboss.org%2Fjbosstools%2Fstatic%2Fredhat-central%2Fjbosstools-central-webpage-2.0.0-20191001.1470.zip=/home/rhel/Documents/fpmbuild/SOURCES/.metadata/.plugins/org.jboss.tools.foundation.core/ECF_REMOTE_CACHE/f845a8067220e103e8aeb8e32dc41460a4e0b06f4204026950844469541.tmp +http%3A%2F%2Fdownload.jboss.org%2Fjbosstools%2Fconfiguration%2Fide-config.properties=/home/rhel/Documents/fpmbuild/SOURCES/.metadata/.plugins/org.jboss.tools.foundation.core/ECF_REMOTE_CACHE/ad3e417d20abdea71e86f550c1be41ac94c87225-5014627469562183720.tmp +https%3A%2F%2Fdownload.jboss.org%2Fjbosstools%2Fstatic%2Fredhat-central%2Fjbosstools-central-webpage-2.0.0-20191001.1470.zip=/home/rhel/Documents/fpmbuild/SOURCES/.metadata/.plugins/org.jboss.tools.foundation.core/ECF_REMOTE_CACHE/f845a8067220e103e8aeb8e32dc41460a4e0b06f-728589809762830807.tmp diff --git a/SOURCES/RemoteSystemsTempFiles/.project b/SOURCES/RemoteSystemsTempFiles/.project new file mode 100644 index 0000000..5447a64 --- /dev/null +++ b/SOURCES/RemoteSystemsTempFiles/.project @@ -0,0 +1,12 @@ + + + RemoteSystemsTempFiles + + + + + + + org.eclipse.rse.ui.remoteSystemsTempNature + + diff --git a/SOURCES/examplemod/.classpath b/SOURCES/examplemod/.classpath index e13b092..49413f3 100644 --- a/SOURCES/examplemod/.classpath +++ b/SOURCES/examplemod/.classpath @@ -35,6 +35,6 @@ - + diff --git a/SOURCES/examplemod/libs/FCUserDev.jar b/SOURCES/examplemod/libs/FCUserDev.jar index e91fed3..c3c10a5 100644 Binary files a/SOURCES/examplemod/libs/FCUserDev.jar and b/SOURCES/examplemod/libs/FCUserDev.jar differ diff --git a/SOURCES/examplemod/libs/FeatureCreepMC-4.0ESRPre8-UserDev-src.zip b/SOURCES/examplemod/libs/FeatureCreepMC-4.0ESRPre8-UserDev-src.zip new file mode 100644 index 0000000..45da910 Binary files /dev/null and 
b/SOURCES/examplemod/libs/FeatureCreepMC-4.0ESRPre8-UserDev-src.zip differ diff --git a/SOURCES/examplemod/pom.xml b/SOURCES/examplemod/pom.xml index b930ea5..6ab9f74 100644 --- a/SOURCES/examplemod/pom.xml +++ b/SOURCES/examplemod/pom.xml @@ -4,4 +4,6 @@ examplemod 0.0.1-SNAPSHOT examplemod + FeatureCreep TutorialMod + \ No newline at end of file diff --git a/SOURCES/examplemod/src/main/java/examplemod/ExampleMod.java b/SOURCES/examplemod/src/main/java/examplemod/ExampleMod.java index e0d4396..1bdd4d7 100644 --- a/SOURCES/examplemod/src/main/java/examplemod/ExampleMod.java +++ b/SOURCES/examplemod/src/main/java/examplemod/ExampleMod.java @@ -1,12 +1,14 @@ package examplemod; -import java.io.IOException; - import org.jboss.logging.Logger.Level; import featurecreep.FeatureCreep; import featurecreep.api.DatafiedObjectRegistration; import featurecreep.api.FCRegistries; import featurecreep.api.items.FCItem; +import featurecreep.api.items.armour.ArmourProtectionValuesArray; +import featurecreep.api.items.armour.FCArmour; +import featurecreep.api.items.armour.FCArmourMaterial; +import featurecreep.api.items.armour.FCArmourSlots; import featurecreep.api.items.datafied.dmr.FCItemAsDMR; import featurecreep.api.items.tools.FCAxe; import featurecreep.api.items.tools.FCHoe; @@ -20,11 +22,6 @@ import featurecreep.api.items.tools.datafied.dmr.FCPickaxeAsDMR; import featurecreep.api.items.tools.datafied.dmr.FCShovelAsDMR; import featurecreep.api.items.tools.datafied.dmr.FCSwordAsDMR; import featurecreep.api.ui.FCCreativeTabs; -import javassist.CannotCompileException; -import javassist.ClassPool; -import javassist.CtClass; -import javassist.CtMethod; -import javassist.CtNewMethod; public class ExampleMod { @@ -64,6 +61,14 @@ public static FCItem EXAMPLE_ITEM = new FCItem(4000, "example", "example_item", public static FCAxeAsDMR EXAPLE_DMR_AXE = new FCAxeAsDMR(4013, "example", "example_dmr_axe", FCCreativeTabs.TOOLS, EXAPLE_DMR_TOOL_MATERIAL, 0, 0); + public static FCArmourMaterial EXAMPLE_ARMOUR = new FCArmourMaterial(10, new ArmourProtectionValuesArray(5, 10, 8, 5), 20, EXAMPLE_ITEM, "example", 2, 0); + public static FCArmour AMETHYST_HELMET = new FCArmour(4014, "example", "example_helmet", FCCreativeTabs.COMBAT, EXAMPLE_ARMOUR, FCArmourSlots.HELMET); + public static FCArmour AMETHYST_CHESTPLATE = new FCArmour(4015, "example", "example_chestplate", FCCreativeTabs.COMBAT, EXAMPLE_ARMOUR, FCArmourSlots.TUBIC); + public static FCArmour AMETHYST_LEGGINS = new FCArmour(4016, "example", "example_leggings", FCCreativeTabs.COMBAT, EXAMPLE_ARMOUR, FCArmourSlots.LEGGINGS); + public static FCArmour AMETHYST_BOOTS = new FCArmour(4017, "example", "example_boots", FCCreativeTabs.COMBAT, EXAMPLE_ARMOUR, FCArmourSlots.BOOTS); + + + /** @@ -112,6 +117,14 @@ DatafiedObjectRegistration.registerDMRItem(EXAPLE_DMR_AXE); + + +FCRegistries.registerItem(AMETHYST_HELMET); +FCRegistries.registerItem(AMETHYST_CHESTPLATE); +FCRegistries.registerItem(AMETHYST_LEGGINS); +FCRegistries.registerItem(AMETHYST_BOOTS); + + // String oldclasspath = System.getProperty("java.class.path"); // System.out.println(oldclasspath); diff --git a/SOURCES/examplemod/src/main/resources/gfx/interface/technologies/example!example_boots.dds b/SOURCES/examplemod/src/main/resources/gfx/interface/technologies/example!example_boots.dds new file mode 100644 index 0000000..4b5be49 Binary files /dev/null and b/SOURCES/examplemod/src/main/resources/gfx/interface/technologies/example!example_boots.dds differ diff --git 
a/SOURCES/examplemod/src/main/resources/gfx/interface/technologies/example!example_chestplate.dds b/SOURCES/examplemod/src/main/resources/gfx/interface/technologies/example!example_chestplate.dds new file mode 100644 index 0000000..5985dc2 Binary files /dev/null and b/SOURCES/examplemod/src/main/resources/gfx/interface/technologies/example!example_chestplate.dds differ diff --git a/SOURCES/examplemod/src/main/resources/gfx/interface/technologies/example!example_helmet.dds b/SOURCES/examplemod/src/main/resources/gfx/interface/technologies/example!example_helmet.dds new file mode 100644 index 0000000..941f5d7 Binary files /dev/null and b/SOURCES/examplemod/src/main/resources/gfx/interface/technologies/example!example_helmet.dds differ diff --git a/SOURCES/examplemod/src/main/resources/gfx/interface/technologies/example!example_leggings.dds b/SOURCES/examplemod/src/main/resources/gfx/interface/technologies/example!example_leggings.dds new file mode 100644 index 0000000..a6039b7 Binary files /dev/null and b/SOURCES/examplemod/src/main/resources/gfx/interface/technologies/example!example_leggings.dds differ diff --git a/SOURCES/examplemod/src/main/resources/gfx/models/armour/example_layer_1.png b/SOURCES/examplemod/src/main/resources/gfx/models/armour/example_layer_1.png new file mode 100644 index 0000000..ada1db5 Binary files /dev/null and b/SOURCES/examplemod/src/main/resources/gfx/models/armour/example_layer_1.png differ diff --git a/SOURCES/examplemod/src/main/resources/gfx/models/armour/example_layer_2.png b/SOURCES/examplemod/src/main/resources/gfx/models/armour/example_layer_2.png new file mode 100644 index 0000000..baf9588 Binary files /dev/null and b/SOURCES/examplemod/src/main/resources/gfx/models/armour/example_layer_2.png differ diff --git a/SOURCES/examplemod/target/classes/META-INF/MANIFEST.MF b/SOURCES/examplemod/target/classes/META-INF/MANIFEST.MF index 1c4fac7..9b2138f 100644 --- a/SOURCES/examplemod/target/classes/META-INF/MANIFEST.MF +++ b/SOURCES/examplemod/target/classes/META-INF/MANIFEST.MF @@ -2,5 +2,4 @@ Manifest-Version: 1.0 Built-By: rhel Build-Jdk: 11.0.15 Created-By: Maven Integration for Eclipse - Main-Class: examplemod.ExampleMod diff --git a/SOURCES/examplemod/target/classes/META-INF/maven/examplemod/examplemod/pom.properties b/SOURCES/examplemod/target/classes/META-INF/maven/examplemod/examplemod/pom.properties index 4234bdf..58498c7 100644 --- a/SOURCES/examplemod/target/classes/META-INF/maven/examplemod/examplemod/pom.properties +++ b/SOURCES/examplemod/target/classes/META-INF/maven/examplemod/examplemod/pom.properties @@ -1,5 +1,5 @@ #Generated by Maven Integration for Eclipse -#Sat Sep 17 19:40:43 PDT 2022 +#Fri Sep 23 22:00:45 PDT 2022 m2e.projectLocation=/home/rhel/Documents/fpmbuild/SOURCES/examplemod m2e.projectName=examplemod groupId=examplemod diff --git a/SOURCES/examplemod/target/classes/META-INF/maven/examplemod/examplemod/pom.xml b/SOURCES/examplemod/target/classes/META-INF/maven/examplemod/examplemod/pom.xml index b930ea5..6ab9f74 100644 --- a/SOURCES/examplemod/target/classes/META-INF/maven/examplemod/examplemod/pom.xml +++ b/SOURCES/examplemod/target/classes/META-INF/maven/examplemod/examplemod/pom.xml @@ -4,4 +4,6 @@ examplemod 0.0.1-SNAPSHOT examplemod + FeatureCreep TutorialMod + \ No newline at end of file diff --git a/SOURCES/examplemod/target/classes/examplemod.spec b/SOURCES/examplemod/target/classes/examplemod.spec index 819907f..8ec66f5 100644 --- a/SOURCES/examplemod/target/classes/examplemod.spec +++ 
b/SOURCES/examplemod/target/classes/examplemod.spec @@ -1,6 +1,6 @@ Name: examplemod#The Package name. This is used for things like package management when that becomes more common. Should be all lowercase with no special chars except - and _ Version: 4#A version For the Package -Release: 7%{?dist}#A release number for if you have more than 1 release. The dist part is to indicate which loader and version is being used, such as FeatureCreep 4 would be fc4 +Release: 8%{?dist}#A release number for if you have more than 1 release. The dist part is to indicate which loader and version is being used, such as FeatureCreep 4 would be fc4 Summary: This is a tutorial mod for teaching how to use FeatureCreep#This is a single line summary of the mod License: JSharp4Life Licence 1#Put the name of the Licence here diff --git a/SOURCES/examplemod/target/classes/examplemod/ExampleMod.class b/SOURCES/examplemod/target/classes/examplemod/ExampleMod.class index f255eba..f719ae0 100644 Binary files a/SOURCES/examplemod/target/classes/examplemod/ExampleMod.class and b/SOURCES/examplemod/target/classes/examplemod/ExampleMod.class differ diff --git a/SOURCES/examplemod/target/classes/gfx/interface/technologies/example!example_boots.dds b/SOURCES/examplemod/target/classes/gfx/interface/technologies/example!example_boots.dds new file mode 100644 index 0000000..4b5be49 Binary files /dev/null and b/SOURCES/examplemod/target/classes/gfx/interface/technologies/example!example_boots.dds differ diff --git a/SOURCES/examplemod/target/classes/gfx/interface/technologies/example!example_chestplate.dds b/SOURCES/examplemod/target/classes/gfx/interface/technologies/example!example_chestplate.dds new file mode 100644 index 0000000..5985dc2 Binary files /dev/null and b/SOURCES/examplemod/target/classes/gfx/interface/technologies/example!example_chestplate.dds differ diff --git a/SOURCES/examplemod/target/classes/gfx/interface/technologies/example!example_helmet.dds b/SOURCES/examplemod/target/classes/gfx/interface/technologies/example!example_helmet.dds new file mode 100644 index 0000000..941f5d7 Binary files /dev/null and b/SOURCES/examplemod/target/classes/gfx/interface/technologies/example!example_helmet.dds differ diff --git a/SOURCES/examplemod/target/classes/gfx/interface/technologies/example!example_leggings.dds b/SOURCES/examplemod/target/classes/gfx/interface/technologies/example!example_leggings.dds new file mode 100644 index 0000000..a6039b7 Binary files /dev/null and b/SOURCES/examplemod/target/classes/gfx/interface/technologies/example!example_leggings.dds differ diff --git a/SOURCES/examplemod/target/classes/gfx/models/armour/example_layer_1.png b/SOURCES/examplemod/target/classes/gfx/models/armour/example_layer_1.png new file mode 100644 index 0000000..ada1db5 Binary files /dev/null and b/SOURCES/examplemod/target/classes/gfx/models/armour/example_layer_1.png differ diff --git a/SOURCES/examplemod/target/classes/gfx/models/armour/example_layer_2.png b/SOURCES/examplemod/target/classes/gfx/models/armour/example_layer_2.png new file mode 100644 index 0000000..baf9588 Binary files /dev/null and b/SOURCES/examplemod/target/classes/gfx/models/armour/example_layer_2.png differ diff --git a/SPECS/examplemod.spec b/SPECS/examplemod.spec index 819907f..8ec66f5 100644 --- a/SPECS/examplemod.spec +++ b/SPECS/examplemod.spec @@ -1,6 +1,6 @@ Name: examplemod#The Package name. This is used for things like package management when that becomes more common. 
Should be all lowercase with no special chars except - and _ Version: 4#A version For the Package -Release: 7%{?dist}#A release number for if you have more than 1 release. The dist part is to indicate which loader and version is being used, such as FeatureCreep 4 would be fc4 +Release: 8%{?dist}#A release number for if you have more than 1 release. The dist part is to indicate which loader and version is being used, such as FeatureCreep 4 would be fc4 Summary: This is a tutorial mod for teaching how to use FeatureCreep#This is a single line summary of the mod License: JSharp4Life Licence 1#Put the name of the Licence here