{"$schema":"https://www.lobbyregister.bundestag.de/json-schemas/R2.22/Lobbyregister-Registereintrag-schema-R2.22.json","source":"Deutscher Bundestag, Lobbyregister für die Interessenvertretung gegenüber dem Deutschen Bundestag und der Bundesregierung","sourceUrl":"https://www.lobbyregister.bundestag.de","sourceDate":"2026-04-19T19:49:00.522+02:00","jsonDocumentationUrl":"https://www.lobbyregister.bundestag.de/informationen-und-hilfe/open-data-1049716","registerNumber":"R001275","registerEntryDetails":{"registerEntryId":68800,"legislation":"GL2024","version":10,"detailsPageUrl":"https://www.lobbyregister.bundestag.de/suche/R001275/68800","pdfUrl":"https://www.lobbyregister.bundestag.de/media/41/05/649421/Lobbyregister-Registereintraege-Detailansicht-R001275-2025-12-05_13-40-29.pdf","validFromDate":"2025-12-05T13:40:29.000+01:00","validUntilDate":"2026-03-25T17:28:15.000+01:00","fiscalYearUpdate":{"updateMissing":false,"lastFiscalYearUpdate":"2025-07-29T11:19:36.000+02:00"}},"accountDetails":{"activeLobbyist":true,"activeDateRanges":[{"fromDate":"2024-06-27T23:20:33.000+02:00"}],"firstPublicationDate":"2022-02-24T13:23:27.000+01:00","lastUpdateDate":"2025-12-05T13:40:29.000+01:00","registerEntryVersions":[{"registerEntryId":68800,"jsonDetailUrl":"https://www.lobbyregister.bundestag.de/sucheJson/R001275/68800","version":10,"legislation":"GL2024","validFromDate":"2025-12-05T13:40:29.000+01:00","validUntilDate":"2026-03-25T17:28:15.000+01:00","versionActiveLobbyist":true},{"registerEntryId":62942,"jsonDetailUrl":"https://www.lobbyregister.bundestag.de/sucheJson/R001275/62942","version":9,"legislation":"GL2024","validFromDate":"2025-11-26T09:40:59.000+01:00","validUntilDate":"2025-12-05T13:40:29.000+01:00","versionActiveLobbyist":true},{"registerEntryId":62224,"jsonDetailUrl":"https://www.lobbyregister.bundestag.de/sucheJson/R001275/62224","version":8,"legislation":"GL2024","validFromDate":"2025-07-29T11:19:36.000+02:00","validUntilDate":"2025-11-26T09:40:59.000+01:00","v
ersionActiveLobbyist":true},{"registerEntryId":58233,"jsonDetailUrl":"https://www.lobbyregister.bundestag.de/sucheJson/R001275/58233","version":7,"legislation":"GL2024","validFromDate":"2025-06-30T22:42:07.000+02:00","validUntilDate":"2025-07-29T11:19:36.000+02:00","versionActiveLobbyist":true},{"registerEntryId":48322,"jsonDetailUrl":"https://www.lobbyregister.bundestag.de/sucheJson/R001275/48322","version":6,"legislation":"GL2024","validFromDate":"2024-12-20T09:33:31.000+01:00","validUntilDate":"2025-06-30T22:42:07.000+02:00","versionActiveLobbyist":true},{"registerEntryId":38201,"jsonDetailUrl":"https://www.lobbyregister.bundestag.de/sucheJson/R001275/38201","version":5,"legislation":"GL2024","validFromDate":"2024-06-27T23:20:33.000+02:00","validUntilDate":"2024-12-20T09:33:31.000+01:00","versionActiveLobbyist":true}],"accountHasCodexViolations":false},"lobbyistIdentity":{"identity":"ORGANIZATION","name":"Salesforce.com Germany GmbH","legalFormType":{"code":"JURISTIC_PERSON","de":"Juristische Person","en":"Legal person"},"legalForm":{"code":"LF_GMBH","de":"Gesellschaft mit beschränkter Haftung (GmbH)","en":"Limited liability company 
(GmbH)"},"contactDetails":{"phoneNumber":"+4930692051346","emails":[{"email":"nkeim@salesforce.com"},{"email":"info-de@salesforce.com"},{"email":"barbara.holzer@salesforce.com"}],"websites":[{"website":"https://www.salesforce.com/de/"}]},"address":{"type":"NATIONAL","street":"Erika-Mann-Str.","streetNumber":"31","zipCode":"80636","city":"München","country":{"code":"DE","de":"Deutschland","en":"Germany"}},"capitalCityRepresentationPresent":true,"capitalCityRepresentation":{"address":{"type":"NATIONAL","street":"Kurfürstendamm","streetNumber":"194","zipCode":"10707","city":"Berlin"},"contactDetails":{"phoneNumber":"+4930692051346","email":"info-de@salesforce.com"}},"legalRepresentatives":[{"lastName":"Wettermark","firstName":"Joachim","function":"Geschäftsführer","recentGovernmentFunctionPresent":false,"entrustedPerson":false,"contactDetails":{}},{"lastName":"Jaccottet","firstName":"Stéphane","function":"Geschäftsführer","recentGovernmentFunctionPresent":false,"entrustedPerson":false,"contactDetails":{}},{"lastName":"McDonagh","firstName":"Lesa ","function":"Geschäftsführerin","recentGovernmentFunctionPresent":false,"entrustedPerson":false,"contactDetails":{}}],"entrustedPersonsPresent":true,"entrustedPersons":[{"academicDegreeBefore":"Dr.","lastName":"Holzer","firstName":"Barbara ","recentGovernmentFunctionPresent":false},{"lastName":"Keim","firstName":"Nina","recentGovernmentFunctionPresent":false},{"lastName":"Bischoff-Everding","firstName":"Nina","recentGovernmentFunctionPresent":false},{"lastName":"Müller","firstName":"Steffen","recentGovernmentFunctionPresent":false},{"lastName":"Loeb","firstName":"Eric","recentGovernmentFunctionPresent":false},{"lastName":"Wallner","firstName":"Alexander","recentGovernmentFunctionPresent":false}],"membersPresent":false,"membershipsPresent":true,"memberships":[{"membership":"Bitkom e. V."},{"membership":"AmCham Germany e. 
V."},{"membership":"EuroCloud Deutschland_eco e.V."},{"membership":"Wirtschaftsrat der CDU e.V."}]},"activitiesAndInterests":{"activity":{"code":"ACT_ORGANIZATION_V2","de":"Sonstiges Unternehmen","en":"Other company"},"typesOfExercisingLobbyWork":[{"code":"SELF_OPERATED_OWN_INTEREST","de":"Die Interessenvertretung wird in eigenem Interesse selbst wahrgenommen","en":"Interest representation is self-performed in its own interest"},{"code":"CONTRACTS_OPERATED_BY_THIRD_PARTY","de":"Die Interessenvertretung wird in eigenem Interesse durch die Beauftragung Dritter wahrgenommen","en":"Contracts are awarded to third parties to represent own interests of the company"}],"fieldsOfInterest":[{"code":"FOI_ECONOMY_FINANCE","de":"Bank- und Finanzwesen","en":"Banking and finance"},{"code":"FOI_MEDIA_COMMUNICATION","de":"Kommunikations- und Informationstechnik","en":"Communication and information technology"},{"code":"FOI_SP_FAMILY","de":"Familienpolitik","en":"Family policy"},{"code":"FOI_MEDIA_DIGITALIZATION","de":"Digitalisierung","en":"Digitalization"},{"code":"FOI_ECONOMY_INDUSTRIAL","de":"Industriepolitik","en":"Industrial policy"},{"code":"FOI_ENVIRONMENT_SUSTAINABILITY","de":"Nachhaltigkeit und Ressourcenschutz","en":"Sustainability and resource protection"},{"code":"FOI_ECONOMY_INSURANCE","de":"Versicherungswesen","en":"Insurance"},{"code":"FOI_EU_OTHER","de":"Sonstiges im Bereich \"Europapolitik und Europäische Union\"","en":"Other in the field of \"European politics and the EU\""},{"code":"FOI_ENVIRONMENT_CLIMATE","de":"Klimaschutz","en":"Climate protection"},{"code":"FOI_ECONOMY_SERVICES","de":"Handel und Dienstleistungen","en":"Trade and services"},{"code":"FOI_ECONOMY_ECOMMERCE","de":"E-Commerce","en":"E-commerce"},{"code":"FOI_SP_GENDER","de":"Geschlechterpolitik","en":"Gender politics"},{"code":"FOI_MEDIA_INTERNET_POLICY","de":"Internetpolitik","en":"Internet policy"},{"code":"FOI_EU_DOMESTIC_MARKET","de":"EU-Binnenmarkt","en":"EU internal 
market"},{"code":"FOI_SCIENCE_RESEARCH_TECHNOLOGY","de":"Wissenschaft, Forschung und Technologie","en":"Science, research and technology"},{"code":"FOI_SA_PUBLIC_SERVICE","de":"Öffentlicher Dienst und öffentliche Verwaltung","en":"Public service"},{"code":"FOI_ENVIRONMENT_SPECIES","de":"Artenschutz/Biodiversität","en":"Species protection/biodiversity"},{"code":"FOI_MEDIA_COPYRIGHT","de":"Urheberrecht","en":"Copyright"},{"code":"FOI_PUBLIC_FINANCE","de":"Öffentliche Finanzen, Steuern und Abgaben","en":"Public finances, taxes and duties"},{"code":"FOI_MEDIA_PRIVACY","de":"Datenschutz und Informationssicherheit","en":"Data protection and information security"},{"code":"FOI_ECONOMY_COMPETITION_LAW","de":"Wettbewerbsrecht","en":"Competition law"},{"code":"FOI_EU_LAWS","de":"EU-Gesetzgebung","en":"EU legislation"},{"code":"FOI_WORK_OTHER","de":"Sonstiges im Bereich \"Arbeit und Beschäftigung\"","en":"Other in the field of \"Work and employment\""},{"code":"FOI_ECONOMY_SAM_BUSINESS","de":"Kleine und mittlere Unternehmen","en":"Small and medium business"},{"code":"FOI_MEDIA_OTHER","de":"Sonstiges im Bereich \"Medien, Kommunikation und Informationstechnik\"","en":"Other in the field of \"Media, communication and information technology\""},{"code":"FOI_SP_DIVERSITY","de":"Diversitätspolitik","en":"Diversity policy"},{"code":"FOI_IS_CYBER","de":"Cybersicherheit","en":"Cyber security"}],"activityDescription":"Salesforce ist der führende Anbieter KI-basierter CRM-Lösungen und ermöglicht es Unternehmen, durch die Kombination von CRM + KI + Daten + Vertrauen auf einer einheitlichen Plattform auf völlig neue Weise mit ihren Kunden in Kontakt zu treten. Das Government Affairs & Public Policy Team vertritt die Interessen von Salesforce gemäß des Lobbyregistergesetzes. 
Das Team tritt in Kontakt mit Regierungsvertreter:innen und politischen Entscheidungsträger:innen, um zuzuhören, Verständnis und Fachwissen zu gewinnen und politische Entscheidungen mitzugestalten, die dem Interesse unserer Aktionäre, Kunden, Partnern, Belegschaft, der Gesellschaft und unserem Planeten dienen. \r\n\r\nSalesforce setzt sich dafür ein, Wachstum und Innovation in der digitalen Wirtschaft zu fördern. Dabei wird das Handeln von Salesforce geleitet durch die Grundwerte Vertrauen, Kundenerfolg, Innovation, Chancengleichheit und Nachhaltigkeit. \r\n\r\nNeben den persönlichen Gesprächsersuchen und ausgewählten eigenen Veranstaltungen und Positionspapieren, beteiligt sich Salesforce an einer Vielzahl von Branchenorganisationen, Koalitionen und Handelsverbänden, um unsere politischen Interessen und Geschäftsziele zu fördern. Wir überprüfen diese Mitgliedschaften mindestens einmal jährlich. Salesforce bedient sich zudem punktuell der Unterstützung von Rud Pedersen Public Affairs Germany GmbH.\r\n"},"employeesInvolvedInLobbying":{"relatedFiscalYearFinished":true,"relatedFiscalYearStart":"2024-02-01","relatedFiscalYearEnd":"2025-01-31","employeeFTE":2.39},"financialExpenses":{"relatedFiscalYearFinished":true,"relatedFiscalYearStart":"2024-02-01","relatedFiscalYearEnd":"2025-01-31","financialExpensesEuro":{"from":220001,"to":230000}},"mainFundingSources":{"relatedFiscalYearFinished":true,"relatedFiscalYearStart":"2024-02-01","relatedFiscalYearEnd":"2025-01-31","mainFundingSources":[{"code":"MFS_ECONOMIC_ACTIVITY","de":"Wirtschaftliche Tätigkeit","en":"Economic 
activity"}]},"publicAllowances":{"publicAllowancesPresent":false,"relatedFiscalYearFinished":true,"relatedFiscalYearStart":"2024-02-01","relatedFiscalYearEnd":"2025-01-31"},"donators":{"relatedFiscalYearFinished":true,"relatedFiscalYearStart":"2024-02-01","relatedFiscalYearEnd":"2025-01-31","totalDonationsEuro":{"from":0,"to":0}},"membershipFees":{"relatedFiscalYearFinished":true,"relatedFiscalYearStart":"2024-02-01","relatedFiscalYearEnd":"2025-01-31","totalMembershipFees":{"from":0,"to":0},"individualContributorsPresent":false,"individualContributors":[]},"annualReports":{"annualReportLastFiscalYearExists":true,"lastFiscalYearStart":"2024-02-01","lastFiscalYearEnd":"2025-01-31","annualReportPdfUrl":"https://www.lobbyregister.bundestag.de/media/b6/49/649419/salesforce-com-Germany-GmbH-2024-2025.pdf"},"regulatoryProjects":{"regulatoryProjectsPresent":true,"regulatoryProjectsCount":5,"regulatoryProjects":[{"regulatoryProjectNumber":"RV0008315","title":"Einführung eines \"Rechts auf Verschlüsselung\" im TTDSG","printedMattersPresent":false,"printedMatters":[],"draftBillPresent":true,"draftBill":{"customTitle":"Entwurf eines ersten Gesetzes zur Änderung des TelekommunikationTelemedien-Datenschutz-Gesetzes","customDate":"2024-02-07","leadingMinistries":[{"title":"Bundesministerium für Digitales und Verkehr","shortTitle":"BMDV","electionPeriod":20,"url":"https://bmdv.bund.de/DE/Home/home.html"}]},"description":"Im Rahmen der Novellierung des TTDSG soll ein “Recht auf Verschlüsselung” eingeführt werden. Salesforce setzt sich dafür ein, Augenmerk auf den Unterschied zwischen Privater- und Unternehmensanwendungen zu legen. Eine Verpflichtung der Ende-zu-Ende-Verschlüsselung (E2EE) würde im Kontext der betriebsinternen Kommunikation die Diensteanbieter vor eine Compliance-Herausforderung mit anderen gesetzlichen Auflagen führen und ist folglich nicht Mittel der Wahl. 
Salesforce setzt sich für eine Ausnahmeregelung für Unternehmensanwendungen ein.","affectedLawsPresent":true,"affectedLaws":[{"title":"Gesetz über den Datenschutz und den Schutz der Privatsphäre in der Telekommunikation und bei digitalen Diensten","shortTitle":"TTDSG","url":"https://www.gesetze-im-internet.de/ttdsg"}],"fieldsOfInterest":[{"code":"FOI_MEDIA_DIGITALIZATION","de":"Digitalisierung","en":"Digitalization"},{"code":"FOI_MEDIA_PRIVACY","de":"Datenschutz und Informationssicherheit","en":"Data protection and information security"}]},{"regulatoryProjectNumber":"RV0008316","title":"Risikobasierte KI-Regulierung, maßgeschneidert für Unternehmen","printedMattersPresent":false,"printedMatters":[],"draftBillPresent":false,"description":"Salesforce unterstützt einen nuancierten Ansatz einer maßgeschneiderten, risikobasierten KI-Regulierung, die Kontext und Verwendungszweck der Technologie differenziert und den Schutz von Personen sicherstellt, Vertrauen aufbaut und Innovationen begünstigt. Salesforce ist der Meinung, dass globale KI transparent sein sollte und im Einklang mit den bestehenden Datenschutzgesetzen stehen sollte. ","affectedLawsPresent":false,"affectedLaws":[],"fieldsOfInterest":[{"code":"FOI_MEDIA_DIGITALIZATION","de":"Digitalisierung","en":"Digitalization"}]},{"regulatoryProjectNumber":"RV0008317","title":"Entkopplung technischer und politischer Anforderungen an Cloud-Zertifikate","printedMattersPresent":false,"printedMatters":[],"draftBillPresent":false,"description":"Salesforce setzt sich für die rasche Verabschiedung eines praktikablen und nichtdiskriminierenden Zertifizierungssystems auf EU-Ebene ein. Aus Sicht von Salesforce ist eine Entkopplung von technischen und politischen Anforderungen im EUCS sowohl aus prozeduralen als auch aus rechtssystematischen Gründen notwendig. Nichttechnische Anforderungen (sog. 
immunity requirements) sollten aus Sicht von Salesforce nicht zum Gegenstand eines EU-Zertifizierungsrahmens gemacht werden.","affectedLawsPresent":false,"affectedLaws":[],"fieldsOfInterest":[{"code":"FOI_MEDIA_INTERNET_POLICY","de":"Internetpolitik","en":"Internet policy"},{"code":"FOI_IS_CYBER","de":"Cybersicherheit","en":"Cyber security"},{"code":"FOI_MEDIA_DIGITALIZATION","de":"Digitalisierung","en":"Digitalization"},{"code":"FOI_MEDIA_PRIVACY","de":"Datenschutz und Informationssicherheit","en":"Data protection and information security"},{"code":"FOI_MEDIA_COMMUNICATION","de":"Kommunikations- und Informationstechnik","en":"Communication and information technology"}]},{"regulatoryProjectNumber":"RV0017855","title":"Richtliniennahe Umsetzung von NIS-2","printedMattersPresent":false,"printedMatters":[],"draftBillPresent":true,"draftBill":{"customTitle":"Entwurf eines Gesetzes zur Umsetzung der NIS-2-Richtlinie und zur Regelung wesentlicher Grundzüge des Informationssicherheitsmanagements in der Bundesverwaltung","customDate":"2025-05-26","leadingMinistries":[{"title":"Bundesministerium des Innern und für Heimat","shortTitle":"BMI","electionPeriod":20,"url":"https://www.bmi.bund.de/DE/startseite/startseite-node.html"}]},"description":"Salesforce setzt sich für eine richtliniennahe Umsetzung der NIS2-Richtline der EU im Rahmen der nationalen Umsetzung in Deutschland ein. 
Es gilt, Doppelzuständigen hinsichtlich der Meldepflichten zu vermeiden und somit Rechtssicherheit zu schaffen.","affectedLawsPresent":true,"affectedLaws":[{"title":"Gesetz über das Bundesamt für Sicherheit in der Informationstechnik","shortTitle":"BSIG 2009","url":"https://www.gesetze-im-internet.de/bsig_2009"},{"title":"Verordnung zur Bestimmung kritischer Anlagen nach dem BSI-Gesetz","shortTitle":"BSI-KritisV","url":"https://www.gesetze-im-internet.de/bsi-kritisv"}],"fieldsOfInterest":[{"code":"FOI_EU_LAWS","de":"EU-Gesetzgebung","en":"EU legislation"},{"code":"FOI_MEDIA_INTERNET_POLICY","de":"Internetpolitik","en":"Internet policy"},{"code":"FOI_IS_CYBER","de":"Cybersicherheit","en":"Cyber security"}]},{"regulatoryProjectNumber":"RV0020827","title":"Digitalregulierung der EU innovationsfreundlicher ausgestalten (Digital Simplification Package)","printedMattersPresent":false,"printedMatters":[],"draftBillPresent":false,"description":"Salesforce setzt sich für einen kohärenten, technologieneutralen Ansatz der Digitalregulierung ein und befürwortet einen Regulierungsrahmen, der agil und zukunftsorientiert ist, um sich an die neuesten technologischen Entwicklungen wie generative KI und agentenbasierte KI anzupassen. 
Eine Vereinfachung des Regulierungsrahmens würde die Rechtssicherheit schaffen, die Unternehmen benötigen, um vertrauensvoll in KI-Lösungen zu investieren und diese einzusetzen.","affectedLawsPresent":false,"affectedLaws":[],"fieldsOfInterest":[{"code":"FOI_MEDIA_COMMUNICATION","de":"Kommunikations- und Informationstechnik","en":"Communication and information technology"},{"code":"FOI_MEDIA_DIGITALIZATION","de":"Digitalisierung","en":"Digitalization"},{"code":"FOI_MEDIA_INTERNET_POLICY","de":"Internetpolitik","en":"Internet policy"},{"code":"FOI_EU_LAWS","de":"EU-Gesetzgebung","en":"EU legislation"},{"code":"FOI_MEDIA_PRIVACY","de":"Datenschutz und Informationssicherheit","en":"Data protection and information security"}]}]},"statements":{"statementsPresent":true,"statementsCount":5,"statements":[{"regulatoryProjectNumber":"RV0008315","regulatoryProjectTitle":"Einführung eines \"Rechts auf Verschlüsselung\" im TTDSG","pdfUrl":"https://www.lobbyregister.bundestag.de/media/3b/e7/321374/Stellungnahme-Gutachten-SG2406200105.pdf","pdfPageCount":2,"text":{"copyrightAcknowledgement":"Die grundlegenden Stellungnahmen und Gutachten können urheberrechtlich geschützte Werke enthalten. Eine Nutzung ist nur im urheberrechtlich zulässigen Rahmen erlaubt.","text":"Ähnlich wie im KI-Kontext sehen wir auch bei der Einführung von Verschlüsselungsanforderungen im\r\nRahmen der TTDSG Novellierung einen entscheidenden Unterschied zwischen Privater- und\r\nUnternehmensanwendungen. Eine Verpflichtung der Ende-zu-Ende-Verschlüsselung (E2EE) könnte im Kontext der betriebsinternen Kommunikation die Diensteanbieter vor eine Compliance-Herausforderung mit anderen gesetzlichen Auflagen wie bspw. den Verpflichtungen als datenverarbeitende Stelle gem. der DSGVO führen."},"recipientGroups":[{"recipients":{"parliament":[],"federalGovernment":[{"department":{"title":"Bundesministerium für Digitales und Verkehr (BMDV) (20. WP)","shortTitle":"BMDV (20. 
WP)","url":"https://bmdv.bund.de/DE/Home/home.html","electionPeriod":20}}]},"sendingDate":"2024-05-24"}]},{"regulatoryProjectNumber":"RV0008315","regulatoryProjectTitle":"Einführung eines \"Rechts auf Verschlüsselung\" im TTDSG","pdfUrl":"https://www.lobbyregister.bundestag.de/media/da/12/388172/Stellungnahme-Gutachten-SG2412190065.pdf","pdfPageCount":2,"text":{"copyrightAcknowledgement":"Die grundlegenden Stellungnahmen und Gutachten können urheberrechtlich geschützte Werke enthalten. Eine Nutzung ist nur im urheberrechtlich zulässigen Rahmen erlaubt.","text":"Salesforce | Unser Gespräch u.a. zum Recht auf Verschlüsselung (TDDDG)\r\n@salesforce.com> Thu, Oct 10, 2024 at 10:51 AM\r\nTo: @bmdv.bund.de>\r\nHallo\r\nhab noch einmal vielen Dank, dass du dir Zeit für ein Gespräch zu aktuellen Themen\r\nrund um KI, Recht auf Verschlüsselung und Datenschutz genommen hast.\r\nWir sprachen über die Unterscheidung von Künstlicher Intelligenz im Unternehmenskontext und\r\nEndnutzerkontext. Das vorgestellte Whitepaper mit dem Titel: „Shaping the Future: A Policy Framework for Trusted\r\nEnterprise AI“ befasst sich mit der sich entwickelnden Landschaft der KI-Regulierung, dem nuancierten KI-Ökosystem\r\nund politischen Erwägungen für KI, mit besonderem Schwerpunkt auf Unternehmens-KI. Eine digitale Kopie des\r\nWhitepapers habe ich gerne dem Anhang der E-Mail beigefügt.\r\nÄhnlich wie im KI-Kontext sehen wir auch bei dem Einsatz von Messengerdiensten einen entscheidenden\r\nUnterschied zwischen Endkunden(B2C)- und Unternehmens(B2B)-anwendungen. Im Zuge eines \"Rechts auf\r\nVerschlüsselung\" im Kontext der TDDDG-Novellierung könnte eine Verpflichtung der Ende-zu-EndeVerschlüsselung (E2EE) die Diensteanbieter von betriebsinternen Kommunikationsdiensten vor eine signifikante\r\nCompliance-Herausforderung mit anderen gesetzlichen Auflagen wie bspw. den Verpflichtungen als\r\ndatenverarbeitende Stelle gem. 
der DSGVO stellen.\r\nWie ich aus dem Vortrag von aus dem Referat DP 25 im Rahmen der AK Datenschutz Sitzung des\r\nBitkom am 25.6. erfahren habe, legt der aktuelle Entwurf des Gesetzes den Fokus auf eine allgemeine Definition der\r\nVerschlüsselung und sieht keine Abgrenzung zwischen betriebsinternen und allgemein zugänglichen\r\nEinsatzbereichen der Dienste vor.\r\nAus Sicht von Salesforce sprechen wir uns mit Blick auf die Rechtssicherheit für Unternehmen für eine\r\nAusnahmeregelung der Verschlüsselungsverpflichtung für betriebsinterne Kommunikationsdienste aus.\r\n2021 wurde die Firma Slack von Salesforce übernommen. Der Slack-Dienst ist eine cloudbasierte Plattform für die\r\nTeamkommunikation. Slack richtet sich in erster Linie an Unternehmen, daher können wir aus erster Hand einige\r\nBeispiele nennen, die der Einführung einer E2EE für die unternehmensinterne Kommunikation entgegenstehen\r\nwürden.\r\nDiensteanbieter wie Slack haben vertragliche Beziehungen zu Kunden/Arbeitgebern und nicht zum Endnutzer. Diese\r\nBeziehungen beinhalten unterschiedliche Compliance- und rechtliche Verpflichtungen, von denen einige schwer\r\neinzuhalten sind, wenn E2EE erforderlich ist.\r\nBeispiel 1: Eine End-to-End-Verschlüsselung würde im Widerspruch zu Slacks Verantwortlichkeiten als\r\nDatenverarbeiter im Rahmen der DSGVO stehen, da sie Slack daran hindern würde, die Daten dem\r\nDatenverantwortlichen zur Verfügung zu stellen.\r\nSlack ist eine Unternehmenssoftware, die Arbeitgebern und anderen Organisationen zur\r\nErleichterung der Kommunikation am Arbeitsplatz zur Verfügung gestellt wird. Diese Kunden sind\r\nin der Regel Datenverantwortliche im Sinne der DSGVO und sie sind verpflichtet, dem Antrag\r\neines Endnutzers auf Herausgabe einer Kopie seiner Daten (sogenannter Antrag einer\r\nbetroffenen Person) nachzukommen. 
Slack ist als Datenverarbeiter verpflichtet, unsere Kunden\r\nbei diesen Anfragen zu unterstützen, indem wir ihnen technische und organisatorische Kontrollen\r\nzur Verfügung stellen, um in Slack enthaltene personenbezogene Daten (einschließlich SlackNachrichten) zu exportieren.\r\nWenn die Daten auf eine Weise verschlüsselt sind, die nur für Endbenutzer zugänglich ist, wäre\r\nSlack nicht in der Lage, seiner Verpflichtung nachzukommen, Datenverantwortliche als\r\nDatenverarbeiter bei Exporten zu unterstützen.\r\nBeispiel 2: E2EE würde es auch Arbeitgebern erschweren, ihren Compliance-Verpflichtungen\r\nnachzukommen, einschließlich der Durchführung interner Untersuchungen sowie der Einhaltung von\r\nFINRA/Finanzdienstleistungen.\r\nGerne werden wir diese Perspektive im Rahmen der erwähnten Verbändekonsultation einbringen. Sollten dieKolleg*innen des Fachreferats jedoch Rückfragen haben oder einen bilateralen Austausch zu dem Themenfokus\r\nwünschen, stehe ich gerne zur Verfügung.\r\nNoch einmal vielen Dank für deine Zeit und das Gespräch. Ich gehe davon aus, dass wir uns in zwei Wochen im\r\nRahmen des Digitalgipfels wiedersehen werden. Gemeinsam mit der Mercedes-Benz Group AG und der U.S.\r\nBotschaft in Berlin richtet Salesforce eine Session am 1. Veranstaltungstag um 10:15 Uhr aus unter dem Titel “Neue\r\nSphärengestalten - Wie können wir nationale Souveränität und internationale Spielräume im Cyberraum in Einklang\r\nbringen?”. Sie ist leider unglücklich parallel zum Vortrag von Minister Wissing geplant. Den Hinweis wollte ich\r\ndennoch weitergeben, falls jemand aus deiner Abteilung Interesse hat vorbeizukommen.\r\nBeste Grüße\r\n"},"recipientGroups":[{"recipients":{"parliament":[],"federalGovernment":[{"department":{"title":"Bundesministerium für Digitales und Verkehr (BMDV) (20. WP)","shortTitle":"BMDV (20. 
WP)","url":"https://bmdv.bund.de/DE/Home/home.html","electionPeriod":20}}]},"sendingDate":"2024-10-10"}]},{"regulatoryProjectNumber":"RV0008316","regulatoryProjectTitle":"Risikobasierte KI-Regulierung, maßgeschneidert für Unternehmen","pdfUrl":"https://www.lobbyregister.bundestag.de/media/82/4b/570150/Stellungnahme-Gutachten-SG2506250031.pdf","pdfPageCount":2,"text":{"copyrightAcknowledgement":"Die grundlegenden Stellungnahmen und Gutachten können urheberrechtlich geschützte Werke enthalten. Eine Nutzung ist nur im urheberrechtlich zulässigen Rahmen erlaubt.","text":"Nina Keim <nkeim@salesforce.com>\r\nSalesforce | Gesprächsanfrage zum Global Technology Summit am 10. - 12. April\r\n2025 in New Delhi\r\n@salesforce.com> Fri, Apr 4, 2025 at 8:00 AM\r\nTo: @bmdv.bund.de\r\nCc: @bmdv.bund.de, @salesforce.com>\r\nSehr geehrter Herr Staatssekretär,\r\nich darf mich im Namen von bei Salesforce mit einer\r\nherzlichen Bitte um ein persönliches Gespräch im Rahmen des anstehenden Global Technology Summit\r\nvon Carnegie India in New Delhi an Sie wenden. Sie beide werden Gäste der Abendveranstaltung am\r\n12. April mit Außenminister Jaishankar sein. bat mich daher anzufragen, ob Sie direkt vor dem\r\nDinner offen für einen Austausch mit ihm wären.\r\nAufbauend auf Ihrem Gespräch mit im Rahmen des letztjährigen AI Global Forums im Mai\r\n2024 in Seoul würde er sich sehr freuen, den Austausch zu vertrauensvoller Technologie in geopolitisch\r\nangespannten Zeiten im Kontext des Carnegie India Global Technology Summit fortzuführen sowie sich\r\nmit Ihnen über die Rolle von agentischer Künstlicher Intelligenz als “Digital Labor” Lösung für\r\nUnternehmen im Kontext des Fachkräftemangels auszutauschen.\r\nBei Salesforce investieren wir seit 2014 in KI-Forschung. 2016 haben wir unsere eigene\r\nvertrauenswürdige KI für Unternehmen, die Einstein-KI, eingeführt. 
Vor kurzem haben wir unsere neuste\r\nInnovation “Agentforce” auf den Markt gebracht, mit der unsere Kunden autonome KI-Agenten für\r\nzahlreiche Unternehmensfunktionen entwickeln und einsetzen können. Agentforce ist die erste Digital\r\nLabor Lösung für Unternehmen zur grenzenlosen Skalierung ihrer Belegschaft. Auf dieser umfassenden,\r\nvertrauenswürdigen Plattform arbeiten Menschen und KI-Agenten Hand in Hand für maximalen\r\nKundenerfolg.\r\nDabei unterstützt Salesforce eine gezielte, risikobasierte KI-Regulierung, die zwischen verschiedenen\r\nAnwendungsfällen der Technologie unterscheidet. Ein maßgeschneiderter Ansatz ist hier entscheidend,\r\num Vertrauen in die agentenbasierte KI aufzubauen und ihr volles Potenzial auszuschöpfen. Ziel sollte\r\nes aus Sicht von Salesforce sein, die Belegschaften zu schulen, um KI-Agenten zu verwalten und zu\r\nführen. Darüber hinaus sind zuverlässige Tests und Leitplanken für den Einsatz autonomer KI-Agenten\r\nebenso wie eine klare Kommunikation über KI-Interaktionen und Verantwortlichkeiten und klare Regeln\r\nzur Data Governance unerlässlich. Gerne führt unseren Ansatz zur vertrauensvollen KIAgenten Einführung im Gespräch weiter aus und steht für Ihre Rückfragen zur Verfügung.\r\nFür eine wohlwollende Terminprüfung sowie Ihre Bemühungen danke ich Ihnen und Ihrem Büro vorab\r\nsehr herzlich und stehe Ihnen und Ihrem Team bei Fragen und zur Terminvereinbarung gern jederzeit\r\nzur Verfügung.\r\nMit freundlichen Grüßen\r\n--\r\nGermany\r\nKurfürstendamm. 
194, D- 10707 BerlinMobile:\r\nE-Mail:\r\nsalesforce.com Germany GmbH • Sitz: München • Amtsgericht München • HRB 158525 • Geschäftsführer: Joachim\r\nWettermark, Lesa McDonagh, Stéphane Jaccottet\r\nVorsitzende des Aufsichtsrats: Nina Keim\r\nEintrag im Lobbyregister für die Interessenvertretung gegenüber dem Deutschen Bundestag und der\r\nBundesregierung: Registernummer R001275"},"recipientGroups":[{"recipients":{"parliament":[],"federalGovernment":[{"department":{"title":"Bundesministerium für Digitales und Verkehr (BMDV) (20. WP)","shortTitle":"BMDV (20. WP)","url":"https://bmdv.bund.de/DE/Home/home.html","electionPeriod":20}}]},"sendingDate":"2025-04-04"}]},{"regulatoryProjectNumber":"RV0008316","regulatoryProjectTitle":"Risikobasierte KI-Regulierung, maßgeschneidert für Unternehmen","pdfUrl":"https://www.lobbyregister.bundestag.de/media/bd/6f/570152/Stellungnahme-Gutachten-SG2506250034.pdf","pdfPageCount":32,"text":{"copyrightAcknowledgement":"Die grundlegenden Stellungnahmen und Gutachten können urheberrechtlich geschützte Werke enthalten. Eine Nutzung ist nur im urheberrechtlich zulässigen Rahmen erlaubt.","text":"@salesforce.com>\r\nSalesforce | Vielen Dank für das heutige Gespräch + Einladung zum Agentforce\r\nBreakfast für einen Mitarbeitenden aus dem Büro\r\n1 message\r\n@salesforce.com> Tue, Apr 8, 2025 at 3:04 PM\r\nTo: @bundestag.de>, @bundestag.de>\r\nhabt vielen Dank, dass Ihr Euch heute Zeit für den Austausch mit mir genommen habt und mir somit die Möglichkeit\r\ngegeben habt, Salesforce und mich vorzustellen.\r\nIm Verlauf unseres Gesprächs kamen wir sowohl auf die Rahmenbedingungen für die agentische KI sowie auf die\r\nAuswirkungen auf die Umwelt und Emissionen von KI Modellen zu sprechen. Gerne reiche ich ein paar weitere\r\nInformationen zu den beiden Themenkomplexen mit dieser Nachricht hinterher (unten eingefügt).\r\nZudem füge ich unten eine Einladung zu dem von Salesforce ausgerichteten \"Agentforce Breakfast\" am 15.4. in\r\nBerlin ein. 
Diese praxisnahe Veranstaltung zur agentischen KI richtet sich an die Mitarbeitenden der Bundestagsbüros\r\nund Verbände. Wir würden uns sehr freuen, eine Person aus Euren Büros vor Ort begrüßen zu dürfen. Eine\r\nAnmeldung kann gerne direkt an mich erfolgen unter Angabe des vollständigen Namens und der E-Mail Adresse.\r\nIch wünsche für den weiteren Start im Bundestag weiterhin alles Gute und viel Erfolg. Sollte der Themenzuschnitt für\r\nDigitalisierung in dem Zuständigkeitsbereich von einem von Euch landen, werden wir uns sicher in Berlin das ein oder\r\nandere Mal über den Weg laufen. Bei Rückfragen sowie als Ansprechpartnerin für Salesforce in Deutschland stehe\r\nich Euch und Euren Teams gerne zur Verfügung.\r\nMit besten Grüßen\r\n1. Informationen - Vertrauenswürdige Agentische KI:\r\nBei Salesforce investieren wir seit 2014 in KI-Forschung. 2016 haben wir unsere eigene vertrauenswürdige KI für\r\nUnternehmen, die Einstein-KI, eingeführt. Vor kurzem haben wir unsere neuste Innovation “Agentforce” auf den Markt\r\ngebracht, mit der unsere Kunden autonome KI-Agenten für zahlreiche Unternehmensfunktionen entwickeln und\r\neinsetzen können. Auf dieser Plattform arbeiten Menschen und KI-Agenten Hand in Hand für maximalen\r\nKundenerfolg.\r\nDabei unterstützt Salesforce eine gezielte, risikobasierte KI-Regulierung, die zwischen verschiedenen\r\nAnwendungsfällen der Technologie unterscheidet. Ein maßgeschneiderter Ansatz ist hier entscheidend, um Vertrauen\r\nin die agentenbasierte KI aufzubauen und ihr volles Potenzial auszuschöpfen. Ziel sollte es aus Sicht von Salesforce\r\nsein, die Belegschaften zu schulen, um KI-Agenten zu verwalten und zu führen. 
Darüber hinaus sind zuverlässige\r\nTests und Leitplanken für den Einsatz autonomer KI-Agenten ebenso wie eine klare Kommunikation über KIInteraktionen und Verantwortlichkeiten und klare Regeln zur Data Governance unerlässlich.\r\nSalesforce Positionspapier zu Unternehmens-KI: Shaping the Future: A Policy Framework for Trusted\r\nEnterprise AI\r\nPositionspapier zu agentischer KI: Shaping Public Policies for Trusted AI Agents\r\n2. Informationen - AI Energy Score:\r\nKürzlich hat Salesforce gemeinsam mit der Hochschule Carnegie Mellon University sowie den Partnern Hugging Face\r\nund Cohere den „AI Energy Score“ ins Leben gerufen, der die Energie-Effizienz von derzeit 166 Sprachmodellen\r\nbewertet und vergleichbar macht. Dabei werden sowohl Open-Source- als auch proprietäre Modelle einbezogen.\r\nDer AI Energy Score misst die Energie-Effizienz von KI-Modellen während der Inferenz, also der Phase, in der ein\r\ntrainiertes Modell zur Verarbeitung neuer Daten eingesetzt wird. Er bewertet den Energieverbrauch anhand von\r\nstandardisierten Benchmarks für gängige KI-Aufgaben wie Text- und Bildgenerierung und berücksichtigt dabei\r\nFaktoren wie Hardware-Effizienz, Modellarchitektur und Ressourcennutzung.\r\nAuf einem öffentlich zugänglichen Leaderboard können die aktuellen Bewertungen eingesehen werden.3. Salesforce | Herzliche Einladung zum Agentforce Breakfast am 15. April 2025\r\nSehr geehrter Damen und Herren,\r\nDeutschland steht am Anfang einer neuen Legislaturperiode: die Weichen für die politische\r\nZukunft der nächsten Jahre werden gestellt. 
Dabei geht es auch um nichts Geringeres als die\r\nSicherung der Wettbewerbsfähigkeit der deutschen Wirtschaft und unseres\r\ngesamtwirtschaftlichen Wachstums.\r\nWir möchten dies zum Anlass nehmen, uns mit ausgewählten Gästen – darunter einen/eine\r\nVertreter:in aus Ihrem Team – zu einem unserer Kernthemen auszutauschen: KI-Agenten –\r\ndie dritte Welle der künstlichen Intelligenz (nach der prädiktiven und generativen KI).\r\nDenn bei Salesforce ist agentische KI keine Zukunftsmusik, sondern mit Agentforce bereits\r\nRealität!\r\nIm Mittelpunkt stehen dabei die Fragen: Wie werden Menschen und KI-Agenten zukünftig\r\nzusammenarbeiten, welche Chancen, aber auch Herausforderungen sind damit verbunden und\r\nwie gelingt vertrauenswürdige KI?\r\nDiese und Ihre Fragen möchten wir beantworten und danach auch praktisch werden: Nutzen\r\nSie die Gelegenheit, mit Hilfe unserer technischen Experten Ihren ersten eigenen KI-Agenten\r\nzu bauen!\r\nSie möchten dabei sein? Dann würde ich mich über eine kurze Anmeldungsmail mit Angabe\r\nvon Namen, Organisation und E-Mail freuen.\r\nMit freundlichen Grüßen,\r\nDatum und Zeit\r\nDienstag, 15. April 2025\r\n09:00 - 10:30 Uhr\r\nProgramm\r\n09:00 Uhr – Ankunft & Kaffee\r\n09:10 Uhr – Einführung Salesforce & Vorstellung Agentforce White Paper\r\n09:30 Uhr – Agentforce Demo\r\n09:45 Uhr – Build your own Agent!\r\n10:00 Uhr – Offener Austausch bei Frühstück\r\n10:30 Uhr – Ausblick und Verabschiedung\r\n📍 Adresse\r\nSalesforce\r\nKurfürstendamm 194\r\n10707 Berlin\r\n--\r\nGermany | Salesforce\r\n| Salesforce\r\nKurfürstendamm. 194, D- 10707 Berlin\r\nMobile:\r\nE-Mail:\r\nsalesforce.com Germany GmbH • Sitz: München • Amtsgericht München • HRB 158525 •\r\nGeschäftsführer: Joachim Wettermark, Lesa McDonagh, Stéphane Jaccottet\r\nVorsitzende des Aufsichtsrats: Nina Keim\r\nEintrag im Lobbyregister für die Interessenvertretung gegenüber dem Deutschen Bundestag und der\r\nBundesregierung: Registernummer R001275--\r\nGermany\r\nKurfürstendamm. 
194, D- 10707 Berlin\r\nMobile:\r\nE-Mail:\r\nsalesforce.com Germany GmbH • Sitz: München • Amtsgericht München • HRB 158525 • Geschäftsführer: Joachim\r\nWettermark, Lesa McDonagh, Stéphane Jaccottet\r\nVorsitzende des Aufsichtsrats: Nina Keim\r\nEintrag im Lobbyregister für die Interessenvertretung gegenüber dem Deutschen Bundestag und der\r\nBundesregierung: Registernummer R001275Shaping the Future:\r\nA Policy Framework\r\nfor Trusted Enterprise AISHAPING THE FUTURE: A POLICY FRAMEWORK FOR TRUSTED ENTERPRISE AI 2\r\nContents\r\nIntroduction\r\nHow Enterprise AI is different\r\nfrom Consumer AI\r\nSalesforce’s Trusted\r\nEnterprise CRM AI\r\nRecommendations to\r\nPolicymakers to Foster\r\nResponsibility, Innovation,\r\nand Competition in\r\nEnterprise CRM AI\r\nConclusion\r\n03\r\n04\r\n06\r\n09\r\n13SHAPING THE FUTURE: A POLICY FRAMEWORK FOR TRUSTED ENTERPRISE AI 3\r\nIntroduction\r\nMotivated by the emergence of generative artificial intelligence\r\n(“GenAI”), governments worldwide are prioritizing regulatory and\r\npolicy frameworks for artificial intelligence (“AI”).\r\nAI is not a monolithic technology and is used in many different ways and contexts,\r\ncreating different types of risk. Furthermore, AI ecosystems have increased in complexity\r\nwith a number of actors involved in various stages of the lifecycle of AI products and\r\nservices. 
Some of the main elements of the AI ecosystems are:\r\nData: All AI starts with data and there are varying sources to gain the data that\r\npowers AI tools such as data aggregators, data brokers, and consumers themselves.\r\nAI models, including Large Language Models (LLMs): These models are carefully\r\ncrafted using research techniques and trained using a combination of public and\r\nprivately curated data.\r\nCompute Hardware Providers: The providers host several layers of data, allowing\r\ntheir customers to both train Large Language Models (“LLM”) and process requests\r\nto use the model after it’s been trained.\r\nInfrastructure optimization providers: Entities that provide tools and services that\r\nmake for more efficient and higher-quality model training such as fine-tuning with\r\nspecialized, proprietary data to better meet the needs of a particular company.\r\nCloud Platforms: These digital spaces allow developers to tap into the computing\r\npower in a cloud deployment model that can also provide applications to help\r\ncustomers organize their data.\r\nApplication Creators: Entities that build applications on top of models that cater\r\nto the unique needs of users and clients to provide services like app building or\r\nenhanced business intelligence.\r\nDenotes a role that Salesforce fulfills.SHAPING THE FUTURE: A POLICY FRAMEWORK FOR TRUSTED ENTERPRISE AI 4\r\nAs governments are working to establish their approach to AI, they should acknowledge these\r\nnuances, focusing on high-risk AI, while clearly delineating appropriate responsibilities at\r\nevery layer of the AI value chain to ensure that entities can meaningfully and correctly engage\r\nin the development of trusted AI.\r\nWhile there is diversity in both the AI value chain and its use cases, the current regulatory and\r\npolicy environment is narrowly focused on a few aspects of AI. 
The purpose of this paper is to\r\nbring attention to the Enterprise AI perspective and its particular relevance to this conversation.\r\nEnterprise AI, that is AI developed for and deployed in business settings, has several inherent\r\ncharacteristics that differentiate it from Consumer AI, including: the business model that\r\ngenerally does not monetize customer data; the use-specific contexts for which it is developed\r\nand in which it is deployed as opposed to the open-ended nature of Consumer AI; and the\r\nhigher levels of privacy, security, fairness and accuracy as a result of customer expectations and\r\ncontractual commitments. Salesforce, as an enterprise company specializes in AI customer\r\nrelationship management (CRM) solutions and we are taking additional measures that set us\r\napart both from Consumer AI and other Enterprise AI companies.\r\nHow Enterprise AI is different\r\nfrom Consumer AI\r\nContext-specific\r\nEnterprise AI applications are usually use-case specific and created for particular work\r\ncontexts, e.g. productivity or CRM tools, as opposed to the open-ended nature of Consumer\r\nAI. Enterprise AI applications often operate in a more closed work environment with limited\r\npotential inputs and outputs. Even in the cases of enterprise uses of generative AI, both the\r\nprompt and the data that grounds the prompt have been developed to ensure an optimized\r\noutput for the customer. Consumer AI is usually asked to perform general tasks that can greatly\r\nvary depending on the user, including creating images, a real human’s voice and likeness, or\r\ncreating lifelike videos. 
The very broad contexts in which Consumer AI can be used make it\r\ngenerally more prone to misuse and potentially harmful effects.\r\nEnterprises need to have the same capabilities that are captivating consumers,\r\nbut they need to have it with trust, and they need to have it with security.\r\n- Marc BenioffSHAPING THE FUTURE: A POLICY FRAMEWORK FOR TRUSTED ENTERPRISE AI 5\r\nGrounded on trusted data\r\nEnterprise AI systems are grounded on and operate on curated data, which generally is\r\nconsensually obtained from enterprise customers, and are deployed in more controlled\r\nenvironments. This limits the risk of hallucinations and increases accuracy. In contrast, in\r\nConsumer AI, the data can come from an array of sources such as the users themselves or,\r\nmore broadly, the public Internet.\r\nHigh levels of data privacy, security, and accuracy\r\nDepending on their place in the world, consumers may be covered by data protection and\r\ncontent laws. Enterprise AI companies often go beyond legislative requirements, for instance\r\nto be able to service their customers that operate in highly regulated industries, like the\r\ngovernment, financial services, and healthcare. These organizations, by virtue of their own\r\nregulatory requirements, demand service providers that can from day one, ensure robust privacy,\r\nsecurity and accountability controls to prevent bias, toxicity, and hallucinations. Customers also\r\nwant to have the ability to audit their operations. 
Enterprise AI companies offer these types of\r\nsafeguards to their customers, as their reputation and competitive advantage rely on it.\r\nContractual obligations\r\nThe relationship between an Enterprise AI provider and its customers is underpinned by\r\ncontracts or even procurement rules, which clearly describe the rights and obligations of each\r\nparty, including third-party vendors, all to provide more reassurance across the value chain.\r\nIn general, the relationship also means that Enterprise AI companies are handling data in line\r\nwith the contractual obligations and their ethical guidelines. Further, these same contracts are\r\nregularly reviewed to remain aligned with the high standards of the business customers and\r\nresponsive to the risk environment. In contrast, Consumer AI companies have created terms of\r\nservice that consumers can read to understand what data will be collected and its use, but lack\r\nthe ability to negotiate or tailor these terms to all their specific preferences.SHAPING THE FUTURE: A POLICY FRAMEWORK FOR TRUSTED ENTERPRISE AI 6\r\nSalesforce’s Trusted\r\nEnterprise CRM AI\r\nFounded in 1999, Salesforce is a global leader in cloud enterprise software for customer\r\nrelationship management (CRM), providing software-as-a-service and platform-as-a-service\r\nofferings to businesses, governments, and other organizations around the world. Our\r\ncustomers are companies of all sizes and across all sectors that use our tools to connect in new\r\nways with their own customers, employees and citizens.\r\nSalesforce has been active in the research and development of AI technologies for almost a\r\ndecade. In 2014, we established Salesforce AI Research, and in 2016 we introduced our first\r\nAI functionalities into our products under the “Salesforce Einstein” brand. Salesforce Research\r\nhas published 200+ research papers and registered 300+ AI patents. 
In 2018, we established\r\nour Office of Ethical and Human Use of Technology.\r\nSalesforce is not Consumer AI. We are trusted Enterprise CRM AI. We provide our customers\r\nwith Enterprise AI applications that are highly specialized and relevant to their needs. We\r\nare focused on producing specialized AI models that perform everyday work tasks like email\r\ngeneration, summaries of sales or service calls, or surfacing of relevant information during\r\ncustomer support interactions. We also introduced “Einstein Copilot”, a conversational AI\r\nassistant that our customers can converse with directly to solve issues faster. We also act as an\r\nintermediary between other GenAI providers and our customers by integrating their models\r\ninto our products and services.SHAPING THE FUTURE: A POLICY FRAMEWORK FOR TRUSTED ENTERPRISE AI 7\r\nAt Salesforce, trust is our #1 value, which means we develop and deploy AI with trust,\r\nsecurity and ethics at the center. We take measures to ensure our AI tools are developed\r\nresponsibly and we also establish further guardrails to assist our customers in the\r\ndeployment of trusted AI, including:\r\n• Our customers’ data is not our product. We have strict rules around the viewing,\r\nprocessing, and disclosure of our customers’ data. Salesforce and our partners\r\nadhere to our strict data policies and controls, which are also outlined in our\r\ncontracts and data processing addendum.\r\n• Our Einstein Trust Layer, which is a secure architecture natively built into the\r\nSalesforce platform, addresses the concerns of our customers associated with\r\nusing GenAI. The Einstein Trust Layer is equipped with security guardrails that allow\r\nSalesforce customers to benefit from GenAI without compromising their customer\r\ndata. The Einstein Trust Layer prevents third-party LLMs from retaining sensitive\r\ncustomer data, and masks the data when the prompt is shared with LLMs. 
Finally, it\r\nallows for our customers to audit their AI-generated outputs.SHAPING THE FUTURE: A POLICY FRAMEWORK FOR TRUSTED ENTERPRISE AI 8\r\n• Our AI Acceptable Use Policy (AI AUP) guides our customers in the responsible\r\nuse of our products with guidelines around prohibited actions such as\r\nautomated decision-making processes with legal effects, mandating human\r\ncontrol, and disclosures.\r\n• We design powerful system-wide controls that put humans in control of AI\r\noutcomes (what we call “human at the helm”). These controls include our\r\nPrompt Builder, audit trails, and robust data controls in our Data Cloud.\r\n• We provide our customers with “mindful friction” as they interact with AI, for\r\nexample, we flag for users when the sorting or evaluation of their data based on\r\ncertain categories such as zip codes could introduce bias.\r\n• We create, test, and improve the prompt templates used by Einstein to provide\r\nconsistently useful, high-quality responses.\r\n• We employ red teaming, a process that involves intentionally trying to find\r\nvulnerabilities in a system by anticipating and testing how users might use and\r\nmisuse it, to make sure that our gen AI products hold up under malicious input.\r\n• We maximize choice for our customers by being open, extensible, and model\r\nagnostic in our integrations with the emerging AI ecosystem. We aim to enable\r\ncustomers to use the LLMs of their choice with Salesforce’s services – whether\r\nthat be a model provided by a third party, Salesforce, or the customer itself.SHAPING THE FUTURE: A POLICY FRAMEWORK FOR TRUSTED ENTERPRISE AI 9\r\nRecommendations to\r\nPolicymakers to Foster\r\nResponsibility, Innovation,\r\nand Competition in\r\nEnterprise CRM AI\r\nSalesforce is committed to building trusted, transparent, open,\r\ninteroperable and accountable systems. 
As a service provider servicing\r\norganizations of all sizes, in multiple jurisdictions, and across many\r\nsectors, we are in a unique position to observe global trends in AI\r\ntechnology and to identify developing areas of risk and opportunity.\r\nWe believe that harnessing the power of AI in a trusted way will require\r\ngovernments, businesses, and civil society to work together to advance\r\nresponsible, safe, risk-based and globally interoperable AI policy and\r\nregulatory frameworks.\r\nIn this spirit, we offer the following recommendations to convey the Enterprise AI viewpoint\r\nand provide some suggestions to policymakers to ensure the development of trustworthy AI.\r\nDefinitions of AI actors\r\nThe diversity and rapid evolution of the AI ecosystem calls for a nuanced approach to\r\nassigning responsibilities to different actors. Therefore, it is important for policymakers to\r\nhave a clear understanding of the different roles these actors play. In the enterprise space,\r\ncreators of AI (“AI Developers”) often build general customizable AI tools, of which the\r\nintended purpose can be low-risk, e.g. Enterprise AI CRM. It is then up to the customer (“AI\r\nDeployer”) to decide how these tools are employed. The customer also controls the data that\r\nis submitted to the AI system. Further, the advent of generative AI has introduced a new role:\r\nthat of the distributor. This is an entity that is neither developing, nor deploying an AI system,\r\nbut rather facilitating access. These roles are not mutually exclusive, meaning that one\r\ncompany could operate as a developer, a deployer, and a distributor. 
Overbroad definitions\r\nsubject companies to a series of obligations with which they may be unable to comply.SHAPING THE FUTURE: A POLICY FRAMEWORK FOR TRUSTED ENTERPRISE AI 10\r\nWhile the AI landscape has and will continue to evolve, at this time, we support\r\nnarrow, and targeted definitions of developer, deployer, and distributor.\r\n• Developers should be defined as entities that design, code, or produce AI\r\nsystems. This definition accounts for companies making both predictive\r\nand generative AI systems.\r\n• Deployers should be defined as entities that are using or modifying an AI\r\nsystem under their authority. This definition is important because while\r\ndevelopers make AI systems, some of these systems are customizable and\r\nbecome specific to the deployer once the deployer inputs its data.\r\n• Distributors should be defined as entities other than the developer or\r\ndeployer that integrates an AI system into a downstream application or\r\nsystem without substantial or intentional modification. Distributors provide\r\ncustomers with a platform or interface that allows general-purpose systems\r\nto be tailored to fulfill more narrow business applications of AI.\r\nRisk-Based Approach\r\nAI policy frameworks should be risk-based and appropriately address the full\r\nspectrum of potential harms caused by AI systems. 
Definitions of high-risk AI\r\nshould be narrow and take into account the following considerations:\r\n• Activity that has a high-risk of physical impact such as management or\r\noperation of critical infrastructure in energy, transportation, and water;\r\n• Economic impact, including automated determinations of eligibility for\r\ncredit, employment, educational institutions, or public assistance services;\r\n• Government decision-making such as law enforcement/criminal justice\r\nand migration/asylum;\r\n• Impact on democracy and the rule of law, for example, the spread of\r\ndisinformation at scale; and\r\n• Violations of internationally recognized human rights.SHAPING THE FUTURE: A POLICY FRAMEWORK FOR TRUSTED ENTERPRISE AI 11\r\nTransparency\r\nSalesforce believes that humans and technology work best together. To facilitate human\r\noversight of AI technology, transparency is critical. This means that humans should be in\r\ncontrol, and equipped with the documentation to understand the genesis, limitations,\r\nand proper use of the AI system. Salesforce is advocating for transparency provisions that\r\nare responsive to the nuances of various roles in the AI value chain as described above.\r\n• Documentation: Developers should provide their deployers and distributors with\r\ninformation such as model cards and a document outlining the proper use of the\r\nsystem to help deployers or end-users correctly utilize the system.\r\nDeployers know both the context and data inputs powering the AI system.\r\nThey should provide end-users with information about the proper use of the AI\r\nsystem and perform assessments of the AI model. Deployers should also ensure\r\nthere are clear terms of use for end-users. 
There should be some reasonable\r\nexpectation of transparency for end-users and governments if high-risk systems\r\nare being utilized.\r\nDistributors should provide information on their data governance program to\r\nboth the developers and deployers that interact with their platform. Details\r\nshould include policies on data retention, data minimization efforts, and audit\r\nprocedures.\r\n• Human Control: Deployers should ensure that a human is making the final\r\ndetermination when a model is being used in a high-risk situation. These\r\nusers should also be encouraged to consult other factors beyond the system’s\r\nrecommendations.\r\n• Notice: In instances where deployers are using AI to make high-risk evaluations\r\nof individuals, deployers should publish the model’s decision-making framework,\r\nand provide individuals with the notice that their data is being processed using\r\nan AI tool.\r\n• Disclosures: Deployers should make a disclosure when end-users or consumers\r\nare interacting directly with automated systems and it is not obvious it is an AI\r\nsystem. Further, AI outputs should be labeled as such to inform consumers\r\nwhere it appears to be human-generated or original content.SHAPING THE FUTURE: A POLICY FRAMEWORK FOR TRUSTED ENTERPRISE AI 12\r\nData Governance\r\nAs a service provider entrusted with the data of companies large and small, in multiple\r\njurisdictions, and across many sectors, Salesforce understands the importance of data. As a\r\nfirst principle, Salesforce believes that a comprehensive data protection law and other sound\r\ndata governance practices are foundational to responsible AI.\r\n• Data Minimization and Storage Limitation: Everyone in the AI value chain should\r\nendeavor to only store personal data for as long as it’s required and for the originally\r\nintended purpose of that data. 
Developers, deployers, and distributors should all have\r\nan external policy outlining clear rationales for the retention of data as well as clear\r\ntimeframes for its deletion.\r\n• Chain of Custody/Data Provenance: Developers, deployers, and distributors, should be\r\nclear with users about what is being done with the data with which they are entrusted.\r\nFor example, Salesforce utilizes changelog abilities which track information on what\r\nwas created by AI, when, by which system, and how that AI-generated item (action,\r\ncontent, etc.) flowed through the system.\r\nGlobally Interoperable and Inclusive AI Policy Frameworks\r\nGlobally interoperable AI policy frameworks based on common principles will create more\r\ndurable, robust and, eventually, long-standing AI norms. Global consistency of AI policy\r\nframeworks will further ensure that the challenges presented by AI can be tackled collectively,\r\nwhereas the benefits can be shared by many. It is also important that global efforts on AI\r\ngovernance remain inclusive, incorporating views from diverse geographies, economic sectors,\r\nand disciplines.\r\n• Salesforce supports the multilateral consensus-driven work that is occurring in spaces\r\nlike the G7, the United Nations, and the Organization for Economic Co-operation and\r\nDevelopment (OECD).\r\n• To complement robust AI policy frameworks, Salesforce supports the free flow of data\r\nto and from countries provided it is subject to appropriate safeguards. The ability to\r\ntransfer data between jurisdictions in a seamless and responsible manner supports\r\nboth a high level of data protection and continued innovation.SHAPING THE FUTURE: A POLICY FRAMEWORK FOR TRUSTED ENTERPRISE AI 13\r\nConclusion\r\nSalesforce believes in the tremendous opportunities that AI can bring to individuals\r\nand businesses alike - with proper governance. 
To that end, Salesforce supports a\r\nmulti-stakeholder approach to AI policymaking, prioritizing the design of flexible,\r\nnuanced, and adaptive policies that respond to the rapid pace of AI innovation.\r\nEnterprise AI companies have unique perspectives on how to tackle some of the\r\nmost pressing concerns policymakers are grappling with. We look forward to sharing\r\nour expertise on trusted Enterprise AI CRM with governments, industry, and civil\r\nsociety as they are debating AI governance efforts.\r\nLearn more about Salesforce Public Policy here.\r\nThank YouThe Next Frontier in\r\nEnterprise AI:\r\nShaping Public Policies\r\nFor Trusted AI Agents2\r\nContents\r\nIntroduction: AI agents – Boosting\r\nproductivity, empowering humans\r\nKey considerations for AI agents\r\nAgents powered by trusted\r\nenterprise AI\r\nLooking Ahead: Fostering an\r\nagent-ready digital ecosystem\r\n• Government services as a role model for agentic AI and humans together\r\n• Public policy that enhances stakeholder trust\r\n• Workforce skilling for agentic AI and humans together\r\nConclusion\r\n03\r\n05\r\n06\r\n08\r\n153\r\nIntroduction\r\nAI agents are a technological revolution - the third wave of artificial intelligence after predictive and\r\ngenerative AI. They go beyond traditional automation, being capable of searching for relevant data,\r\nanalyzing it to formulate a plan, and then putting the plan into action. Users can configure agents\r\nwith guardrails that specify what actions they can take and when tasks should be handed off to\r\nhumans.\r\nAI agents are also an exciting next step in the economic revolution powered by AI. With their ability\r\nto independently tackle complex problems, AI agents promise to deliver the agility, efficiency,\r\nand competitiveness that organizations of all sizes seek. 
Unlike previous tech transformations that\r\ndemanded years of expensive infrastructure development, AI agents can be quickly and easily built\r\nand deployed, significantly increasing capacity.\r\nIt is estimated that 75% of AI’s ultimate value lies in the “front office,” where businesses directly\r\ninteract with their customers. Yet 41% of employee time is currently lost to repetitive, low-value\r\ntasks that have little to do with this essential front-office work. AI agents have the potential to ease\r\nthese burdens, allowing workers to focus on more meaningful and strategic tasks, making work not\r\nonly more productive, but more fulfilling.\r\nAgentic AI is already here. Innovative organizations have started deploying and benefitting from\r\nAI agents in concrete ways. However, for governments, enterprises, and the workforce to be able\r\nto harness the full potential of AI agents, they will need more than just the right technology;\r\nthey will need the right public policies to help them become agent-ready. At a time when\r\nglobal policymakers seek pathways to economic growth, the advent of AI agents offers a unique\r\nopportunity to think deeply about the policies that will enable the diffusion of trusted AI in\r\ngovernment and the enterprise, and equip the workforce with the necessary skills to unlock AI’s full\r\npotential for enhanced productivity and more meaningful work.\r\nThis paper outlines key considerations for designing and using AI agents, and describes how\r\nenterprise AI can help ensure this is done in a safe and responsible way. 
It also provides a series of\r\nrecommendations for policymakers who want to accelerate adoption of these new, productivity\r\nenhancing tools.\r\nIn surveys conducted for the OECD, four in five workers said that AI had improved their\r\nperformance at work and three in five said it had increased their enjoyment of work.\r\nWorkers were also positive about the impact of AI on their physical and mental health, as\r\nwell as its usefulness in decision making.\r\nWith Agentforce to handle routine inquiries, publisher Wiley’s employees have more time\r\nto focus on complex cases, outperforming the company’s previous chatbot by 40% case\r\nresolution in the first few weeks of use.4\r\nWhat are AI agents?\r\nAI agents build on earlier innovations, including chatbots and AI assistants. These all play a role in task\r\nautomation; however, there are differences in their levels of sophistication and personalization in serving\r\ntheir users and the range of tasks they can perform.\r\nGrounded on trusted data\r\nChatbots are relatively simple algorithms that provide answers to simple questions, often based on\r\npredefined rules. Some chatbots may use basic natural language processing, while others may give preprogrammed answers.\r\nAI assistants or copilots use more advanced natural language processing and large language models\r\n(LLMs) to address a wider range of questions that the AI system may not be explicitly programmed to\r\nanswer in advance. When prompted, they produce outputs based on data used to train or fine tune the\r\nLLM, or may be able to retrieve information from integrated data sources.\r\nAI agents do not just answer questions in response to user prompts - they can also initiate, plan, and\r\nexecute multi-step processes to achieve user-defined objectives. AI agents can adapt to the evolving\r\nneeds of the enterprise, continuously learning from every interaction, becoming more effective and\r\naccurate over time. 
They can integrate with different underlying models and data streams, and be\r\nconfigured to handle diverse tasks and workflows. While chatbots and assistants might still be useful for\r\nsimpler interactions, agents are the better choice for more complex scenarios that require reasoning.5\r\nKey considerations for AI agents\r\n• Humans working with agents: Employees, from business leaders to frontline workers, will need\r\nnew skills to configure, task, manage, and oversee AI agents. Agents will need to be easy to program\r\nand use in a variety of contexts.\r\n• Reliability: AI agents must be carefully designed and equipped with guardrails to ensure clear\r\nand smooth handoffs to humans, as well as to minimize, flag, and correct hallucinations. Careful\r\nengineering and robust testing are required to ensure the accuracy and reliability of agents.\r\n• Fluency across multiple domains: AI agents will interact with users and third parties within and\r\noutside of the enterprise, retrieving, interpreting, and acting on different types of information\r\nacross these domains. This requires advanced programming and thoughtful integration of business\r\nprocesses and data systems.\r\n• Transparency and explainability: Users need to know when they are interacting with an AI agent\r\ninstead of a human. Customers, regulators, and the public will also expect AI agents’ outputs and the\r\nsequence of steps they follow in taking an action to be transparent and explainable.\r\n• Accountability: It will be important to provide clarity around who is responsible for ensuring that the\r\nagent is working properly and that its output is accurate.\r\n• Data governance and privacy: AI agents may require access to personal or other sensitive data to\r\ncomplete their assigned tasks. 
For users and enterprises to trust AI agents, they will have to operate\r\nwith high standards of privacy and data security.\r\n• Security: Like other AI applications, agents may be vulnerable to adversarial attacks, where malicious\r\ninputs are designed to deceive the AI into producing bad outputs. As AI agents take on increasingly\r\ncomplex tasks, adhering to best practices for AI safety and quality control will be essential.\r\n• Ethics: Companies that use AI agents should ensure they follow ethical guidelines consistently and\r\nreliably. This will require developing new protocols and norms for autonomous AI systems, fostering\r\neffective human-AI collaboration, and building consensus and confidence in decision-making\r\nprocesses.\r\n• Agent-to-agent interactions: Common protocols and standards will be important to instill trust\r\nand help ensure controlled, predictable, and accountable AI agent behavior. Fundamental to this\r\nis a secure information exchange environment and, when relevant, audit trails of agent-to-agent\r\ninteractions.\r\nUsing Agentforce, reps at leading ed tech company Carnegie Learning were able to focus\r\non high-value prospects, reducing account research time from up to an hour to just five to\r\nten minutes—a reduction up to 92%6\r\nAgents powered by trusted enterprise AI\r\nMotivated by the emergence of generative artificial intelligence (“GenAI”),\r\ngovernments worldwide are prioritizing regulatory and policy frameworks for\r\nartificial intelligence (“AI”).\r\nAt Salesforce, trust is our #1 value. From the outset, we have been 100% focused on ensuring\r\nthat we develop, deploy, and distribute AI with trust, security and ethics at the center. 
Enterprise AI\r\ndemonstrates higher levels of privacy, security, and accuracy, and Salesforce implements a number of\r\nmeasures to address business concerns with AI.\r\nAgentforce is our newest AI innovation, bringing together humans with agents to help enterprises\r\nconnect to their customers in a whole new way. Agentforce is a suite of out-of-the-box AI agents and\r\na set of tools to build, customize, and deploy them across any industry. Agents built with Agentforce can\r\ncomplete tasks such as analyzing data, making decisions, and taking action to answer customer service\r\ninquiries, qualify sales leads, and optimize marketing campaigns. All this is built on our trusted Salesforce\r\nplatform, which means that customers do not have to bear the high technical complexity and large\r\nfinancial cost of building or training their AI from scratch - the same way that customers do not have to\r\nbuild their own cloud infrastructure. Agentforce provides secure access to third-party LLMs through the\r\nEinstein Trust Layer and uses retrieval-augmented generation (RAG) to apply customers’ trusted data\r\nsources to those LLMs to ensure reliable outputs.\r\nWatch Video7\r\nOur core values and trusted AI principles remain central to\r\nAgentforce.\r\n• Trust: The core tenet of our business model remains unchanged: our customers’ data is not our\r\nproduct, meaning that, when it comes to AI, customers control how their data is used.\r\n• Data: Integrity and quality of data are critical to ensure effective problem-solving. Salesforce’s Data\r\nCloud and our new retrieval-augmented generation (RAG) functionality allow organizations\r\nto securely leverage their own trusted data to reduce the risk of hallucinations and improve the\r\nperformance and reliability of the agents.\r\n• Accuracy: The Atlas Reasoning Engine is the system that acts as the “brain” inside Agentforce. It\r\nstarts by evaluating user queries and refining them for clarity and relevance. 
Then, it retrieves the\r\nmost relevant data and builds a plan for execution. Our research showed that the results Agentforce\r\ncould deliver were twice as relevant and 33% more accurate than other available solutions.\r\n• Privacy & Security: Every Agentforce interaction runs through the Salesforce Einstein Trust Layer, a\r\ncomprehensive set of security measures and protocols designed to protect the privacy and security\r\nof customer data. The bedrock principle of the Trust Layer is zero data retention, meaning that data\r\nis used to generate outputs but never to improve the underlying models. In addition, the Trust Layer\r\noffers auxiliary features like dynamic grounding, toxicity detection at the input and output levels, and\r\nan audit trail to track AI agent actions and outputs.\r\n• Guardrails: Agentforce comes with a set of features and controls that reinforce trusted behavior, and\r\nprevent deviations from the intended behavior of AI agents. Customers can use natural language\r\nto create instructions and guardrails for their agents, including which actions an agent can and\r\ncannot take or when to escalate or hand off a task to a human. Customers can also easily adjust the\r\nguardrails to fit their specific needs.\r\n• Open ecosystem: Agentforce can easily connect to Salesforce-provided as well as third-party\r\nenterprise systems, data lakes, and warehouses that our customers use, allowing them to make\r\nthe most of their technology investments. We have also launched the world’s first agent partner\r\nnetwork, enabling customers to access a catalog of third-party skills, actions, and agents to increase\r\nthe capabilities of their agents.\r\n• Responsible innovation: Our guiding principles for the responsible development of agentic AI are\r\naccuracy, safety, honesty, empowerment, and sustainability. 
Our Office of Ethical and Humane Use\r\nhas designed a set of trust patterns that are additional to the Einstein Trust Layer, to ensure that\r\nAgentforce is reliable and transparent. For instance, Agentforce products use standard language to\r\nalert administrators and agent managers when they’re about to implement or use AI agents. These\r\nnotes highlight the capabilities and limitations of AI, ensuring a clear understanding of its impact and\r\npotential.8\r\n• Testing & evaluation: To ensure the reliability of our AI agents and relevant safeguards, we conduct\r\nrigorous testing and red teaming, including adversarial testing. Before launching Agentforce, we\r\nsubjected our AI agents to over 8,000 adversarial inputs to pressure-test their boundaries. We have\r\nalso published the world’s first LLM benchmark for customer relationship management (CRM)\r\nmodels.\r\n• Sustainability: Our Sustainable AI Policy Principles prioritize managing and mitigating the\r\nenvironmental impact of AI models. Rather than using a single large, energy-intensive model for every\r\ntask, Agentforce leverages a variety of optimized models specifically tailored to each use case. This\r\napproach enables high performance with a fraction of the environmental impact.\r\n• Government partnerships: We continue to work with governments around the world to\r\nadvance responsible AI. In 2024, we reported on our progress on the White House AI Voluntary\r\nCommitments and signed onto the EU AI Pact and Canada’s voluntary AI Code of Conduct. We\r\nalso participate in a number of government-led initiatives including UNESCO’s Global Business\r\nCouncil for the Ethics of AI, the US National AI Advisory Committee, and Singapore’s AI Verify\r\nFoundation.\r\nLooking Ahead: Policies to foster an\r\nagent-ready digital ecosystem\r\nPolicymakers around the world are grappling with questions about the risks and opportunities of AI\r\nwhile trying to keep pace with rapidly advancing innovation. 
Although agents are the latest technology\r\nbreakthrough, the fundamental principles of sound AI public policy that protects people and fosters\r\ninnovation remain unchanged: risk-based approaches, with clear delineation of the roles of different\r\nactors in the ecosystem, supported by robust privacy, transparency, and safety rules.\r\nWith this in mind, it is time to think beyond regulating how AI is built and used. Policymakers should\r\nfocus on creating the right conditions to support wide adoption of trusted agentic AI across industries\r\nand geographies. It is imperative that no nation or community is left behind. Equipping the workforce\r\nwith the necessary skills to harness the potential of AI agents for less tedious, more meaningful and\r\nproductive work should be a cornerstone of any governmental policy aiming to advance AI development\r\nand diffusion.9\r\nGovernment services as a role model for agentic\r\nAI and humans together\r\nGovernments today face ever-increasing pressures to serve citizens with constrained resources. Research\r\nsurveying people in over 40 countries has shown that 75% expect government service quality to\r\nbe on par with leading private sector companies, while 72% are comfortable with personalized\r\ngovernment digital services. Amidst these pressures, there is clear demand for trusted innovative tools\r\nfrom government workers themselves - a recent survey across 14 countries estimated that 49% of\r\ngovernment workers have used unapproved generative AI tools at work.\r\nGovernments have enormous opportunities to safely leverage AI agents - in particular their speed,\r\nresponsiveness, personalization - to enhance citizen-facing services. 
For example, if a citizen wants\r\nto check on the status of their application for a license or a public benefit, an agent-powered interface\r\ncould at any hour assist them in ascertaining the status of their application, locate public information\r\non policies and application procedures, or schedule an appointment to access a related service. These\r\ninteractions would have previously taken the citizen far longer, required valuable government employee\r\ntime, may have only been available during business hours, or been left undone altogether. By deploying\r\nagents in these contexts governments can make the most of their limited resources while building citizen\r\nsatisfaction with and trust in services.\r\nIn addition to leading by example in digital transformation, governments can play a pivotal role\r\nin boosting diffusion of trusted AI solutions, including AI agents, in the private sector. As\r\ngovernments search for new ways to promote economic growth and combat inflation, this is a key\r\nmoment for national economies. The rapid development and commercialization of AI tools have created\r\nopportunities for businesses large and small around the world to rapidly become more productive\r\nand gain a competitive edge by adopting frontier technologies. But to achieve the benefits, businesses\r\nneed to overcome resource constraints, rapidly acquire new commercial knowledge and practical skills,\r\nand have regulatory clarity. 
For governments to position their businesses for success, they need to set\r\nambitious goals and identify and remedy the blockers that hold back the uptake of new technologies\r\nby enterprises.\r\nA recent report of the European Commission showed that the uptake of AI, cloud and big\r\ndata by European companies is well below the Digital Decade target of 75%, with only\r\n17% of businesses projected to use AI by 2030.10\r\nRecommendations:\r\n• Ambitious AI adoption strategies: Many governments worldwide have established AI strategies,\r\nbut in the face of rapidly developing applications of AI these may no longer reflect cutting-edge\r\nopportunities for economies and lack clear targets for industry adoption. Governments should\r\nconduct comprehensive analyses of potential blockers, like red tape, lack of trust, or legal clarity, etc.\r\nthat stall AI adoption. They should also work with private sector stakeholders to foster the diffusion\r\nof AI tools and solutions within industries, for example through AI Showcases at industry and trade\r\nconvenings. Earmarking funding, in the form of a National AI Adoption Fund, to strengthen AI\r\ndiffusion in the private sector would also be a positive measure, particularly for smaller and medium-sized enterprises that often face more challenges in their digital transformation journeys.\r\n• Agent-ready governments: Governments should adopt AI-first approaches to transform public\r\nadministration, with clear measures to encourage AI adoption for all government agencies, including\r\nby ensuring that government technology systems are modernized and agent-ready. 
Government IT\r\nmodernization strategies should support the preconditions that smooth the path to adoption of AI\r\nagents, including by redoubling efforts to ensure internally coherent and interoperable data systems\r\nand integration strategies.\r\n• Public procurement modernization: Governments should review procuring procedures to ensure\r\nflexible, outcome-based standards that do not inadvertently hinder the procurement of innovative\r\nsolutions like AI agents. Memoranda of Understanding or similar framework agreements with\r\ntechnology suppliers can help streamline the adoption of new solutions at scale by easing the path\r\nfor procuring agencies to verify compliance of these solutions with procurement guidelines or other\r\nrequirements. To help procurement officers traverse the AI learning curve and gain confidence,\r\ngovernments should establish processes to share information and best practices in government AI\r\nagent use cases, contracting, and management. Senior government officials should also be provided\r\nwith “AI for Executives” courses to ensure that those accountable for government technology\r\ndecisions understand the benefits and can appropriately oversee AI procurement.\r\n• Chief AI Officer: Governments should coordinate their AI adoption efforts to ensure best practices\r\nare shared across different agencies and departments. While each government agency will benefit\r\nfrom having its own AI experts who assess AI-related rules for the field of activity that the agency\r\nsupervises, a Chief AI Officer could promote government-wide approaches to AI adoption,\r\nadvocating for an AI-first, and even AI agent-first, approach to public administration. 
This role could\r\nalso be an extension of the remit of existing government Chief Technology Officers, Chief Data\r\nOfficers, and/or Chief Information Officers.11\r\nPublic policy that enhances stakeholder trust\r\nPublic policy frameworks should aim to establish and maintain trust with stakeholders, creating the right\r\nconditions for the safe and responsible development and use of AI agents, while promoting innovation\r\nand competition. Fundamental principles, such as a risk-based approach, appropriate allocation of\r\nresponsibilities across the supply chain, and predictable and proportional privacy laws remain central to\r\nthe proper governance of AI agents.\r\nInternational AI governance efforts should include diverse geographies (Global South), sectors\r\n(developers, deployers, distributors, users, large and small companies), and disciplines (scientists,\r\nethicists, technical experts, policymakers, academia, privacy practitioners, and civil society).\r\nRecommendations:\r\n• Risk-based policy frameworks: To build on the benefits that enterprises are already seeing from AI\r\nadoption, governments should pursue nuanced policies that establish effective guardrails around\r\nAI while also enabling creative experimentation and innovation. In our white paper “Shaping the\r\nfuture: A policy framework for trusted Enterprise AI”, we outlined why policymakers should adopt\r\ntailored approaches based on appropriate definitions of risk. These should focus on the applications\r\nor contexts of use of AI that are likely to cause significant harm to the rights and freedoms of an\r\nindividual, including economic and physical impact, and also to the rights to privacy and to be\r\nfree from discrimination. 
Enterprise AI solutions such as Agentforce are generally lower risk than\r\nconsumer-facing AI technologies, because they are context-specific, are grounded on trusted\r\ncustomer data, and comply with higher levels of data privacy, security, and accuracy.\r\n• Appropriate allocation of responsibilities: Agentic AI is made possible by different entities in the AI\r\nvalue chain. The data, LLM, platform, application, and fine-tuning could all be performed by different\r\nentities. To effectively address risk, policy frameworks should acknowledge this nuance and assign\r\nresponsibilities carefully and in relation to the amount of information and level of control different\r\nparties have over the distinct elements comprising an agent. In the enterprise space, the platform\r\nprovider may be best positioned to recommend and evaluate responsible configurations; the LLM\r\nprovider to describe the underlying model and explain its decision-making algorithm; and the\r\ncustomer providing the data that goes into the AI agent to control how such data is used as well as\r\nthe context in which the agent is utilized. Our proposed definitions of AI developer, deployer, and\r\ndistributor take the foregoing considerations into account. Appropriate allocation of responsibilities\r\nis also a critical step in avoiding the privacy harms that may result by assigning to entities inconsistent\r\n(or even conflicting) responsibilities under AI and data protection regulatory schemes. 
For example,\r\nrequiring that data processors/service providers take an active hand in monitoring or modifying data\r\nunder AI laws – especially in ways typically reserved for data controllers/businesses under privacy\r\nlaws – will only increase the number of parties that have access to and can manipulate consumers’\r\npersonal data, yielding more vectors of attack.12\r\n• Supportive data privacy rules: Because data quality is so important to effective AI agents, care\r\nshould be taken to ensure that rules facilitate productive, secure, and transparent uses of data,\r\nincluding personal data. Comprehensive privacy protections and clear rights and obligations will\r\nenable both enterprise users and consumers to feel confident managing their sensitive information\r\nin AI systems incorporating agents. Alignment on core concepts such as a delineation between data\r\ncontrollers and data processors is critical to achieve sound risk-based and proportionate regulation.\r\nPrivacy rules should also facilitate safe and compliant uses of data, including through the deployment\r\nof Privacy Enhancing Technologies (PETs).\r\n• Documentation: Creators of agentic AI should provide proper documentation on both the\r\nresponsible configurations and acceptable uses of agents to their customers.\r\n• Transparency: Where AI agents are interacting with end-users, policy frameworks should require\r\nthat it is made clear that the interaction is with an AI tool and not a human. Furthermore, content\r\ncreated and autonomously delivered by AI should be clearly marked as such (e.g. disclosures in agent\r\nresponses to consumers, or use of watermarks on an AI-generated image).\r\n• International alignment: Globally interoperable rules and norms will help with reinforcing trust in\r\nand diffusion of agentic AI. 
Coordination among National AI Safety Institutes should continue and\r\nbe further strengthened with regular meetings including broad industry participation as appropriate.\r\nOngoing efforts by the G7, the OECD, and the UN are examples of promising initiatives to advance\r\ninternational cooperation on AI governance. Inclusion of diverse voices in these conversations,\r\nincluding from the Global South, will ensure a holistic approach to dealing with the risks and\r\nopportunities of AI.\r\nWorkforce skilling for agentic AI and humans together\r\nThe opportunities of AI agents to enhance productivity and creativity, augment the work of humans, and\r\ncreate new jobs, are limitless. However, the rapid pace of innovation is raising concerns about AI replacing\r\nhuman workers. AI isn’t the first automated technology to stir fears about job displacement. Our societies\r\nhave experienced similar shifts before, such as when the Internet revolution changed the way we work\r\nand interact forever. Salesforce itself was born out of the movement to the cloud, and our ecosystem\r\nalone has created its own job surge.\r\nAs AI reshapes the way we work, the focus should be on the training, creativity, and critical\r\nthinking skills that are uniquely human. The global workforce must be equipped to use this\r\ntechnology safely and confidently. Equitable access to AI is essential so that all individuals,\r\nregardless of their background or location, can benefit from these advancements and contribute\r\nto a more inclusive and prosperous future.13\r\nUnderstanding the future of work means understanding which tasks, as opposed to whole jobs, should\r\nbe automated, and which should not. 
AI itself can help us identify the tasks it can best handle and\r\nprovide better tools to understand the skills workers might need in the future, designing personalized\r\nlearning paths to assist with the transition.\r\nGovernments should prioritize public policies and collaborate with civil society and industry to equip\r\nworkers with the right skills to thrive in the new opportunities created by agentic AI. While each\r\njurisdiction will adopt capacity-building policies tailored to their national or regional needs, there is\r\nalready a robust set of recommendations and approaches from international organizations like UNESCO,\r\nthe OECD, and the IMF that can serve as a basis to build on.\r\nOrganizations must focus on training their employees to use and manage AI agents, but also to be\r\nstrategic, make good decisions, and be creative, ensuring that technology remains in the service of\r\nhumans. Trailhead, our free online platform, has expanded its courses to offer AI-specific skills training,\r\nincluding AI fundamentals, ethical AI use and prompting. Salesforce also announced that it will offer\r\nits existing premium AI courses and AI certifications free of charge and available to anyone on Trailhead\r\nthrough the end of 2025.\r\nAdditionally, we make our spaces around the world available for on-site training sessions. In June\r\n2024, Salesforce opened its first AI Center in London, and will unveil a new pop-up AI Center at its\r\nheadquarters in San Francisco in 2025, with plans to roll out additional training centers in key hubs\r\naround the world like Chicago, Tokyo, and Sydney. 
These centers will bring together industry experts,\r\npartners, and customers to advance AI innovation alongside providing critical upskilling opportunities.\r\nThe Salesforce Accelerator — Agents for Impact is a new initiative that provides nonprofits with\r\ntechnology, funding, and pro bono expertise to help them innovate and develop AI-powered agent\r\nsolutions to scale community impact.\r\nWhile AI’s potential is vast, its highest purpose is realized when it enhances —\r\nrather than replaces — the unique judgment, creativity, and empathy that only\r\npeople can provide.\r\n- from “State of the AI Connected Customer” report14\r\nRecommendations:\r\n• Study the impact of AI agents on jobs: Much is not yet known about how AI agents will impact the\r\nworkforce and how this will vary among different countries, communities, sectors, and types of work.\r\nUnderstanding the specific contours of this impact for stakeholders is necessary for national, regional,\r\nand local governments to shape informed policies that support their workers and industries during\r\nthis technological shift. Policymakers should evaluate how AI agents influence job creation, task\r\ntransformation, and skill requirements in order to design targeted interventions.\r\n• Expand and invest in public-private partnerships for workforce reskilling: Governments,\r\nindustry, NGOs, and civil society each bring necessary perspectives and capabilities. Policymakers\r\nshould focus on public-private partnerships specifically geared towards reskilling the workforce to\r\nadapt to the unique challenges and opportunities posed by AI agents. 
These can include:\r\n− Industry-specific reskilling initiatives that bring together workers from sectors where AI agents\r\nare more intensively transforming job roles, along with educational specialists and technologists.\r\n− Collaborative programs involving industry, universities, and NGOs to connect AI-ready talent with\r\nsuitable job opportunities and facilitate the transition into the workforce.\r\n− Workshops aimed at providing businesses with practical guidance on adopting trusted\r\nenterprise AI solutions, including the integration of AI agents.\r\n− Collaboration among AI companies, industry associations, and labor unions to develop AI agent\r\nskills frameworks that are transferable within and between industries.\r\nSalesforce has joined UNESCO’s call to action to build ethical AI, prioritizing responsible\r\nskills training and governance for a fair AI future.\r\nSalesforce collaborates with governments to equip businesses of all sizes with the right AI\r\nskills and innovation, with programs like Data + AI Boost SME Program in Singapore.\r\n• Expand funding for AI skills training: Funding programs should focus on upskilling workers in\r\nindustries at risk of AI disruption, helping them transition to higher-value roles that involve managing\r\nand collaborating with AI systems. Key skills for AI preparedness in ICT roles include AI literacy,\r\ndata fundamentals, and prompt engineering. As AI agents become integral to human-agent teams,\r\nsuccess will require more than just technical engineering expertise. Equally important will be strong\r\ncommunication, collaboration, and problem-solving skills that enable individuals to work seamlessly\r\nwith both AI systems and humans, fostering effective teamwork in AI-driven environments.15\r\nConclusion\r\nFor the past 25 years, Salesforce has led our customers through every major technological shift: from\r\ncloud, to mobile, to predictive and generative AI, and, today, agentic AI. 
We are at the cusp of a pivotal\r\nmoment for enterprise AI that has the opportunity to supercharge productivity and change the way\r\nwe work forever. This will require governments working together with industry, civil society, and all\r\nstakeholders to ensure responsible technological advancement and workforce readiness. We look forward\r\nto continuing our contributions to the public policy discussions on trusted enterprise AI agents.\r\nLearn more about Salesforce Public Policy here.\r\nThank You\r\n• Integrate AI skills into the curriculum: Preparing the next generation for an AI-first future is\r\nessential. Education and vocational curriculums should include ongoing training in AI concepts,\r\nskills, ethical considerations, and its potential future integration as the discipline evolves. Educators\r\nshould also be enabled and reskilled to integrate AI into the curriculum and meet the needs of\r\nfuture classrooms."},"recipientGroups":[{"recipients":{"parliament":[{"code":"RG_BT_MEMBERS_OF_PARLIAMENT","de":"Mitglieder des Bundestages","en":"Members of parliament"}],"federalGovernment":[]},"sendingDate":"2025-04-08"}]},{"regulatoryProjectNumber":"RV0020827","regulatoryProjectTitle":"Digitalregulierung der EU innovationsfreundlicher ausgestalten (Digital Simplification Package)","pdfUrl":"https://www.lobbyregister.bundestag.de/media/33/65/643793/Stellungnahme-Gutachten-SG2511250005.pdf","pdfPageCount":12,"text":{"copyrightAcknowledgement":"Die grundlegenden Stellungnahmen und Gutachten können urheberrechtlich geschützte Werke enthalten. 
Eine Nutzung ist nur im urheberrechtlich zulässigen Rahmen erlaubt.","text":"Brussels, October 14, 2025\r\nRe: Call for Evidence – Simplification – digital\r\npackage and omnibus\r\nSalesforce welcomes the opportunity to provide comments to the European Commission’s call\r\nfor evidence on the Digital Omnibus (Digital Package on Simplification).\r\nThe European Union's ambition to become a global digital leader is being challenged by its own\r\nfragmented and complex regulatory framework. Despite having strong research and innovation\r\ncapabilities, the EU has struggled to turn these into a competitive advantage, especially in\r\nadvanced technologies like artificial intelligence (AI). To unlock its full potential and complete its\r\nDigital Single Market, Europe needs a simpler, more coherent, and consistent legislative\r\nframework for digital policies.\r\nThe Imperative for Simplification and Coherence\r\nA fragmented legal and regulatory landscape across the 27 EU Member States creates a\r\npatchwork of rules that confuses businesses, increases compliance costs, and slows down the\r\ndevelopment and deployment of digital services. A 2024 report by the European University\r\nInstitute's Centre for a Digital Society stated that “the number of new laws that have been\r\nintroduced to promote digitalisation (...) is simply enormous”1. “The future of European\r\ncompetitiveness” Report by Professor Mario Draghi calls for a 25% cut in overall reporting\r\nobligations2 and it highlights the importance that a more joined-up approach to policy\r\ncoordination across all EU institutions can play to foster growth and competitiveness.\r\nFragmentation is particularly harmful for Europe's numerous startups and SMEs, which are\r\noften the main drivers of innovation but lack the resources to navigate a complex legal\r\n2\r\np. 
69 The future of European competitiveness report, by Professor Mario Draghi,\r\nhttps://commission.europa.eu/topics/eu-competitiveness/draghi-report_en#paragraph_47059\r\n1\r\np.11 MARCUS, J. Scott, ROSSI, Maria Alessandra, Strengthening EU digital competitiveness : stoking\r\nthe engine, EUI, RSC, Research Project Report, 2024, Centre for a Digital Society -\r\nhttps://hdl.handle.net/1814/76877\r\n415 Mission Street\r\nSan Francisco,\r\nCalifornia 94105\r\nSalesforce, Inc.\r\n/salesforce\r\n@salesforce\r\nRef. Ares(2025)8711655 - 14/10/2025\r\nenvironment. The lack of a true Digital Single Market means that a company developing a new\r\nservice or technology must often adapt it to different national laws, leading to significant delays\r\nand higher costs. This discourages investment and makes it harder for European companies to\r\nscale up and compete with global players. The issue is not the sheer number of laws, but also\r\ntheir overlaps and inconsistencies, which risk stifling innovation and preventing firms from\r\nscaling and competing internationally.\r\nBoosting AI with Harmonized Rules\r\nThe European Commission has made it clear that they want to boost AI development in the EU.\r\nAs Commission President Ursula von der Leyen has stated, \"This is the mission of the next\r\ndecade to make Europe one of the leading AI continents”.3 However, without a streamlined legal\r\nframework, this ambition is at risk.\r\nThe effectiveness of the EU AI Act hinges on clarifying and proportionally applying its\r\nsubstantive requirements. Its foundational risk-based approach is undermined by overly broad\r\ndesignations (e.g., in Annex III), which risk inadvertently over-regulating low-risk systems,\r\ncreating significant legal uncertainty and hindering the consistent implementation across all\r\nMember States. 
This issue is compounded by the persistent fragmentation of the EU's wider\r\ndigital policy on data governance and cybersecurity, which can create contradictory\r\nrequirements that stifle innovation. For instance, a lack of consistent rules on data access and\r\nuse across different sectors could severely limit the data available for training robust AI models.\r\nThe Draghi report made this point clearly, stating that \"regulatory barriers to scaling up are\r\nparticularly onerous in the tech sector, especially for young companies\". It also warns that the\r\nEU's \"regulatory stance towards tech companies hampers innovation\".4\r\nTo address this, the EU must move toward a more coherent, technology-neutral approach.\r\nRegulatory frameworks need to be agile and forward-looking to adapt to the latest technological\r\ndevelopments, such as generative AI and agentic AI. Simplifying the regulatory framework\r\nwould provide the legal certainty that companies need to confidently invest in and deploy AI\r\nsolutions. This would benefit private enterprise, public administrations, and critical sectors like\r\nmanufacturing and healthcare, ultimately enhancing Europe's competitiveness.\r\n4\r\np. 32 The future of European competitiveness report, by Professor Mario Draghi,\r\nhttps://commission.europa.eu/topics/eu-competitiveness/draghi-report_en#paragraph_47059\r\n3 Opening keynote speech by President von der Leyen at the ‘One Year After the Draghi Report'\r\nConference, September 16, 2025\r\nhttps://ec.europa.eu/commission/presscorner/detail/en/speech_25_2102\r\n415 Mission Street\r\nSan Francisco,\r\nCalifornia 94105\r\nSalesforce, Inc.\r\n/salesforce\r\n@salesforce\r\nData Protection as an Enabler, Not a Barrier\r\nCrucially, this push for simplification does not mean compromising the fundamental protection of\r\nEuropeans' personal data. The EU's commitment to data privacy, as enshrined in the GDPR, is\r\nnot under question. 
The goal is for data protection principles to coexist with and enable\r\ninnovation, not hinder it. Consistency of laws and regulations can in fact strengthen data\r\nprotection by bringing clarity and coherence on core principles.\r\nThe GDPR and the EU AI Act should work in tandem. The EU AI Act provides a legal framework\r\nfor the safe development of AI systems, while the GDPR ensures the protection of personal\r\ndata used by those systems. Data protection should be viewed as a way to build trust with\r\ncitizens and provide legal certainty for businesses, thereby fostering innovation rather than\r\nstifling it. By focusing on clear, consistent implementation and providing practical guidance for\r\nhow GDPR principles apply to new technologies, the EU can ensure that it remains a global\r\nleader in both innovation and data governance.\r\nA good practical example of AI in the service of privacy are AI agents. With the right guardrails,\r\nAI agents can fortify a company’s privacy efforts, minimizing human error, limiting data access\r\nby design, and acting as privacy intermediaries. Agentforce, the agentic layer of the Salesforce\r\nPlatform, empowers businesses to configure agents with strict boundaries when processing\r\npersonal data, prioritize data minimization, and ensure compliance with global privacy\r\nregulations. For example, an AI agent can be configured to present an organization’s privacy\r\nstatement to individuals at the first point of contact, provide “just-in-time” notices if at any point\r\nfurther personal data is collected from individuals, and provide up-to-date and relevant\r\ninformation on how to exercise data subject rights on request. 
Agents could also be configured\r\nto help consumers better understand a company’s privacy policies by creating a conversational\r\ninterface for them to ask questions, demystifying complex legal jargon, and fostering\r\ntransparency.\r\nIn this paper, we present our concrete and targeted suggestions including (i) clarifications to the\r\nEU AI Act to strengthen legal certainty and alignment with existing EU rules; (ii) improvements\r\nto the GDPR to strike the right balance between robust data protection and the development of\r\nAI; (iii) improvements to other data-related regulations; and (iv) the streamlining of cybersecurity\r\nrequirements.\r\n415 Mission Street\r\nSan Francisco,\r\nCalifornia 94105\r\nSalesforce, Inc.\r\n/salesforce\r\n@salesforce\r\nOur proposals\r\nProposals for the EU AI Act\r\nDesignation of “high-risk” in the EU AI Act\r\n● The issue: High-risk designations in the EU AI Act should be more narrowly tailored to\r\ncapture truly high-risk activities. For instance, according to Annex III (4), AI systems in\r\nthe areas of “employment, workers management and access to self-employment” are\r\nhigh-risk. However, such a broad reference may capture use cases that should not be\r\nconsidered high-risk. AI has been used for administrative tasks in recruitment for years,\r\nsuch as for resume matching and filtering. AI can help recruiters and employers to match\r\nopportunities to those workers that are genuinely qualified or interested. As long as a\r\nhuman is in the loop for the ultimate hiring or dismissal decisions, these use cases\r\nshould not be de facto considered high risk. The stated goal of the EU AI Act is to\r\n“guarantee safety, fundamental rights and human-centric AI, and strengthen uptake,\r\ninvestment and innovation in AI across the EU”5. 
This goal and the Act’s risk-based\r\napproach will be better served from clear and narrow interpretations of high-risk, which in\r\nthe example presented above, would mean limiting the high-risk designation to AI\r\nsystems that are used without human intervention or review.\r\n● Our proposal:\r\n○ Avoiding overly broad designations will enhance legal certainty and boost the\r\nuptake of these innovative solutions by European industry. We have presented\r\nour detailed position on this matter in our response to the European\r\nCommission’s public consultation on the classification of AI systems as high-risk.\r\nAs a first step, we believe that the upcoming guidance is an excellent opportunity\r\nfor the European Commission to provide the necessary clarity on this issue.\r\n○ While we understand that exceptions are contemplated in the derogations from\r\nthe high-risk classification requirements in Article 6(3), the steps to establish a\r\nderogation to the high-risk designation are burdensome (e.g. documentation and\r\nregistration). Therefore, we would encourage the Commission to provide\r\nclarification by narrowing the designation rather than requiring such exceptions to\r\nfollow the derogation requirements.\r\n○ Therefore, as part of the Digital Omnibus or the upcoming Digital Fitness Check,\r\nwe urge the European Commission to consider modifications to Annex III (4), as\r\n5 European Commission website on the AI Act:\r\nhttps://digital-strategy.ec.europa.eu/en/policies/regulatory-framework-ai\r\n415 Mission Street\r\nSan Francisco,\r\nCalifornia 94105\r\nSalesforce, Inc.\r\n/salesforce\r\n@salesforce\r\nwell as similar review and simplification of all Annex III use cases. 
Additionally,\r\nwe suggest amending Article 6(3) as follows: “By derogation from paragraph 2,\r\nan AI system referred to in Annex III shall not be considered to be high-risk\r\nwhere it does not pose a significant risk of harm to the health, safety or\r\nfundamental rights of natural persons, including by not materially influencing the\r\noutcome of decision-making or by ensuring meaningful human intervention or\r\nreview”.\r\nAligning obligations of high-risk AI providers with other EU legislative frameworks\r\n● The issue: Certain requirements of a high-risk provider under the EU AI Act may be\r\nread to contradict provider obligations as a data processor under the GDPR. For\r\nexample, post-market monitoring requirements under the EU AI Act seem at odds with a\r\nprocessor's obligation to only process data for very limited purposes as dictated by\r\ncustomer instructions (e.g. related to providing the service).\r\n● Our proposal:\r\n○ We recommend revisiting these obligations, in particular limiting the requirement\r\non providers to an obligation to provide deployers with the tools required to\r\nconduct the post-market monitoring themselves.\r\n○ Alternatively, we recommend clarifying that the provider's post-market monitoring\r\nobligations are solely with regard to system-level risks, and do not require access\r\nto the deployer's environment.\r\n○ We also recommend ensuring incident response requirements in the EU AI Act\r\nalign with incident response requirements in other regulations (e.g. GDPR) rather\r\nthan conflict and/or create net new requirements. 
This will prevent the creation of\r\nredundant, conflicting, and resource-intensive reporting obligations, thereby\r\nallowing resources to be focused on actual security and safety.\r\nStreamlining enforcement\r\n● The issue: The enforcement of the EU AI Act involves a combination of centralized and\r\ndecentralized structures, with national market surveillance authorities, the European\r\nCommission (via the AI Office), and the European Data Protection Board all playing roles\r\nin the enforcement process. The number of players involved in enforcement will\r\ninevitably result in inconsistencies.\r\n● Our proposal: We invite the European Commission to consider the introduction of a\r\nmutual recognition framework to address the risk of fragmentation and inconsistency in\r\nthe EU AI Act's enforcement. A mutual recognition framework will foster consistency\r\nacross national authorities, meaningfully advancing the goal of a single, harmonized EU\r\n415 Mission Street\r\nSan Francisco,\r\nCalifornia 94105\r\nSalesforce, Inc.\r\n/salesforce\r\n@salesforce\r\nmarket for AI. The principle of mutual recognition dictates that a product (in this case, an\r\nAI system) lawfully placed on the market or certified as compliant in one Member State\r\nshould be automatically accepted as compliant by all other Member States. A mutual\r\nrecognition framework would require a national authority to accept the conformity\r\nassessment or the compliance certification issued by another Member State's Notified\r\nBody or authority. This would eliminate the ability of one national authority to reject an AI\r\nsystem that has already been approved elsewhere, and advance consistent\r\ninterpretation of the AI Act's horizontal rules.\r\nMutual recognition of existing international standards\r\n● The issue: The European Commission is facing delays in developing the technical\r\nstandards for the EU AI Act, which are essential for turning the law's principles into\r\nconcrete rules. 
Companies are left with uncertainty on how to achieve compliance,\r\nmaking it difficult to prepare for the law's upcoming deadlines. This uncertainty not only\r\nstifles innovation but also puts a strain on companies, particularly SMEs, who can't\r\nafford to risk non-compliance.\r\n● Our proposal: To address this, the European Commission should prioritize the mutual\r\nrecognition of internationally recognized standards, such as ISO 42001. This approach\r\nwould allow companies to use globally accepted benchmarks to demonstrate\r\ncompliance, rather than waiting for EU-specific standards that may be years away. By\r\nrecognizing existing international standards, the EU would provide immediate clarity and\r\na practical pathway to compliance for businesses worldwide. This would also ensure\r\nconsistency, as companies in different member states could all rely on the same\r\nwell-established standards. This framework would accelerate the adoption of safe and\r\ntrustworthy AI while maintaining the EU's goal of a harmonized market.\r\nProposals for the intersection of Data Protection and AI\r\nAligning Data Subject Rights to AI needs\r\n● The issue: The GDPR's Data Subject Rights in Articles 15 (access), 17 (erasure), and\r\n21 (objection) create significant challenges for the development and deployment of AI\r\nmodels. Once data has been used to train an AI model, it becomes inextricably\r\nembedded within the model’s learned parameters. 
“Model unlearning” can be technically\r\nchallenging or require disproportionately costly retraining of the entire model, effectively\r\ndemanding the destruction of a valuable asset built on significant investment.\r\n415 Mission Street\r\nSan Francisco,\r\nCalifornia 94105\r\nSalesforce, Inc.\r\n/salesforce\r\n@salesforce\r\n● Our proposal:\r\n○ We encourage the European Commission to acknowledge that the rights to data\r\naccess and erasure present unique challenges when personal data has been\r\nprocessed for the purpose of training an AI model, where such data becomes\r\ndeeply and inseparably embedded within the model's architecture. In such cases,\r\nand where the controller has taken all reasonable technical and organizational\r\nmeasures, including data minimization, anonymisation and pseudonymization\r\nprior to training, to prevent the retention of personal data within the model's\r\nparameters, the right to access, erasure, or objection shall not be interpreted as\r\nrequiring the complete retraining, deletion or expungement from the AI model.\r\nThe controller shall, however, provide the data subject with an explanation of how\r\ntheir data is being used and the practical limitations of its erasure from the model\r\nitself.\r\n○ Where complete erasure, access, or objection is not technically possible without\r\ndisproportionate impact on model integrity, public interest, or the rights and\r\nfreedoms of others, alternative measures may be implemented, including:\r\n■ Certification of data minimization, anonymization, or pseudonymization\r\nprior to training;\r\n■ Model training documentation;\r\n■ Provision of high-level summaries of model logic or decision criteria;\r\n■ Use of synthetic data to mitigate personal data dependency.\r\n○ These recommendations do not waive the fundamental rights of data subjects,\r\nbut guide proportionate enforcement in AI-specific contexts.\r\nRe-imagining the legal basis for training\r\n● The issue:\r\n○ Consent is a 
challenging legal basis to use in practice when training AI models,\r\nleaving companies with fewer options and often forcing them to rely on legitimate\r\ninterest (LI), which requires a Legitimate Interest Assessment (LIA). The issue is\r\nthat the data used for AI training may not have been collected under\r\ncircumstances that meet LI requirements.\r\n○ This creates legal uncertainty for companies developing AI. We believe the\r\nconcept of consent needs to be fundamentally revisited to make it a more\r\nworkable legal basis for AI development. The European Commission should also\r\nprovide clearer guidance on how to use LI for AI development.\r\n○ This is precisely what the CNIL, the French data protection authority, did with its\r\nguidance from June 2025, which supports using LI as a legal basis for AI training,\r\nprovided certain conditions are met. By promoting LI through practical, detailed\r\n415 Mission Street\r\nSan Francisco,\r\nCalifornia 94105\r\nSalesforce, Inc.\r\n/salesforce\r\n@salesforce\r\nguidance, the CNIL offers a model for a more flexible and realistic approach to\r\ndata governance for AI development, giving companies much-needed legal\r\nclarity.\r\n● Our proposal:\r\n○ We recommend that the CNIL approach is promoted among regulatory and\r\nenforcement authorities as helpful guidance regarding the use of LI as a legal\r\nbasis for processing personal data, particularly in the context of AI system\r\ndevelopment and training. 
This guidance can serve as a practical and immediate\r\napproach for organizations navigating the complexities of data protection in AI.\r\n○ In the longer term, we propose clarifying Recital 32 in the GDPR as follows:\r\n“When personal data is processed for the development of artificial intelligence\r\nsystems, particularly for the purpose of training an artificial intelligence system or\r\nmodel, consent should cover all processing activities carried out for that\r\ndevelopment purpose, provided that the data subject is informed in a clear,\r\nconcise, and non-disruptive manner about the nature of the AI system being\r\ndeveloped and its intended functionalities. To ensure that the purpose limitation\r\nprinciple is applied to future applications of the artificial intelligence system or\r\nmodel, the type of system or model being developed should be described along\r\nwith an illustration of its key potential functionalities, thereby fulfilling the\r\nrequirement for a specific, informed, and unambiguous indication of agreement.”\r\nStreamlining Data Protection Impact Assessments (DPIA), Records of Processing\r\nActivities (ROPA), Legitimate Interest Assessments (LIA), Transfer Impact Assessments\r\n(TIA)\r\n● The issue: The current compliance structure for data protection particularly as it applies\r\nto AI companies operating globally, is not streamlined for efficiency but rather based on\r\nseparate, overlapping documentation requirements, involving separate, repetitive DPIAs,\r\nTIAs, ROPAs, and LIAs. 
This creates an excessively burdensome and highly costly\r\ncompliance environment that promotes non-compliance over genuine risk mitigation.\r\n● Our proposal:\r\n○ To resolve this complexity and foster a pragmatic and proportionate regulatory\r\nenvironment, we propose to consolidate DPIAs, TIAs, and LIAs into a single,\r\ntiered Integrated Risk Assessment document.\r\n○ Separately, the requirement for every single exporter to independently evaluate\r\nthe laws of non-adequate third countries is repetitive and costly. We urge the\r\nEuropean Commission to publish Regional Risk Profiles for non-adequate\r\ncountries, which companies can rely on for the foreign law analysis and only\r\n415 Mission Street\r\nSan Francisco,\r\nCalifornia 94105\r\nSalesforce, Inc.\r\n/salesforce\r\n@salesforce\r\nperform a supplementary check on residual risks specific to their processing\r\nactivity.\r\nData anonymization\r\n● The issue: The GDPR’s high bar for true anonymization is compounded by the\r\ncapabilities of modern AI. The European Data Protection Board requires that\r\nanonymization be assessed case-by-case, requiring that the risk of re-identification be\r\nsufficiently remote based on all means reasonably likely to be used. Because true\r\nanonymization is virtually impossible to prove, data intended for AI analysis is often\r\nregulated as personal data even if minimal risk exists. This risks unnecessarily triggering\r\nthe full compliance regime (DPIAs, consent, rights of erasure) for low-risk applications.\r\n● Our proposal: We encourage the European Commission to develop a more realistic\r\nand achievable technical threshold for how difficult it should be to re-identify or reverse\r\nanonymized data, specifically for non-sensitive or low-risk data. This would set a clear,\r\nrisk-based benchmark, thus preventing the stifling of beneficial data use and analysis\r\nacross the vast majority of less sensitive applications. 
It would also strengthen data\r\nprotection, as currently, many companies don't take actions that would meaningfully\r\nprotect data, because they fall short of the existing standard for anonymization.\r\nProposals on data-related regulations\r\nGDPR: Cross-border data transfers and data “in the clear”:\r\n● The issue: The \"no data in the clear\" standard for cross-border data transfers, stemming\r\nfrom post-Schrems II guidance from the European Data Protection Board6, is unclear\r\nand often a friction point. It is not aligned to the operational reality of modern, global\r\ncloud SaaS services, which require some level of data processing in clear (decrypted)\r\nform to function.\r\n● Our proposal: We call the European Commission to initiate a targeted consultation with\r\nregulators, industry, and other relevant stakeholders, to develop risk-based,\r\nproportionate guidance that distinguishes between data processing \"in the clear\"\r\n6 EDPB Recommendations 01/2020 on measures that supplement transfer tools to ensure compliance\r\nwith the EU level of protection of personal data\r\nhttps://www.edpb.europa.eu/our-work-tools/our-documents/recommendations/recommendations-012020-\r\nmeasures-supplement-transfer_en\r\n415 Mission Street\r\nSan Francisco,\r\nCalifornia 94105\r\nSalesforce, Inc.\r\n/salesforce\r\n@salesforce\r\nnecessary for the core functionality of a standard SaaS contract, and the unnecessary or\r\nhigh-risk exposure of sensitive data.\r\nGDPR: Addressing the weaponization of DSARs in employment disputes\r\n● The issue: The use of Data Subject Access Requests (DSARs) as a \"weapon\" in\r\nemployment disputes is escalating across the EU, forcing employers into\r\ndisproportionate data disclosure efforts and risking the privacy rights of colleagues and\r\nthird parties named in communications.\r\n● Our proposal: Following the UK's recent, more nuanced guidance on addressing\r\n\"manifestly unfounded or excessive\" requests, which offers a clearer path to 
challenge\r\nDSARs made with a malicious or harassing intent, we propose that the European\r\nCommission issue clarification or guidance on Article 12(5) and Article 15(4) of the\r\nGDPR. This must explicitly empower data controllers to refuse or strictly limit the scope\r\nof DSARs in employment contexts where the primary motive is demonstrably vexatious,\r\npurely for litigation discovery, or when responding fully would necessitate the disclosure\r\nof third-party personal data that cannot be reasonably redacted or anonymised without\r\ndisproportionate effort or prejudice to those individuals' rights.\r\nEU Data Act\r\n● The issue: Chapter VI of the Data Act contains obligations for providers of data\r\nprocessing services to facilitate and enable customers’ switching to other services. To\r\ncomply with these requirements, Salesforce has updated its online services terms with a\r\ndedicated Data Act Addendum to address customer needs. We believe that more\r\ntargeted guidance and additional clarifications are needed to ensure effective data\r\nportability and interoperability.\r\n● Our proposal: We call the European Commission to swiftly adopt targeted non-binding\r\nguidelines aimed at identifying the current interoperability obstacles, allowing B2B\r\nproviders to make available to their customers self-automated tools able to easily initiate\r\nand complete the switching process. We believe that allowing standards and other\r\nindustry initiatives to evolve will reduce costs and speed up the switching process for\r\nEuropean companies.\r\n415 Mission Street\r\nSan Francisco,\r\nCalifornia 94105\r\nSalesforce, Inc.\r\n/salesforce\r\n@salesforce\r\nProposals on cyber-related regulations\r\nStreamline Incident Reporting Requirements\r\n● The issue: Managing cyber incident response and recovery is challenging enough\r\nwithout the added complexity of multiple reporting frameworks. 
GDPR, NIS2, DORA, and\r\nCRA regulations each have their own specific criteria and scope for incident reporting.\r\nThis can create a significant burden, forcing organizations to report the same incidents\r\nmultiple times and in different formats, each with unique technical and language\r\nrequirements. For entities like Salesforce operating across multiple jurisdictions, this\r\nduplication increases operational costs and complexity without improving response\r\neffectiveness.\r\n● Our proposal: We invite the European Commission to further exchange with Member\r\nStates in order to establish a single, national-level entry point for all relevant reports.\r\nAdditionally, an alternative solution could be the creation of a single reporting platform\r\nwithin the European Union Agency for Cybersecurity (ENISA). This could serve as a\r\npowerful tool to route reports to the appropriate national authorities. It would maintain the\r\nintegrity of national entry points while creating a resilient, efficient system that avoids a\r\nsingle point of failure.\r\nAlign “Main Establishment” Definition\r\n● The issue: The current EU cybersecurity landscape, with regulations like CRA, DORA\r\nand NIS2, often presents a challenge for companies. The use of different terminology to\r\ndefine similar concepts such as “main establishment” creates confusion and makes\r\ncompliance complex, especially for businesses operating across multiple frameworks.\r\nThis complexity can lead to a misallocation of resources, as organizations are forced to\r\nprioritize compliance over proactive mitigation, response, and recovery efforts. 
This\r\nultimately weakens the EU's overall cyber resilience and diminishes the benefits of\r\ncentralized reporting offered by ‘main establishment’ concepts as a whole.\r\n● Our proposal:\r\n○ We recommend the European Commission to leverage the upcoming CSA\r\nreview to ensure that the definition of “main establishment” is aligned across\r\nmultiple frameworks, including the CRA, NIS2 and the GDPR, using the definition\r\nprovided in NIS2 as the common reference point. This would allow entities falling\r\nunder the scope of more than one framework to rely on a harmonized and legally\r\nconsistent definition and interpretation of their main establishment for reporting\r\npurposes.\r\nAdditionally, to ensure consistent application and interpretation of NIS2 across all\r\n27 Member States, we recommend the European Commission issue clear\r\nguidance on the legislative intent behind the \"main establishment\" concept. This\r\nwill promote the uniform application of the law and protect the ultimate goal laying\r\nbehind the One-Stop-Shop mechanism: strengthening EU’s cyber resilience."},"recipientGroups":[{"recipients":{"parliament":[],"federalGovernment":[{"department":{"title":"Bundeskanzleramt (BKAmt)","shortTitle":"BKAmt","url":"https://www.bundeskanzler.de/bk-de","electionPeriod":21}}]},"sendingDate":"2025-10-17"}]}]},"contracts":{"contractsPresent":false,"contractsCount":0,"contracts":[]},"codeOfConduct":{"ownCodeOfConduct":false}}