diff --git a/.github/workflows/ci-cd.yml b/.github/workflows/ci-cd.yml index 369709f05..c60f2dd49 100644 --- a/.github/workflows/ci-cd.yml +++ b/.github/workflows/ci-cd.yml @@ -48,8 +48,9 @@ jobs: uses: actions/setup-node@v1 with: node-version: 22 - - run: npm install - - run: tsc + - run: npm ci --verbose + - run: npx tsc --version + - run: npm run typecheck tests: name: Ruby on Rails tests diff --git a/app/assets/stylesheets/_variables.scss b/app/assets/stylesheets/_variables.scss index 51afa04b5..d412b3ec7 100644 --- a/app/assets/stylesheets/_variables.scss +++ b/app/assets/stylesheets/_variables.scss @@ -18,6 +18,7 @@ $danger: #EB5959; $success: #2ECC71; $info: #58A09A; $brand: #4B68FF; +$brand-comp: #F05137; $data: ( diff --git a/app/assets/stylesheets/complaints.scss b/app/assets/stylesheets/complaints.scss index fdec7e561..8c679c338 100644 --- a/app/assets/stylesheets/complaints.scss +++ b/app/assets/stylesheets/complaints.scss @@ -60,3 +60,44 @@ .is-lead + h1.complaint-title { margin-top: -1rem; } + +.is-brand-comp { + color: $brand-comp; +} + +.with-subtitle { + margin-bottom: 0; +} + +.subtitle { + margin-top: 0; + font-weight: normal; + font-size: 1.2rem; +} + +.modules { + display: grid; + grid-template-columns: repeat(auto-fit, minmax(20rem, 1fr)); + gap: 0.5rem; +} + +.module-widget { + border: 1px solid $muted-graphic; + border-radius: 0.2rem; + padding: 0.5rem; + color: $key !important; + + > h4 { + text-decoration: underline; + } + + > p { + text-decoration: none; + } + + &:hover { + background: $primary; + color: white !important; + text-decoration: none !important; + } +} diff --git a/app/controllers/complaints_controller.rb b/app/controllers/complaints_controller.rb index 3d8af4507..57d7ea24b 100644 --- a/app/controllers/complaints_controller.rb +++ b/app/controllers/complaints_controller.rb @@ -3,6 +3,7 @@ class ComplaintsController < ApplicationController before_action :access_check, only: [:show, :comment] before_action :write_access_check, 
only: [:self_assign, :update_status, :change_content_type] before_action :verify_staff, only: [:reports, :reporting] + before_action :training_access, only: [:training, :training_complete] def index render layout: 'without_sidebar' @@ -202,6 +203,28 @@ def reporting render layout: 'without_sidebar' end + def training + pages = Dir.glob(Rails.root.join('app', 'views', 'complaints', 'training', '*.html.erb')) + .map { |page| File.basename(page, '.html.erb') } + if pages.include?(params[:page]) + render "complaints/training/#{params[:page]}", layout: 'osa_training' + else + not_found! + end + end + + def training_complete + user_update = current_user.update(osa_training: DateTime.now) + audit_log = AuditLog.moderator_audit(event_type: 'osa_training_completed', user: current_user, + comment: 'OSA training completed.') + if user_update && audit_log + flash[:success] = I18n.t('safety_center.training_complete') + else + flash[:danger] = I18n.t('safety_center.training_complete_failed') + end + redirect_to safety_center_path + end + private def access_check @@ -235,4 +258,11 @@ def set_complaint @complaint end + + def training_access + osa_training_enabled = SiteSetting['OSATrainingEnabled'] + unless user_signed_in? && (current_user.staff? || current_user.at_least_moderator?) && osa_training_enabled + not_found! + end + end end diff --git a/app/views/complaints/index.html.erb b/app/views/complaints/index.html.erb index 84243a825..b0476f101 100644 --- a/app/views/complaints/index.html.erb +++ b/app/views/complaints/index.html.erb @@ -25,6 +25,20 @@ +
Thank you for taking the time to make a report. If you've seen harmful, abusive, or illegal content on our communities, you can report this to us here. You can also use this page if you've received a message saying we've - classified your content as harmful, abusive, or illegal and you wish to contest it. + classified your content as harmful, abusive, or illegal and you wish to contest it, or if you have a complaint + about our processes or our compliance with our duties.
Last updated 22 March 2026
+ ++ Thank you for taking the time to go through this training. We appreciate that these are often difficult and sensitive + topics, but your understanding and assistance help us to create safe communities for all our users. As community + moderators you're likely to see problems before we do, and we appreciate your help in bringing them to our attention. +
++ Please click the button below to mark your training complete. You can come back to these pages at any time from the + Safety Center if you need a reminder. If you need any support, please contact the Community Team. +
+ +<%= form_tag osa_training_complete_path, method: :post do %> + <%= submit_tag 'Mark as complete', class: 'button is-filled is-primary' %> +<% end %> diff --git a/app/views/complaints/training/definitions.html.erb b/app/views/complaints/training/definitions.html.erb new file mode 100644 index 000000000..4ca657e34 --- /dev/null +++ b/app/views/complaints/training/definitions.html.erb @@ -0,0 +1,275 @@ +Last updated 12 March 2026
+ ++ It is important to understand what each type of content is exactly, so they are defined here. These are definitions + which we have written: the Act defines each type of content in terms of related criminal offences, which is more + complex than is necessary to deal with the content, so these descriptions are intended to provide a simple overview. +
+ ++ Our risk assessment has also identified some additional types of illegal content based on the platform's risk profile. +
+ +Last updated 22 March 2026
    + +

    + It's important that illegal content is handled consistently in order to ensure that we are able to meet the statutory + requirements placed on us, to protect our users, and to enable us to be transparent with our communities. The way in which + illegal content is handled is set out in our Online Safety & Illegal Content Policy & Procedure, OP02 — you can + find a copy of this <%= link_to 'here', SiteSetting['ContentPolicyURL'] %>. +

    
++ We're not asking you, as a volunteer, to make final or legal determinations; we're asking for your assistance in + identifying and escalating content that could violate the law. +
+ ++ The two ways in which you are likely to come across illegal content as a volunteer moderator are either through + encountering it as you're browsing, or through flags from community members. Either way, our ask of you is the same. + If you believe the content may be illegal: +
++ Making a judgement on whether content constitutes illegal content is surprisingly difficult. Ofcom publishes guidance, + and we have a responsibility to make an initial decision and follow through on that; however, the final say can only + be given by the UK courts. +
+If our decision is that the content is illegal, we will:
+Last updated 22 March 2026
+ ++ Every individual service has an individual risk profile based on its characteristics and demographics as different + functionalities and characteristics raise or lower the likelihood of different types of content. This means that we're + at higher risk of certain types of content than others; we identify and discuss these here. +
+ ++ While the on-platform impact of terrorism-related content is relatively low, we have a significant number of risk + factors that increase the likelihood of this type of content appearing on the platform. These include: +
++ There is no shortage of examples of online platforms where user-to-user interactions have been plagued by hateful + content. There are many places where this has got better, but also plenty where it hasn't. Although we work hard to + foster a sense of community and establish respectful baselines, the risk of this is still present. We've assessed that + this is more likely to occur in comments or user profiles than in post content. As a volunteer moderator, your actions + can set the tone for your community, so you can make the difference in how likely this is to occur. +
+ ++ While we haven't seen any content that we believe to be illegal content under this category, we have seen spam along + closely-related lines. Both here and on other platforms, it's not uncommon to see spam promoting "best escorts in X + city". There is a fine line between this being legal and illegal based on the consent of or impact on those involved, + but if there's any doubt it's safer to escalate for review. This can take the form of spam posts, but more commonly + occurs in spam user profiles, which can be longer-lived. +
+ ++ This type of content likewise often occurs in spam, both as posts and as user profiles. The more obvious occurrences + may be common scams or offers for illegitimate financial gain, but this category also includes carrying out regulated + acts without being an authorized person or body. This could cover more seemingly innocuous actions, such as users + arranging loans or credit between themselves. +
+ ++ For this type of content—among others—it's important to remember that discussion of + drugs or psychoactive substances is not necessarily illegal. We also don't currently host any communities that would + be at higher risk of this type of content by the nature of their topic area. Relevant offences include + offering to supply drugs, psychoactive substances, or UK "controlled substances" (which may include medications which + are legal elsewhere in the world); the Act also includes attempting, encouraging, or assisting these offences, which + can be difficult to assess. As with everything, if in doubt please escalate it to us for review. +
+ ++ The risks of this type of content are very similar to those of drugs and psychoactive substances, in that discussion + of the topic is not necessarily illegal, and its likelihood may be linked to communities covering related topics. + Our Outdoors community sometimes hosts content discussing tools such as knives which are weapons in other contexts; + these discussions are not necessarily illegal but additional care is required to assess when a line may have been + crossed. If in doubt, escalate! +
+ ++ Doxxing—the deliberate exposure of another user's private information—may fall under a variety of offences + depending on the exact circumstances. Unfortunately, this is something that most online social platforms have to deal + with at some point; we have had one or two such incidents in the past. Doxxing often occurs as part of a wider + campaign of offending against a specific individual known to the perpetrator, or as a result of a particularly heated + disagreement with another user. You can help to avoid the latter of those situations by actively moderating your + community, removing and redirecting disagreements before this becomes a risk, and setting a respectful tone for the + community. +
+ ++ This is likewise an unfortunate reality of many online platforms. While professional research communities tend to be + excellent at citing sources and using material appropriately and legally, most of our communities are not at that + level. Users often fall into the trap of thinking "if it's on the Internet, it must be free for anyone to use". UK + "fair dealing" exceptions are stricter and narrower than US "fair use" doctrine, which presents another risk. + Generative AI is also another emerging risk: while we have a + fairly clear policy prohibiting sole AI use, this regularly gets + missed or ignored, and work into appropriately citing sources used in generative AI models is still very young. +
+ ++ As part of our responsibilities under the Online Safety Act, we're obligated to provide training to all staff and + volunteers undertaking moderation duties. +
++ This training should take around an hour to read through all the content. Once you've finished all the modules you'll + be able to mark it as complete, but you'll be able to come back here any time if you need a reminder. +
+ ++ An overview of the Online Safety Act, our duties, and your responsibilities as a volunteer moderator. +
+ <% end %> + <%= link_to osa_training_path('illegal-content'), class: 'module-widget' do %> ++ An explanation of the difference between the 17 types of priority illegal content, and other applicable types of + non-priority illegal content. +
+ <% end %> + <%= link_to osa_training_path('definitions'), class: 'module-widget' do %> ++ Definitions of all the types of illegal content which apply to us. +
+ <% end %> + <%= link_to osa_training_path('handling'), class: 'module-widget' do %> ++ Your responsibilities and the steps you need to take in response to identifying potentially illegal content. +
+ <% end %> + <%= link_to osa_training_path('higher-risk'), class: 'module-widget' do %> ++ Some types of content are more likely to occur in our communities than others. More detail on those here. +
+ <% end %> + <%= link_to osa_training_path('conclusion'), class: 'module-widget' do %> ++ Thank you for taking the time to complete this training. Mark it as complete here and come back here if you need + to refer back to it. +
+ <% end %> +Last updated 22 March 2026
+ ++ The Act sets out 17 types of priority illegal content, and a number of types of non-priority illegal content. We have + carried out a risk assessment for all types of priority illegal content, and applicable types of non-priority illegal + content, which details the likelihood and impact of each type of content on our platform specifically. +
+ ++ The Act sets out a number of types of priority illegal content. These are generally the most serious or prevalent + types of illegal content or activity, and the Act requires service providers to take proactive measures to protect + users. This includes risk-assessing, training, and transparency requirements, in addition to the obvious removal of + content. +
++ Priority illegal content includes content or activity relating to[1]: +
++ Also called "non-designated illegal content", this refers to illegal content which is not related to a priority + offence set out in the Act. This may cover any content which is illegal under UK law, and so this is a very + wide category. The Act specifically defines two new offences as non-priority offences so that they fall into this + category, but also imposes requirements on service providers to assess the risk of other types of non-designated + illegal content which are not specifically mentioned. +
++ The two new non-priority offences are: +
++ We have also identified risks relating to: +
++ All of these types of content are defined more fully on the next page. +
+ +Last updated 12 March 2026
+ ++ The Online Safety Act 2023 (available here) is a law + established in the UK in 2023 with the aim of improving online safety, particularly with regard to children, but with + wide-ranging effects for all online services. All services with UK users are required to comply with the Act. There + are ongoing cases which will define whether this is enforceable on non-UK entities in practice, but because Codidact + is a UK-based entity, we are clearly within scope and required to comply. +
++ The Act is enforced by the UK's communications regulator, Ofcom, which also sets out the Register of Risks and Codes + of Practice on which our approach is based. +
+ ++ The Act defines two types of service: search services and user-to-user services. User-to-user services are those where + users may interact with one another; this is where we fall. There are different requirements imposed on each kind of + service, which for user-to-user services primarily focus on preventing and removing harmful content, and protecting + users from related harms. +
+ ++ Responsibility for compliance with the requirements of the Act obviously falls on us (meaning the Codidact Foundation + as the organisation running the platform). The Foundation designates one of the Board of Directors as a named + individual with ultimate responsibility for compliance with the Act, which is currently + <%= user_link(User.find(SiteSetting['OSADirector'])) %>. +
++ One of our responsibilities is to ensure that our volunteer moderators (that's you) have an awareness of the Act and + are provided with appropriate training in order to equip them to handle any harmful content which may appear on the + platform. +
+ ++ As a volunteer moderator, your job is to guide, curate, and set the tone for your community. Part of that job is + protecting the community from any unwanted content. The majority of the time, that might take the form of off-topic + posts, arguments between users, or handling flags for your attention. Unfortunately, it may also take the form of + harmful or illegal content covered by the Act, and one of your responsibilities is to ensure this is dealt with and + escalated appropriately. +
++ To be clear: we're not expecting you to handle harmful or illegal content alone. Our ask of you is + simple: if you identify something that you think would be covered by the Act, please: +
+code, keyboard, and italic are enabled.
+
+- name: OSATrainingEnabled
+ value: true
+ value_type: boolean
+ category: SafetyCenter
+ community_id: ~
+ description: >
+ Should Online Safety Act moderator training be available on this instance?
+
+- name: OSADirector
+ value: 8045
+ value_type: integer
+ category: SafetyCenter
+ community_id: ~
+ description: >
+ The user ID of the Director with responsibility for OSA compliance.
+
+- name: ContentPolicyURL
+ value: https://codidact.org/OP02
+ value_type: string
+ category: SafetyCenter
+ community_id: ~
+ description: >
+ A URL where this instance's online safety & content policy can be found.
diff --git a/test/controllers/complaints_controller_test.rb b/test/controllers/complaints_controller_test.rb
index 249395b91..bb23451d0 100644
--- a/test/controllers/complaints_controller_test.rb
+++ b/test/controllers/complaints_controller_test.rb
@@ -346,6 +346,45 @@ class ComplaintsControllerTest < ActionDispatch::IntegrationTest
assert_response(:not_found)
end
+ test 'anon should not be able to access training' do
+ get osa_training_path('home')
+ assert_response(:not_found)
+ end
+
+ test 'basic user should not be able to access training' do
+ sign_in users(:basic_user)
+ get osa_training_path('home')
+ assert_response(:not_found)
+ end
+
+ test 'moderator should be able to access training' do
+ sign_in users(:moderator)
+ get osa_training_path('home')
+ assert_response(:success)
+ end
+
+ test 'staff should be able to access training' do
+ sign_in users(:staff)
+ get osa_training_path('home')
+ assert_response(:success)
+ end
+
+ test 'training should not be accessible if disabled' do
+ SiteSetting['OSATrainingEnabled'] = false
+ sign_in users(:staff)
+ get osa_training_path('home')
+ assert_response(:not_found)
+ end
+
+ test 'should be able to complete training' do
+ sign_in users(:staff)
+ post osa_training_complete_path
+ assert_response(:found)
+ assert_redirected_to safety_center_path
+ assert_equal I18n.t('safety_center.training_complete'), flash[:success]
    +    assert_not_nil users(:staff).reload.osa_training
    
+ end
+
private
def try_create_report(**params)