theory IsaSAT_VMTF_LLVM
imports Watched_Literals.WB_Sort IsaSAT_VMTF IsaSAT_Setup_LLVM
   Isabelle_LLVM.Sorting_Introsort
   IsaSAT_Sorting_LLVM
begin

(* TODO: Mathias! Import the refinement stuff only via a single entry point,
  and do all necessary adaptations at that point.

  Currently, this entry point is Refine_Monadic_Thin.
*)
(*declare is_None_def[simp del] *)


(*
lemma VMTF_Node_ref[sepref_fr_rules]:
  ‹(uncurry2 (return ooo VMTF_Node), uncurry2 (RETURN ooo VMTF_Node)) ∈
    uint64_nat_assn⇧k *⇩a (option_assn uint32_nat_assn)⇧k *⇩a (option_assn uint32_nat_assn)⇧k →⇩a
    vmtf_node_assn›
  by sepref_to_hoare
   (sep_auto simp: vmtf_node_rel_def uint32_nat_rel_def br_def option_assn_alt_def
     split: option.splits)

lemma stamp_ref[sepref_fr_rules]:
  ‹(return o stamp, RETURN o stamp) ∈ vmtf_node_assn⇧k →⇩a uint64_nat_assn›
  by sepref_to_hoare
    (auto simp: ex_assn_move_out(2)[symmetric] return_cons_rule ent_ex_up_swap vmtf_node_rel_def
      simp del: ex_assn_move_out)

lemma get_next_ref[sepref_fr_rules]:
  ‹(return o get_next, RETURN o get_next) ∈ vmtf_node_assn⇧k →⇩a
   option_assn uint32_nat_assn›
  unfolding option_assn_pure_conv
  by sepref_to_hoare (sep_auto simp: return_cons_rule vmtf_node_rel_def)

lemma get_prev_ref[sepref_fr_rules]:
  ‹(return o get_prev, RETURN o get_prev) ∈ vmtf_node_assn⇧k →⇩a
   option_assn uint32_nat_assn›
  unfolding option_assn_pure_conv
  by sepref_to_hoare (sep_auto simp: return_cons_rule vmtf_node_rel_def)
*)


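(* Sorting atoms by their VMTF stamp: ‹valid_atoms› restricts the sortable
  indices to the bounds of the node array, ‹VMTF_score_less› compares the
  stamps of two atoms, and ‹mop_VMTF_score_less› is the monadic variant
  carrying the bound-check assertions needed by the refinement below. *)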
definition valid_atoms :: "nat_vmtf_node list ⇒ nat set" where
 "valid_atoms xs ≡ {i. i < length xs}"

definition VMTF_score_less where
  ‹VMTF_score_less xs i j ⟷ stamp (xs ! i) < stamp (xs ! j)›

definition mop_VMTF_score_less where
  ‹mop_VMTF_score_less xs i j = do {
    ASSERT(i < length xs);
    ASSERT(j < length xs);
    RETURN (stamp (xs ! i) < stamp (xs ! j))
  }›

sepref_register VMTF_score_less

sepref_def (in -) mop_VMTF_score_less_impl
  is ‹uncurry2 (mop_VMTF_score_less)›
  :: ‹(array_assn vmtf_node_assn)⇧k *⇩a atom_assn⇧k *⇩a atom_assn⇧k →⇩a bool1_assn›
  supply [[goals_limit = 1]]
  unfolding mop_VMTF_score_less_def
  apply (rewrite at ‹stamp (_ ! ⌑)› value_of_atm_def[symmetric])
  apply (rewrite at ‹stamp (_ ! ⌑)› in ‹_ < ⌑› value_of_atm_def[symmetric])
  unfolding index_of_atm_def[symmetric]
  by sepref


interpretation VMTF: weak_ordering_on_lt where
  C = "valid_atoms vs" and
  less = "VMTF_score_less vs"
  by unfold_locales
   (auto simp: VMTF_score_less_def split: if_splits)

interpretation VMTF: parameterized_weak_ordering valid_atoms VMTF_score_less
    mop_VMTF_score_less
  by unfold_locales
   (auto simp: mop_VMTF_score_less_def
     valid_atoms_def VMTF_score_less_def)


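(* Instantiation of the generic parameterized introsort from Isabelle_LLVM:
  the elements are atoms stored in a plain (wo/eo) array, and the sorting
  parameter is the array of VMTF nodes whose stamps drive the comparison.
  The ‹defines› clauses give stable names to the generated implementations. *)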
global_interpretation VMTF: parameterized_sort_impl_context
  "woarray_assn atom_assn" "eoarray_assn atom_assn" atom_assn
  return return
  eo_extract_impl
  array_upd
  valid_atoms VMTF_score_less mop_VMTF_score_less mop_VMTF_score_less_impl
  "array_assn vmtf_node_assn"
  defines
          VMTF_is_guarded_insert_impl = VMTF.is_guarded_param_insert_impl
      and VMTF_is_unguarded_insert_impl = VMTF.is_unguarded_param_insert_impl
      and VMTF_unguarded_insertion_sort_impl = VMTF.unguarded_insertion_sort_param_impl
      and VMTF_guarded_insertion_sort_impl = VMTF.guarded_insertion_sort_param_impl
      and VMTF_final_insertion_sort_impl = VMTF.final_insertion_sort_param_impl
      (*and VMTF_mop_lchild_impl  = VMTF.mop_lchild_impl
      and VMTF_mop_rchild_impl  = VMTF.mop_rchild_impl
      and VMTF_has_rchild_impl  = VMTF.has_rchild_impl
      and VMTF_has_lchild_impl  = VMTF.has_lchild_impl *)

      and VMTF_pcmpo_idxs_impl  = VMTF.pcmpo_idxs_impl
      and VMTF_pcmpo_v_idx_impl  = VMTF.pcmpo_v_idx_impl
      and VMTF_pcmpo_idx_v_impl  = VMTF.pcmpo_idx_v_impl
      and VMTF_pcmp_idxs_impl  = VMTF.pcmp_idxs_impl

      and VMTF_mop_geth_impl    = VMTF.mop_geth_impl
      and VMTF_mop_seth_impl    = VMTF.mop_seth_impl
      and VMTF_sift_down_impl   = VMTF.sift_down_impl
      and VMTF_heapify_btu_impl = VMTF.heapify_btu_impl
      and VMTF_heapsort_impl    = VMTF.heapsort_param_impl
      and VMTF_qsp_next_l_impl       = VMTF.qsp_next_l_impl
      and VMTF_qsp_next_h_impl       = VMTF.qsp_next_h_impl
      and VMTF_qs_partition_impl     = VMTF.qs_partition_impl
(*      and VMTF_qs_partitionXXX_impl     = VMTF.qs_partitionXXX_impl *)
      and VMTF_partition_pivot_impl  = VMTF.partition_pivot_impl
      and VMTF_introsort_aux_impl = VMTF.introsort_aux_param_impl
      and VMTF_introsort_impl        = VMTF.introsort_param_impl
      and VMTF_move_median_to_first_impl = VMTF.move_median_to_first_param_impl

  apply unfold_locales
  apply (rule eo_hnr_dep)+
  unfolding GEN_ALGO_def refines_param_relp_def (* TODO: thm gen_refines_param_relpI *)
  supply[[unify_trace_failure]]
  by (rule mop_VMTF_score_less_impl.refine)



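(* Second instantiation of the sorter, this time on array-lists (arl64) of
  atoms: the ‹pure_eo_adapter› lifts the pure atom array-list to the
  element-owning container interface expected by the sorting locale. *)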
global_interpretation
  VMTF_it: pure_eo_adapter atom_assn "arl64_assn atom_assn" arl_nth arl_upd
  defines VMTF_it_eo_extract_impl = VMTF_it.eo_extract_impl
  apply (rule al_pure_eo)
  by (simp add: safe_constraint_rules)



global_interpretation VMTF_it: parameterized_sort_impl_context
  where
    wo_assn = ‹arl64_assn atom_assn›
    and eo_assn = VMTF_it.eo_assn
    and elem_assn = atom_assn
    and to_eo_impl = return
    and to_wo_impl = return
    and extract_impl = VMTF_it_eo_extract_impl
    and set_impl = arl_upd
    and cdom = valid_atoms
    and pless = VMTF_score_less
    and pcmp = mop_VMTF_score_less
    and pcmp_impl = mop_VMTF_score_less_impl
    and cparam_assn = ‹array_assn vmtf_node_assn›
  defines
          VMTF_it_is_guarded_insert_impl = VMTF_it.is_guarded_param_insert_impl
      and VMTF_it_is_unguarded_insert_impl = VMTF_it.is_unguarded_param_insert_impl
      and VMTF_it_unguarded_insertion_sort_impl = VMTF_it.unguarded_insertion_sort_param_impl
      and VMTF_it_guarded_insertion_sort_impl = VMTF_it.guarded_insertion_sort_param_impl
      and VMTF_it_final_insertion_sort_impl = VMTF_it.final_insertion_sort_param_impl
      (*and VMTF_it_mop_lchild_impl  = VMTF_it.mop_lchild_impl
      and VMTF_it_mop_rchild_impl  = VMTF_it.mop_rchild_impl
      and VMTF_it_has_rchild_impl  = VMTF_it.has_rchild_impl
      and VMTF_it_has_lchild_impl  = VMTF_it.has_lchild_impl *)

      and VMTF_it_pcmpo_idxs_impl  = VMTF_it.pcmpo_idxs_impl
      and VMTF_it_pcmpo_v_idx_impl  = VMTF_it.pcmpo_v_idx_impl
      and VMTF_it_pcmpo_idx_v_impl  = VMTF_it.pcmpo_idx_v_impl
      and VMTF_it_pcmp_idxs_impl  = VMTF_it.pcmp_idxs_impl

      and VMTF_it_mop_geth_impl    = VMTF_it.mop_geth_impl
      and VMTF_it_mop_seth_impl    = VMTF_it.mop_seth_impl
      and VMTF_it_sift_down_impl   = VMTF_it.sift_down_impl
      and VMTF_it_heapify_btu_impl = VMTF_it.heapify_btu_impl
      and VMTF_it_heapsort_impl    = VMTF_it.heapsort_param_impl
      and VMTF_it_qsp_next_l_impl       = VMTF_it.qsp_next_l_impl
      and VMTF_it_qsp_next_h_impl       = VMTF_it.qsp_next_h_impl
      and VMTF_it_qs_partition_impl     = VMTF_it.qs_partition_impl
(*      and VMTF_it_qs_partitionXXX_impl     = VMTF_it.qs_partitionXXX_impl *)
      and VMTF_it_partition_pivot_impl  = VMTF_it.partition_pivot_impl
      and VMTF_it_introsort_aux_impl = VMTF_it.introsort_aux_param_impl
      and VMTF_it_introsort_impl        = VMTF_it.introsort_param_impl
      and VMTF_it_move_median_to_first_impl = VMTF_it.move_median_to_first_param_impl

  apply unfold_locales
  unfolding GEN_ALGO_def refines_param_relp_def (* TODO: thm gen_refines_param_relpI *)
  apply (rule mop_VMTF_score_less_impl.refine)
  done


lemmas [llvm_inline] = VMTF_it.eo_extract_impl_def[THEN meta_fun_cong, THEN meta_fun_cong]

print_named_simpset llvm_inline
export_llvm
  "VMTF_heapsort_impl :: _ ⇒ _ ⇒ _"
  "VMTF_introsort_impl :: _ ⇒ _ ⇒ _"

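(* ‹VMTF_sort_scores› sorts the complete atom list according to
  ‹VMTF_score_less›, i.e. by increasing stamp; it is shown to refine
  ‹vmtf_reorder_list_raw› and then composed with the introsort implementation. *)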
definition VMTF_sort_scores_raw :: ‹_› where
  ‹VMTF_sort_scores_raw = pslice_sort_spec valid_atoms VMTF_score_less›

definition VMTF_sort_scores :: ‹_› where
  ‹VMTF_sort_scores xs ys = VMTF_sort_scores_raw xs ys 0 (length ys)›

lemmas VMTF_introsort[sepref_fr_rules] =
  VMTF_it.introsort_param_impl_correct[unfolded VMTF_sort_scores_raw_def[symmetric] PR_CONST_def]

sepref_register VMTF_sort_scores_raw vmtf_reorder_list_raw

lemma VMTF_sort_scores_vmtf_reorder_list_raw:
  ‹(VMTF_sort_scores, vmtf_reorder_list_raw) ∈ Id → Id → ⟨Id⟩nres_rel›
  unfolding VMTF_sort_scores_def VMTF_sort_scores_raw_def pslice_sort_spec_def
    vmtf_reorder_list_raw_def
  apply (refine_rcg)
  subgoal by (auto simp: valid_atoms_def)
  subgoal for vm vm' arr arr'
    by (auto intro!: slice_sort_spec_refine_sort[THEN order_trans, of _ arr' arr']
    simp: valid_atoms_def slice_rel_def br_def reorder_list_def conc_fun_RES sort_spec_def
      eq_commute[of ‹length _› ‹length arr'›])
  done

sepref_def VMTF_sort_scores_raw_impl
  is ‹uncurry VMTF_sort_scores›
  :: ‹(IICF_Array.array_assn vmtf_node_assn)⇧k *⇩a VMTF_it.arr_assn⇧d →⇩a VMTF_it.arr_assn›
  unfolding VMTF_sort_scores_def
  apply (annot_snat_const "TYPE(64)")
  by sepref

lemmas[sepref_fr_rules] =
  VMTF_sort_scores_raw_impl.refine[FCOMP VMTF_sort_scores_vmtf_reorder_list_raw]

sepref_def VMTF_sort_scores_impl
  is ‹uncurry vmtf_reorder_list›
  :: ‹(vmtf_assn)⇧k *⇩a VMTF_it.arr_assn⇧d →⇩a VMTF_it.arr_assn›
  unfolding vmtf_reorder_list_def
  by sepref

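(* Imperative operations on the atoms hash: deleting and inserting a single
  atom, both guarded by the corresponding preconditions. *)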
sepref_def atoms_hash_del_code
  is ‹uncurry (RETURN oo atoms_hash_del)›
  :: ‹[uncurry atoms_hash_del_pre]⇩a atom_assn⇧k *⇩a (atoms_hash_assn)⇧d → atoms_hash_assn›
  unfolding atoms_hash_del_def atoms_hash_del_pre_def
  apply annot_all_atm_idxs
  by sepref

sepref_def atoms_hash_insert_code
  is ‹uncurry (RETURN oo atoms_hash_insert)›
  :: ‹[uncurry atms_hash_insert_pre]⇩a
      atom_assn⇧k *⇩a (distinct_atoms_assn)⇧d → distinct_atoms_assn›
  unfolding atoms_hash_insert_def atms_hash_insert_pre_def
  supply [[goals_limit=1]]
  apply annot_all_atm_idxs
  by sepref


sepref_register find_decomp_wl_imp
sepref_register rescore_clause vmtf_flush
sepref_register vmtf_mark_to_rescore
sepref_register vmtf_mark_to_rescore_clause

sepref_register vmtf_mark_to_rescore_also_reasons get_the_propagation_reason_pol

sepref_register find_decomp_w_ns


sepref_def update_next_search_impl
  is ‹uncurry (RETURN oo update_next_search)›
  :: ‹(atom.option_assn)⇧k *⇩a vmtf_remove_assn⇧d →⇩a vmtf_remove_assn›
  supply [[goals_limit=1]]
  unfolding update_next_search_def vmtf_remove_assn_def
  by sepref

lemma case_option_split:
  ‹(case a of None ⇒ x | Some y ⇒ f y) =
   (if is_None a then x else let y = the a in f y)›
  by (auto split: option.splits)
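(* ‹case_option_split› turns option case distinctions into a test on ‹is_None›
  followed by ‹the›, which is the shape expected by the synthesis once atom
  options are folded via ‹atom.fold_option›. *)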
(*lemma is_pure_snat_option[safe_constraint_rules]: ‹CONSTRAINT is_pure snat.option_assn›
  using snat.A_pure snat.option_assn_pure unfolding CONSTRAINT_def by blast
*)


sepref_def ns_vmtf_dequeue_code
   is ‹uncurry (RETURN oo ns_vmtf_dequeue)›
   :: ‹[vmtf_dequeue_pre]⇩a
        atom_assn⇧k *⇩a (array_assn vmtf_node_assn)⇧d → array_assn vmtf_node_assn›
  supply [[goals_limit = 1]]
  supply option.splits[split] if_splits[split]
  unfolding ns_vmtf_dequeue_def vmtf_dequeue_pre_alt_def case_option_split atom.fold_option
  apply annot_all_atm_idxs
  by sepref


sepref_register get_next get_prev stamp
lemma eq_Some_iff: "x = Some b ⟷ (¬is_None x ∧ the x = b)"
  by (cases x) auto

lemma hfref_refine_with_pre:
  assumes "⋀x. P x ⟹ g' x ≤ g x"
  assumes "(f,g') ∈ [P]ad A → R"
  shows "(f,g) ∈ [P]ad A → R"
  using assms(2)[THEN hfrefD] assms(1)
  by (auto intro!: hfrefI intro: hn_refine_ref)
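(* ‹hfref_refine_with_pre› makes it possible to synthesize code from an
  alternative program that refines the abstract one only under the
  precondition; it is used below to derive ‹vmtf_en_dequeue_fast_code› from
  ‹isa_vmtf_en_dequeue_alt_def2›. *)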


lemma isa_vmtf_en_dequeue_preI:
  assumes "isa_vmtf_en_dequeue_pre ((M,L),(ns, m, fst_As, lst_As, next_search))"
  shows "fst_As < length ns" "L < length ns" "Suc m < max_unat 64"
    and "get_next (ns!L) = Some i ⟶ i < length ns"
    and "fst_As ≠ lst_As ⟶ get_prev (ns ! lst_As) ≠ None"
    and "get_next (ns ! fst_As) ≠ None ⟶ get_prev (ns ! lst_As) ≠ None"
  using assms
  unfolding isa_vmtf_en_dequeue_pre_def vmtf_dequeue_pre_def
  apply (auto simp: max_unat_def uint64_max_def sint64_max_def)
  done


find_theorems "_ ≠ None ⟷ _"

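(* Inlined variant of ‹isa_vmtf_en_dequeue› (dequeue the atom, then re-enqueue
  it at the front of the VMTF list), valid only under the precondition; this
  is the version handed to ‹sepref› below. *)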
lemma isa_vmtf_en_dequeue_alt_def2:
   ‹isa_vmtf_en_dequeue_pre x ⟹ uncurry2 (λM L vm.
    case vm of (ns, m, fst_As, lst_As, next_search) ⇒ doN {
      ASSERT(L<length ns);
      nsL ← mop_list_get ns (index_of_atm L);
      let fst_As = (if fst_As = L then get_next nsL else (Some fst_As));

      let next_search = (if next_search = (Some L) then get_next nsL
                        else next_search);
      let lst_As = (if lst_As = L then get_prev nsL else (Some lst_As));
      ASSERT (vmtf_dequeue_pre (L,ns));
      let ns = ns_vmtf_dequeue L ns;
      ASSERT (defined_atm_pol_pre M L);
      let de = (defined_atm_pol M L);
      ASSERT (Suc m < max_unat 64);
      case fst_As of
        None ⇒ RETURN
          (ns[L := VMTF_Node m fst_As None], m + 1, L, L,
           if de then None else Some L)
      | Some fst_As ⇒ doN {
          ASSERT (L < length ns ∧ fst_As < length ns ∧ lst_As ≠ None);
          let fst_As' =
                VMTF_Node (stamp (ns ! fst_As)) (Some L)
                 (get_next (ns ! fst_As));
          RETURN (
            ns[L := VMTF_Node (m + 1) None (Some fst_As),
            fst_As := fst_As'],
            m + 1, L, the lst_As,
            if de then next_search else Some L)
      }
    }) x
  ≤ uncurry2 (isa_vmtf_en_dequeue) x
    ›
  unfolding isa_vmtf_en_dequeue_def vmtf_dequeue_def isa_vmtf_enqueue_def
    annot_unat_snat_upcast[symmetric] ASSN_ANNOT_def
  apply (cases x; simp add: Let_def)
  apply (simp
    only: pw_le_iff refine_pw_simps
    split: prod.splits
    )
  supply isa_vmtf_en_dequeue_preD[simp] (*isa_vmtf_en_dequeue_pre_vmtf_enqueue_pre[dest]*)
  apply (auto
    split!: if_splits option.splits
    simp: refine_pw_simps isa_vmtf_en_dequeue_preI dest: isa_vmtf_en_dequeue_preI
    simp del: not_None_eq
    (*dest: isa_vmtf_en_dequeue_preI*))
  done

(* TODO: This is a general setup to identify any numeral by id-op (numeral is
  already handled in Sepref_Id_Op.thy).
  Note: naked int/nat numerals will be rejected by translate, as they need to be
  type-annotated.
*)
sepref_register 1 0



lemma vmtf_en_dequeue_fast_codeI:
  assumes "isa_vmtf_en_dequeue_pre ((M, L),(ns,m,fst_As, lst_As, next_search))"
  shows "Suc m < max_unat 64"
  using assms
  unfolding isa_vmtf_en_dequeue_pre_def max_unat_def uint64_max_def
  by auto


schematic_goal mk_free_trail_pol_fast_assn[sepref_frame_free_rules]: "MK_FREE trail_pol_fast_assn ?fr"
  unfolding trail_pol_fast_assn_def
  by (rule free_thms sepref_frame_free_rules)+ (* TODO: Write a method for that! *)

sepref_def vmtf_en_dequeue_fast_code
   is ‹uncurry2 isa_vmtf_en_dequeue›
   :: ‹[isa_vmtf_en_dequeue_pre]⇩a
        trail_pol_fast_assn⇧k *⇩a atom_assn⇧k *⇩a vmtf_assn⇧d → vmtf_assn›
  apply (rule hfref_refine_with_pre[OF isa_vmtf_en_dequeue_alt_def2], assumption)

  supply [[goals_limit = 1]]
  unfolding isa_vmtf_en_dequeue_alt_def2 case_option_split eq_Some_iff
  apply (rewrite in "if ⌑ then get_next _ else _" short_circuit_conv)
  apply annot_all_atm_idxs
  apply (annot_unat_const "TYPE(64)")
  unfolding atom.fold_option
  unfolding fold_tuple_optimizations
  by sepref


sepref_register vmtf_rescale
sepref_def vmtf_rescale_code
   is ‹vmtf_rescale›
   :: ‹vmtf_assn⇧d →⇩a vmtf_assn›
  supply [[goals_limit = 1]]
  supply vmtf_en_dequeue_pre_def[simp]
  unfolding vmtf_rescale_alt_def update_stamp.simps
  unfolding atom.fold_option
  apply (annot_unat_const "TYPE(64)")
  apply annot_all_atm_idxs
  by sepref


sepref_register partition_between_ref

(*lemma partition_between_ref_vmtf_code_aux:
  "⟦(loi,lo)∈snat_rel' TYPE(64); (hii,hi)∈snat_rel' TYPE(64)⟧ ⟹ lo + (hi - lo) div 2 < max_snat 64"
  apply sepref_bounds
  apply (drule in_snat_rel_imp_less_max')+
  by auto
*)
sepref_register isa_vmtf_enqueue

(*
lemma uint64_nal_rel_le_uint64_max: ‹(a, b) ∈ uint64_nat_rel ⟹ b ≤ uint64_max›
  by (auto simp: uint64_nat_rel_def br_def nat_of_uint64_le_uint64_max)
*)
lemma emptied_list_alt_def: ‹emptied_list xs = take 0 xs›
  by (auto simp: emptied_list_def)
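(* Rewriting ‹emptied_list› as ‹take 0› lets the synthesis reuse the existing
  rule for ‹take› on array-lists, so the buffer can be emptied in place. *)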

sepref_def current_stamp_impl
  is ‹RETURN o current_stamp›
  :: ‹vmtf_assn⇧k →⇩a uint64_nat_assn›
  unfolding current_stamp_alt_def
  by sepref



sepref_register isa_vmtf_en_dequeue

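(* Flushing the buffered atoms back into the VMTF queue: the loop re-enqueues
  each atom and removes it from the atoms hash; the rewrites below only adjust
  the integer-width annotations (snat vs. unat). *)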
sepref_def isa_vmtf_flush_fast_code
   is ‹uncurry isa_vmtf_flush_int›
   :: ‹trail_pol_fast_assn⇧k *⇩a (vmtf_remove_assn)⇧d →⇩a
        vmtf_remove_assn›
  supply [[goals_limit = 1]]
  unfolding vmtf_flush_def PR_CONST_def isa_vmtf_flush_int_def
    current_stamp_def[symmetric] emptied_list_alt_def
    vmtf_remove_assn_def
  apply (rewrite at ‹If (_ - _ ≤ ⌑) _ _› annot_snat_unat_conv)
  apply (rewrite at ‹WHILEIT _ (λ(_, _, _)._ < ⌑)› annot_snat_unat_conv)
  apply (rewrite at ‹isa_vmtf_en_dequeue _ (_ ! ⌑)› annot_unat_snat_conv)
  apply (rewrite at ‹atoms_hash_del (_ ! ⌑)› annot_unat_snat_conv)
  apply (rewrite at ‹take ⌑ _› snat_const_fold[where 'a=64])
  apply (annot_unat_const "TYPE(64)")
  by sepref


sepref_register isa_vmtf_mark_to_rescore
sepref_def isa_vmtf_mark_to_rescore_code
  is ‹uncurry (RETURN oo isa_vmtf_mark_to_rescore)›
  :: ‹[uncurry isa_vmtf_mark_to_rescore_pre]⇩a
     atom_assn⇧k *⇩a vmtf_remove_assn⇧d → vmtf_remove_assn›
  supply [[goals_limit=1]] option.splits[split] vmtf_def[simp] in_ℒ⇩a⇩l⇩l_atm_of_in_atms_of_iff[simp]
    neq_NilE[elim!] literals_are_in_ℒ⇩i⇩n_add_mset[simp]
  unfolding isa_vmtf_mark_to_rescore_pre_def isa_vmtf_mark_to_rescore_def vmtf_remove_assn_def
  by sepref


sepref_register isa_vmtf_unset
sepref_def isa_vmtf_unset_code
  is ‹uncurry (RETURN oo isa_vmtf_unset)›
  :: ‹[uncurry vmtf_unset_pre]⇩a
     atom_assn⇧k *⇩a vmtf_remove_assn⇧d → vmtf_remove_assn›
  supply [[goals_limit=1]] option.splits[split] vmtf_def[simp] in_ℒ⇩a⇩l⇩l_atm_of_in_atms_of_iff[simp]
    neq_NilE[elim!] literals_are_in_ℒ⇩i⇩n_add_mset[simp]
  unfolding isa_vmtf_unset_def vmtf_unset_pre_def vmtf_remove_assn_def atom.fold_option
  apply (rewrite in ‹If (_ ∨ _)› short_circuit_conv)
  apply annot_all_atm_idxs
  by sepref


lemma isa_vmtf_mark_to_rescore_and_unsetI:  ‹
     atms_hash_insert_pre ak (ad, ba) ⟹
       isa_vmtf_mark_to_rescore_pre ak ((a, aa, ab, ac, Some ak'), ad, ba)›
  by (auto simp: isa_vmtf_mark_to_rescore_pre_def)

sepref_def vmtf_mark_to_rescore_and_unset_code
  is ‹uncurry (RETURN oo isa_vmtf_mark_to_rescore_and_unset)›
  :: ‹[isa_vmtf_mark_to_rescore_and_unset_pre]⇩a
      atom_assn⇧k *⇩a vmtf_remove_assn⇧d → vmtf_remove_assn›
  supply image_image[simp] uminus_𝒜⇩i⇩n_iff[iff] in_diffD[dest] option.splits[split]
    if_splits[split] isa_vmtf_unset_def[simp] isa_vmtf_mark_to_rescore_and_unsetI[intro!]
  supply [[goals_limit=1]]
  unfolding isa_vmtf_mark_to_rescore_and_unset_def isa_vmtf_mark_to_rescore_and_unset_pre_def
    save_phase_def
  by sepref


sepref_def find_decomp_wl_imp_fast_code
  is ‹uncurry2 (isa_find_decomp_wl_imp)›
  :: ‹[λ((M, lev), vm). True]⇩a trail_pol_fast_assn⇧d *⇩a uint32_nat_assn⇧k *⇩a vmtf_remove_assn⇧d
    → trail_pol_fast_assn ×⇩a vmtf_remove_assn›
  unfolding isa_find_decomp_wl_imp_def get_maximum_level_remove_def[symmetric] PR_CONST_def
    trail_pol_conv_to_no_CS_def
  supply trail_conv_to_no_CS_def[simp] lit_of_hd_trail_def[simp]
  supply [[goals_limit=1]] literals_are_in_ℒ⇩i⇩n_add_mset[simp]
  supply vmtf_unset_pre_def[simp]
  apply (rewrite at ‹let _ = _ - ⌑ in _› annot_unat_snat_upcast[where 'l=64])
  apply (annot_snat_const "TYPE(64)")
  by sepref


sepref_def vmtf_rescore_fast_code
  is ‹uncurry2 isa_vmtf_rescore›
  :: ‹clause_ll_assn⇧k *⇩a trail_pol_fast_assn⇧k *⇩a vmtf_remove_assn⇧d →⇩a
       vmtf_remove_assn›
  unfolding isa_vmtf_rescore_body_def[abs_def] PR_CONST_def isa_vmtf_rescore_def
  supply [[goals_limit = 1]] fold_is_None[simp]
  apply (annot_snat_const "TYPE(64)")
  by sepref


sepref_def find_decomp_wl_imp'_fast_code
  is ‹uncurry find_decomp_wl_st_int›
  :: ‹uint32_nat_assn⇧k *⇩a isasat_bounded_assn⇧d →⇩a
        isasat_bounded_assn›
  unfolding find_decomp_wl_st_int_def PR_CONST_def isasat_bounded_assn_def
  supply [[goals_limit = 1]]
  unfolding fold_tuple_optimizations
  by sepref


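(* Bounds for iterating over a clause stored in the arena: a valid clause
  index plus the clause length stays below ‹max_snat 64› as long as the arena
  itself has at most ‹sint64_max› elements. *)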
lemma (in -) arena_is_valid_clause_idx_le_uint64_max:
  ‹arena_is_valid_clause_idx be bd ⟹
    length be ≤ sint64_max ⟹
   bd + arena_length be bd < max_snat 64›
  ‹arena_is_valid_clause_idx be bd ⟹ length be ≤ sint64_max ⟹
   bd < max_snat 64›
  using arena_lifting(10)[of be _ _ bd] unfolding max_snat_def sint64_max_def
  by (fastforce simp: arena_lifting arena_is_valid_clause_idx_def)+

sepref_def vmtf_mark_to_rescore_clause_fast_code
  is ‹uncurry2 (isa_vmtf_mark_to_rescore_clause)›
  :: ‹[λ((N, _), _). length N ≤ sint64_max]⇩a
       arena_fast_assn⇧k *⇩a sint64_nat_assn⇧k *⇩a vmtf_remove_assn⇧d → vmtf_remove_assn›
  supply [[goals_limit=1]] arena_is_valid_clause_idx_le_uint64_max[intro]
  unfolding isa_vmtf_mark_to_rescore_clause_def PR_CONST_def
  unfolding while_eq_nfoldli[symmetric]
  apply (subst while_upt_while_direct, simp)
  unfolding nres_monad3
  apply (annot_snat_const "TYPE(64)")
  by sepref


sepref_def vmtf_mark_to_rescore_also_reasons_fast_code
  is ‹uncurry3 (isa_vmtf_mark_to_rescore_also_reasons)›
  :: ‹[λ(((_, N), _), _). length N ≤ sint64_max]⇩a
      trail_pol_fast_assn⇧k *⇩a arena_fast_assn⇧k *⇩a out_learned_assn⇧k *⇩a vmtf_remove_assn⇧d →
      vmtf_remove_assn›
  supply image_image[simp] uminus_𝒜⇩i⇩n_iff[iff] in_diffD[dest] option.splits[split]
    in_ℒ⇩a⇩l⇩l_atm_of_𝒜⇩i⇩n[simp]
  supply [[goals_limit=1]]
  unfolding isa_vmtf_mark_to_rescore_also_reasons_def PR_CONST_def
  unfolding while_eq_nfoldli[symmetric]
  apply (subst while_upt_while_direct, simp)
  apply (annot_snat_const "TYPE(64)")
  unfolding  nres_monad3 case_option_split
  by sepref

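(* Sanity check that all functions synthesized above can actually be exported
  to LLVM; the ‹experiment› block keeps this export local to the check. *)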
experiment begin

export_llvm
  ns_vmtf_dequeue_code
  atoms_hash_del_code
  atoms_hash_insert_code
  update_next_search_impl
  ns_vmtf_dequeue_code
  vmtf_en_dequeue_fast_code
  vmtf_rescale_code
  current_stamp_impl
  isa_vmtf_flush_fast_code
  isa_vmtf_mark_to_rescore_code
  isa_vmtf_unset_code
  vmtf_mark_to_rescore_and_unset_code
  find_decomp_wl_imp_fast_code
  vmtf_rescore_fast_code
  find_decomp_wl_imp'_fast_code
  vmtf_mark_to_rescore_clause_fast_code
  vmtf_mark_to_rescore_also_reasons_fast_code

end

end