Programming Cortana

I am trying to program a little application for Cortana.

My idea is that I say (I have enabled the "Hey Cortana" feature):

Hey Cortana, Convert 45 degrees to farenheit

and, for the moment, the app just writes a log entry to my Output window in Visual Studio. I said exactly this sentence and Cortana understood me perfectly, but she opened the browser and sent the phrase to Bing instead.

Why? What did I do wrong? I don't get any syntax error.

This is my code:

// commands.xml

<?xml version="1.0" encoding="utf-8"?>
<VoiceCommands xmlns="http://schemas.microsoft.com/voicecommands/1.1">
  <CommandSet xml:lang="en-us" Name="MyCommands_en-us">
    <CommandPrefix> Convert, </CommandPrefix>
    <Example> Convert 45 degrees to farenheit </Example>

    <Command Name="farenheitToDegrees">
      <Example> 73 farenheit to degrees</Example>
      <ListenFor> {farenheit} farenheit to degrees </ListenFor>
      <Feedback> {farenheit} are ... in degrees </Feedback>
      <Navigate/>
    </Command>

    <Command Name="degreesToFarenheit">
      <Example> 45 degrees to farenheit </Example>
      <ListenFor> {degrees} degrees to farenheit </ListenFor>
      <Feedback> {degrees} degrees are ... in farenheit </Feedback>
      <Navigate/>
    </Command>

    <PhraseTopic Label="degrees" Scenario="Dictation">
      <Subject>Temperature</Subject>
    </PhraseTopic>

    <PhraseTopic Label="farenheit" Scenario="Dictation">
      <Subject>Temperature</Subject>
    </PhraseTopic>
  </CommandSet>
</VoiceCommands>

// App.xaml.cs

using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Runtime.InteropServices.WindowsRuntime;
using Windows.ApplicationModel;
using Windows.ApplicationModel.Activation;
using Windows.Foundation;
using Windows.Foundation.Collections;
using Windows.UI.Xaml;
using Windows.UI.Xaml.Controls;
using Windows.UI.Xaml.Controls.Primitives;
using Windows.UI.Xaml.Data;
using Windows.UI.Xaml.Input;
using Windows.UI.Xaml.Media;
using Windows.UI.Xaml.Navigation;

using Windows.ApplicationModel.VoiceCommands;
using Windows.Storage;
using Windows.Media.SpeechRecognition;

namespace HelloWorld
{
    /// <summary>
    /// Provides application-specific behavior to supplement the default  Application class.
    /// </summary>
    sealed partial class App : Application
    {
        /// <summary>
        /// Initializes the singleton application object.  This is the first line of authored code
        /// executed, and as such is the logical equivalent of main() or WinMain().
        /// </summary>
        public App()
        {
            Microsoft.ApplicationInsights.WindowsAppInitializer.InitializeAsync(
            Microsoft.ApplicationInsights.WindowsCollectors.Metadata |
            Microsoft.ApplicationInsights.WindowsCollectors.Session);
            this.InitializeComponent();
            this.Suspending += OnSuspending;
        }

        /// <summary>
        /// Invoked when the application is launched normally by the end user.  Other entry points
        /// will be used such as when the application is launched to open a specific file.
        /// </summary>
        /// <param name="e">Details about the launch request and process.</param>
        protected async override void OnLaunched(LaunchActivatedEventArgs e)
        {

#if DEBUG
            if (System.Diagnostics.Debugger.IsAttached)
            {
                this.DebugSettings.EnableFrameRateCounter = true;
            }
#endif

            Frame rootFrame = Window.Current.Content as Frame;

            // Do not repeat app initialization when the Window already has content,
            // just ensure that the window is active
            if (rootFrame == null)
            {
                // Create a Frame to act as the navigation context and navigate to the first page
                rootFrame = new Frame();

                rootFrame.NavigationFailed += OnNavigationFailed;

                if (e.PreviousExecutionState == ApplicationExecutionState.Terminated)
                {
                    //TODO: Load state from previously suspended application
                }

                // Place the frame in the current Window
                Window.Current.Content = rootFrame;
            }

            if (rootFrame.Content == null)
            {
                // When the navigation stack isn't restored navigate to the first page,
                // configuring the new page by passing required information as a navigation
                // parameter
                rootFrame.Navigate(typeof(MainPage), e.Arguments);
            }
            // Ensure the current window is active
            Window.Current.Activate();


            var storageFile =
              await Windows.Storage.StorageFile.GetFileFromApplicationUriAsync(new Uri("ms-appx:///commands.xml"));
            await
                Windows.ApplicationModel.VoiceCommands.VoiceCommandDefinitionManager.InstallCommandDefinitionsFromStorageFileAsync(storageFile);
        }

        protected override void OnActivated(IActivatedEventArgs e)
        {
            // Was the app activated by a voice command?
            if (e.Kind != Windows.ApplicationModel.Activation.ActivationKind.VoiceCommand)
            {
                return;
            }

            var commandArgs = e as Windows.ApplicationModel.Activation.VoiceCommandActivatedEventArgs;

            SpeechRecognitionResult speechRecognitionResult = commandArgs.Result;

            // Get the name of the voice command and the text spoken
            string voiceCommandName = speechRecognitionResult.RulePath[0];
            string textSpoken = speechRecognitionResult.Text;

            switch (voiceCommandName)
            {
                case "farenheitToDegrees":
                    string farenheit = speechRecognitionResult.SemanticInterpretation.Properties["farenheit"][0];
                    System.Diagnostics.Debug.WriteLine((Convert.ToInt32(farenheit) - 32) / 1.8);
                    break;

                case "degreesToFarenheit":
                    string degrees = speechRecognitionResult.SemanticInterpretation.Properties["degrees"][0];
                    System.Diagnostics.Debug.WriteLine(Convert.ToInt32(degrees) * 1.8 + 32);
                    break;

                default:
                    System.Diagnostics.Debug.WriteLine("None of my business");
                    break;
            }
        }


        /// <summary>
        /// Invoked when Navigation to a certain page fails
        /// </summary>
        /// <param name="sender">The Frame which failed navigation</param>
        /// <param name="e">Details about the navigation failure</param>
        void OnNavigationFailed(object sender, NavigationFailedEventArgs e)
        {
            throw new Exception("Failed to load Page " + e.SourcePageType.FullName);
        }

        /// <summary>
        /// Invoked when application execution is being suspended.  Application state is saved
        /// without knowing whether the application will be terminated or resumed with the contents
        /// of memory still intact.
        /// </summary>
        /// <param name="sender">The source of the suspend request.</param>
        /// <param name="e">Details about the suspend request.</param>
        private void OnSuspending(object sender, SuspendingEventArgs e)
        {
            var deferral = e.SuspendingOperation.GetDeferral();
            //TODO: Save application state and stop any background activity
            deferral.Complete();
        }
    }
}

Can somebody help me?


The VCD definition file you've listed above doesn't have either a PhraseTopic or PhraseList to define the parts you've got in curly braces:

 <ListenFor> {farenheit} farenheit to degrees </ListenFor>

I'm guessing you probably wanted a PhraseTopic because that allows for an unconstrained dictation suitable for a wide range of numbers, something like this:

<PhraseTopic Label="farenheit" Scenario="Dictation">
   <Subject>Temperature</Subject>
</PhraseTopic>

See the spec for VCDs here on MSDN; you might want to experiment with the Scenario value. This does mean you'll have to handle the text you get for the farenheit term yourself, of course, but dictated numbers typically come through in digit form such as '1234' (though not in 100% of cases).
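
For example, since the {farenheit} value arrives as plain recognized text, it is worth parsing it defensively before doing the conversion. A minimal sketch (the helper name ParseDictatedNumber is made up, and speechRecognitionResult is the variable from the OnActivated handler in the question):

// Hypothetical helper: turns the dictated value captured by the {farenheit}
// PhraseTopic into a number; dictation usually yields digits like "73",
// but not always, so return null when parsing fails.
private static double? ParseDictatedNumber(string spokenValue)
{
    double value;
    if (double.TryParse(spokenValue, out value))
    {
        return value;
    }
    return null; // e.g. "seventy three" would land here and needs extra handling
}

// Usage inside the "farenheitToDegrees" case:
string spoken = speechRecognitionResult.SemanticInterpretation.Properties["farenheit"][0];
double? fahrenheit = ParseDictatedNumber(spoken);
if (fahrenheit.HasValue)
{
    System.Diagnostics.Debug.WriteLine((fahrenheit.Value - 32) / 1.8);
}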


Check the properties of your VCD file; the values should be: Build Action = Content, Copy to Output Directory = Copy always. Also, make sure that you registered the VCD file:

VoiceCommandService.InstallCommandSetsFromFileAsync(new Uri("ms-appx:///VCD.xml"));
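
Note that InstallCommandSetsFromFileAsync is the Windows Phone 8.1-era API. For a Windows 10 app like the one in the question, the equivalent call is the one the question's OnLaunched already uses (VoiceCommandDefinitionManager). A minimal sketch with error handling, assuming the file is named commands.xml and that Windows.Storage and Windows.ApplicationModel.VoiceCommands are imported, placed inside the async OnLaunched:

try
{
    // Load the VCD file that was deployed with the package and register it.
    StorageFile vcdFile = await StorageFile.GetFileFromApplicationUriAsync(
        new Uri("ms-appx:///commands.xml"));
    await VoiceCommandDefinitionManager.InstallCommandDefinitionsFromStorageFileAsync(vcdFile);
}
catch (Exception ex)
{
    // If the file didn't make it into the package, this is where it shows up.
    System.Diagnostics.Debug.WriteLine("Installing the VCD failed: " + ex.Message);
}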

Check this MVA video about Cortana: https://www.microsoftvirtualacademy.com/en-US/training-courses/universal-windows-app-development-with-cortana-and-the-speech-sdk-8487


Well... it seems that you have understood all the steps, but something is still missing...

Here's an example I made showing Cortana's foreground functionality:

Here's the VCD...

    <?xml version="1.0" encoding="utf-8" ?>
    <VoiceCommands xmlns="http://schemas.microsoft.com/voicecommands/1.2">
      <CommandSet xml:lang="en-us" Name="HomeControlCommandSet_en-us">
        <CommandPrefix>HomeControl</CommandPrefix>
        <Example>Control alarm, temperature, light and others</Example>

        <Command Name="Activate_Alarm">
          <Example>Activate alarm</Example>
          <ListenFor>[Would] [you] [please] activate [the] alarm [please]</ListenFor>
          <ListenFor RequireAppName="BeforeOrAfterPhrase">Activate alarm</ListenFor>
          <ListenFor RequireAppName="ExplicitlySpecified">Activate {builtin:AppName} alarm</ListenFor>
          <Feedback>Activating alarm</Feedback>
          <Navigate />
        </Command>

After creating these definitions, you need to register them at app startup:

    protected async override void OnLaunched(LaunchActivatedEventArgs e)
    {
        ...
        // Install the VCD
        try
        {
            StorageFile vcdStorageFile = await Package.Current.InstalledLocation.GetFileAsync(@"HomeControlCommands.xml");
            await VoiceCommandDefinitionManager.InstallCommandDefinitionsFromStorageFileAsync(vcdStorageFile);
        }
        catch (Exception ex)
        {
            System.Diagnostics.Debug.WriteLine("There was an error registering the Voice Command Definitions", ex);
        }
    }

And then override the App.OnActivated method to handle the activation when a voice command is triggered:

    protected override void OnActivated(IActivatedEventArgs e)
    {
        // Handle when app is launched by Cortana
        if (e.Kind == ActivationKind.VoiceCommand)
        {
            VoiceCommandActivatedEventArgs commandArgs = e as VoiceCommandActivatedEventArgs;
            SpeechRecognitionResult speechRecognitionResult = commandArgs.Result;

            string voiceCommandName = speechRecognitionResult.RulePath[0];
            string textSpoken = speechRecognitionResult.Text;
            IReadOnlyList<string> recognizedVoiceCommandPhrases;

            System.Diagnostics.Debug.WriteLine("voiceCommandName: " + voiceCommandName);
            System.Diagnostics.Debug.WriteLine("textSpoken: " + textSpoken);

            switch (voiceCommandName)
            {
                case "Activate_Alarm":
                    System.Diagnostics.Debug.WriteLine("Activate_Alarm command");
                    break;

To see the complete tutorial, please visit this link, and a working project is here. Also, if you are interested in responding to the user through the Cortana window, check this post about Cortana in the background.
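
One more detail that matters for this foreground scenario: when Cortana launches the app by voice while it isn't already running, OnLaunched is not called, so OnActivated also has to make sure the window has a frame before anything can be shown. A minimal sketch of the tail end of OnActivated, reusing the voiceCommandName variable from the snippet above and the MainPage from the question's project (the navigation parameter is just an illustration):

    // At the end of OnActivated, after reading voiceCommandName:
    Frame rootFrame = Window.Current.Content as Frame;
    if (rootFrame == null)
    {
        // The app was cold-started by voice, so the frame doesn't exist yet.
        rootFrame = new Frame();
        Window.Current.Content = rootFrame;
    }

    // Hand the recognized command (and, if you like, the spoken text) to the page.
    rootFrame.Navigate(typeof(MainPage), voiceCommandName);
    Window.Current.Activate();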
